From da9f457caa37b9cc4636e81a74e75c8ead27603b Mon Sep 17 00:00:00 2001 From: taco-paco Date: Wed, 21 Aug 2024 16:43:22 +0700 Subject: [PATCH 01/21] feat: sqs-client with retries --- .gitignore | 5 ++++- worker/Cargo.toml | 11 +++++++++++ worker/src/main.rs | 39 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 54 insertions(+), 1 deletion(-) create mode 100644 worker/Cargo.toml create mode 100644 worker/src/main.rs diff --git a/.gitignore b/.gitignore index 7cd1a50b..1806dc5c 100644 --- a/.gitignore +++ b/.gitignore @@ -21,4 +21,7 @@ api/logs api/hardhat_env/workspaces # MacOS related -.DS_Store \ No newline at end of file +.DS_Store + +# credentials +/worker/credentials \ No newline at end of file diff --git a/worker/Cargo.toml b/worker/Cargo.toml new file mode 100644 index 00000000..4ee51da7 --- /dev/null +++ b/worker/Cargo.toml @@ -0,0 +1,11 @@ +[package] +name = "worker" +version = "0.0.1" +edition = "2021" + +[dependencies] +tokio = {version = "1.39.3", features = ["macros", "rt-multi-thread"]} +aws-config = "1.5.5" +aws-sdk-sqs = "1.39.0" +aws-sdk-dynamodb = "1.42.0" +aws-runtime = "1.4.0" \ No newline at end of file diff --git a/worker/src/main.rs b/worker/src/main.rs new file mode 100644 index 00000000..ec4a3df8 --- /dev/null +++ b/worker/src/main.rs @@ -0,0 +1,39 @@ +use aws_config::profile::profile_file::ProfileFiles; +use aws_config::BehaviorVersion; +use aws_runtime::env_config::file::{EnvConfigFileKind, EnvConfigFiles}; + +const AWS_PROFILE_DEFAULT: &str = "dev"; +const QUEUE_URL_DEFAULT: &str = "https://sqs.ap-southeast-2.amazonaws.com/266735844848/zksync-sqs"; + + + +#[tokio::main] +async fn main() { + let profile_name = std::env::var("AWS_PROFILE").unwrap_or(AWS_PROFILE_DEFAULT.into()); + let profile_files = EnvConfigFiles::builder() + .with_file(EnvConfigFileKind::Credentials, "./credentials") + .build(); + let config = aws_config::defaults(BehaviorVersion::latest()) + .profile_files(profile_files) + .profile_name(profile_name) + .region("ap-southeast-2") + .load() + .await; + + // Initialize SQS client + let sqs_client = aws_sdk_sqs::Client::new(&config); + + // Example: Send a message to an SQS queue + let send_result = sqs_client + .send_message() + .queue_url(QUEUE_URL_DEFAULT) + .message_body("Hello from Rust!") + .send() + .await + .map_err(|err| println!("{}", err.to_string())) + .expect("Oops"); + + let receive = sqs + + println!("{:?}", send_result); +} From 65557292b2e89416e9050a4b1c6c5b00ade0b3ab Mon Sep 17 00:00:00 2001 From: taco-paco Date: Thu, 22 Aug 2024 17:21:51 +0700 Subject: [PATCH 02/21] feat: intermidiate commit --- worker/Cargo.toml | 13 +- worker/src/commands/compile.rs | 170 ++++++++++++++++++++ worker/src/commands/mod.rs | 2 + worker/src/commands/verify.rs | 183 +++++++++++++++++++++ worker/src/dynamodb_client.rs | 32 ++++ worker/src/errors.rs | 7 + worker/src/main.rs | 49 +++--- worker/src/sqs_client.rs | 218 +++++++++++++++++++++++++ worker/src/sqs_listener.rs | 83 ++++++++++ worker/src/types.rs | 132 +++++++++++++++ worker/src/utils/cleaner.rs | 44 +++++ worker/src/utils/hardhat_config.rs | 163 +++++++++++++++++++ worker/src/utils/lib.rs | 248 +++++++++++++++++++++++++++++ worker/src/utils/mod.rs | 3 + worker/src/worker.rs | 182 +++++++++++++++++++++ 15 files changed, 1509 insertions(+), 20 deletions(-) create mode 100644 worker/src/commands/compile.rs create mode 100644 worker/src/commands/mod.rs create mode 100644 worker/src/commands/verify.rs create mode 100644 worker/src/dynamodb_client.rs create mode 100644 worker/src/errors.rs 
create mode 100644 worker/src/sqs_client.rs create mode 100644 worker/src/sqs_listener.rs create mode 100644 worker/src/types.rs create mode 100644 worker/src/utils/cleaner.rs create mode 100644 worker/src/utils/hardhat_config.rs create mode 100644 worker/src/utils/lib.rs create mode 100644 worker/src/utils/mod.rs create mode 100644 worker/src/worker.rs diff --git a/worker/Cargo.toml b/worker/Cargo.toml index 4ee51da7..e1f2d66e 100644 --- a/worker/Cargo.toml +++ b/worker/Cargo.toml @@ -4,8 +4,17 @@ version = "0.0.1" edition = "2021" [dependencies] -tokio = {version = "1.39.3", features = ["macros", "rt-multi-thread"]} +tokio = {version = "1.39.3", features = ["macros", "rt-multi-thread", "sync"]} aws-config = "1.5.5" +aws-sdk-s3 = "1.43.0" aws-sdk-sqs = "1.39.0" aws-sdk-dynamodb = "1.42.0" -aws-runtime = "1.4.0" \ No newline at end of file +aws-runtime = "1.4.0" +async-channel = "2.3.1" +chrono = "0.4.38" +crossbeam-queue = "0.3.11" +serde = { version = "1.0.152", features = ["derive"] } +serde_json = "1.0.124" +uuid = { version = "1.10.0", features = ["serde", "v4"] } +tracing = { version = "0.1.40", features = ["log"] } +tracing-subscriber = { version = "0.3.18", default-features = false, features = ["fmt", "ansi"] } diff --git a/worker/src/commands/compile.rs b/worker/src/commands/compile.rs new file mode 100644 index 00000000..5c2bedaa --- /dev/null +++ b/worker/src/commands/compile.rs @@ -0,0 +1,170 @@ +use crate::dynamodb_client::DynamoDBClient; +use crate::errors::{ApiError, Result}; +use crate::handlers::process::{do_process_command, fetch_process_result}; +use crate::handlers::types::{ + ApiCommand, ApiCommandResult, CompilationRequest, CompileResponse, CompiledFile, +}; +use crate::handlers::SPAWN_SEMAPHORE; +use crate::rate_limiter::RateLimited; +use crate::utils::cleaner::AutoCleanUp; +use crate::utils::hardhat_config::HardhatConfigBuilder; +use crate::utils::lib::{ + generate_folder_name, initialize_files, list_files_in_directory, status_code_to_message, + DEFAULT_SOLIDITY_VERSION, SOL_ROOT, ZKSOLC_VERSIONS, +}; +use crate::worker::WorkerEngine; +use rocket::serde::json; +use rocket::serde::json::Json; +use rocket::{tokio, State}; +use std::path::Path; +use std::process::Stdio; +use tracing::instrument; +use tracing::{error, info}; +use uuid::Uuid; + +pub async fn do_compile( + id: Uuid, + db_client: DynamoDBClient, + s3_client: aws, + compilation_request: CompilationRequest, +) -> Result> { + let zksolc_version = compilation_request.config.version; + + // check if the version is supported + if !ZKSOLC_VERSIONS.contains(&zksolc_version.as_str()) { + return Err(ApiError::VersionNotSupported(zksolc_version)); + } + + if compilation_request.contracts.is_empty() { + return Ok(Json(CompileResponse { + file_content: vec![], + status: status_code_to_message(Some(0)), + message: "Nothing to compile".into(), + })); + } + + let namespace = generate_folder_name(); + + // root directory for the contracts + let workspace_path_str = format!("{}/{}", SOL_ROOT, namespace); + let workspace_path = Path::new(&workspace_path_str); + + // root directory for the artifacts + let artifacts_path_str = format!("{}/{}", workspace_path_str, "artifacts-zk"); + let artifacts_path = Path::new(&artifacts_path_str); + + // root directory for user files (hardhat config, etc) + let user_files_path_str = workspace_path_str.clone(); + let hardhat_config_path = Path::new(&user_files_path_str).join("hardhat.config.ts"); + + // instantly create the directories + tokio::fs::create_dir_all(workspace_path) + .await + 
.map_err(ApiError::FailedToWriteFile)?; + tokio::fs::create_dir_all(artifacts_path) + .await + .map_err(ApiError::FailedToWriteFile)?; + + // when the compilation is done, clean up the directories + // it will be called when the AutoCleanUp struct is dropped + let auto_clean_up = AutoCleanUp { + dirs: vec![workspace_path.to_str().unwrap()], + }; + + // write the hardhat config file + let mut hardhat_config_builder = HardhatConfigBuilder::new(); + hardhat_config_builder + .zksolc_version(&zksolc_version) + .solidity_version(DEFAULT_SOLIDITY_VERSION); + if let Some(target_path) = compilation_request.target_path { + hardhat_config_builder.paths_sources(&target_path); + } + + let hardhat_config_content = hardhat_config_builder.build().to_string_config(); + + // create parent directories + tokio::fs::create_dir_all(hardhat_config_path.parent().unwrap()) + .await + .map_err(ApiError::FailedToWriteFile)?; + + tokio::fs::write(hardhat_config_path, hardhat_config_content) + .await + .map_err(ApiError::FailedToWriteFile)?; + + // filter test files from compilation candidates + let contracts = compilation_request + .contracts + .into_iter() + .filter(|contract| !contract.file_name.ends_with("_test.sol")) + .collect(); + + // initialize the files + initialize_files(contracts, workspace_path).await?; + + // Limit number of spawned processes. RAII released + let _permit = SPAWN_SEMAPHORE.acquire().await.expect("Expired semaphore"); + + let command = tokio::process::Command::new("npx") + .arg("hardhat") + .arg("compile") + .current_dir(workspace_path) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn(); + let process = command.map_err(ApiError::FailedToExecuteCommand)?; + let output = process + .wait_with_output() + .await + .map_err(ApiError::FailedToReadOutput)?; + + let status = output.status; + let message = String::from_utf8_lossy(&output.stdout).to_string(); + + info!("Output: \n{:?}", String::from_utf8_lossy(&output.stdout)); + if !status.success() { + error!( + "Compilation error: {}", + String::from_utf8_lossy(&output.stderr) + ); + return Ok(Json(CompileResponse { + file_content: vec![], + message: format!( + "Failed to compile:\n{}", + String::from_utf8_lossy(&output.stderr) + ), + status: "Error".to_string(), + })); + } + + // fetch the files in the artifacts directory + let mut file_contents: Vec = vec![]; + let file_paths = list_files_in_directory(artifacts_path); + + for file_path in file_paths.iter() { + let file_content = tokio::fs::read_to_string(file_path) + .await + .map_err(ApiError::FailedToReadFile)?; + let full_path = Path::new(file_path); + let relative_path = full_path.strip_prefix(artifacts_path).unwrap_or(full_path); + let relative_path_str = relative_path.to_str().unwrap(); + + // todo(varex83): is it the best way to check? 
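+        // Hardhat emits two JSON files per contract under artifacts-zk:
+        // `Foo.json`, holding the ABI and bytecode, and `Foo.dbg.json`, which
+        // only points at the shared build-info file. Keeping `.json` while
+        // dropping the `.dbg.json` suffix therefore selects exactly the
+        // deployable artifacts.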
+ let is_contract = + !relative_path_str.ends_with(".dbg.json") && relative_path_str.ends_with(".json"); + + file_contents.push(CompiledFile { + file_name: relative_path_str.to_string(), + file_content, + is_contract, + }); + } + + // calling here explicitly to avoid dropping the AutoCleanUp struct + auto_clean_up.clean_up().await; + + Ok(Json(CompileResponse { + file_content: file_contents, + status: status_code_to_message(status.code()), + message, + })) +} diff --git a/worker/src/commands/mod.rs b/worker/src/commands/mod.rs new file mode 100644 index 00000000..cd0dbc1c --- /dev/null +++ b/worker/src/commands/mod.rs @@ -0,0 +1,2 @@ +mod compile; +mod verify; diff --git a/worker/src/commands/verify.rs b/worker/src/commands/verify.rs new file mode 100644 index 00000000..85642f4c --- /dev/null +++ b/worker/src/commands/verify.rs @@ -0,0 +1,183 @@ +use crate::errors::{ApiError, Result}; +use crate::handlers::process::{do_process_command, fetch_process_result}; +use crate::handlers::types::{ApiCommand, ApiCommandResult, VerificationRequest, VerifyResponse}; +use crate::handlers::SPAWN_SEMAPHORE; +use crate::rate_limiter::RateLimited; +use crate::utils::cleaner::AutoCleanUp; +use crate::utils::hardhat_config::HardhatConfigBuilder; +use crate::utils::lib::{ + generate_folder_name, initialize_files, ALLOWED_NETWORKS, DEFAULT_SOLIDITY_VERSION, SOL_ROOT, + ZKSOLC_VERSIONS, +}; +use crate::worker::WorkerEngine; +use rocket::serde::{json, json::Json}; +use rocket::{tokio, State}; +use std::path::Path; +use std::process::Stdio; +use tracing::info; +use tracing::instrument; + +#[instrument] +#[post("/verify", format = "json", data = "")] +pub async fn verify( + verification_request_json: Json, + _rate_limited: RateLimited, +) -> Json { + info!("/verify"); + + do_verify(verification_request_json.0) + .await + .unwrap_or_else(|e| { + Json(VerifyResponse { + message: e.to_string(), + status: "Error".to_string(), + }) + }) +} + +#[instrument] +#[post("/verify-async", format = "json", data = "")] +pub fn verify_async( + verification_request_json: Json, + _rate_limited: RateLimited, + engine: &State, +) -> String { + info!("/verify-async",); + + do_process_command(ApiCommand::Verify(verification_request_json.0), engine) +} + +#[instrument] +#[get("/verify-result/")] +pub async fn get_verify_result(process_id: String, engine: &State) -> String { + info!("/verify-result/{:?}", process_id); + + fetch_process_result(process_id, engine, |result| match result { + ApiCommandResult::Verify(verification_result) => { + json::to_string(&verification_result).unwrap_or_default() + } + _ => String::from("Result not available"), + }) +} + +fn extract_verify_args(request: &VerificationRequest) -> Vec { + let mut args: Vec = vec!["hardhat".into(), "verify".into(), "--network".into()]; + if request.config.network == "sepolia" { + args.push("zkSyncTestnet".into()) + } else { + args.push("zkSyncMainnet".into()) + } + + if let Some(ref target_contract) = request.target_contract { + args.push("--contract".into()); + args.push(target_contract.clone()); + } + + args.push(request.config.contract_address.clone()); + args.extend(request.config.inputs.clone()); + + args +} + +pub async fn do_verify(verification_request: VerificationRequest) -> Result> { + let zksolc_version = verification_request.config.zksolc_version.clone(); + + // check if the version is supported + if !ZKSOLC_VERSIONS.contains(&zksolc_version.as_str()) { + return Err(ApiError::VersionNotSupported(zksolc_version)); + } + + let solc_version = verification_request + 
.config + .solc_version + .clone() + .unwrap_or(DEFAULT_SOLIDITY_VERSION.to_string()); + + let network = verification_request.config.network.clone(); + + // check if the network is supported + if !ALLOWED_NETWORKS.contains(&network.as_str()) { + return Err(ApiError::UnknownNetwork(network)); + } + + let namespace = generate_folder_name(); + + // root directory for the contracts + let workspace_path_str = format!("{}/{}", SOL_ROOT, namespace); + let workspace_path = Path::new(&workspace_path_str); + + // root directory for the artifacts + let artifacts_path_str = format!("{}/{}", workspace_path_str, "artifacts-zk"); + let artifacts_path = Path::new(&artifacts_path_str); + + // root directory for user files (hardhat config, etc) + let user_files_path_str = workspace_path_str.clone(); + let hardhat_config_path = Path::new(&user_files_path_str).join("hardhat.config.ts"); + + // instantly create the directories + tokio::fs::create_dir_all(workspace_path) + .await + .map_err(ApiError::FailedToWriteFile)?; + tokio::fs::create_dir_all(artifacts_path) + .await + .map_err(ApiError::FailedToWriteFile)?; + + // when the compilation is done, clean up the directories + // it will be called when the AutoCleanUp struct is dropped + let auto_clean_up = AutoCleanUp { + dirs: vec![workspace_path.to_str().unwrap()], + }; + + // write the hardhat config file + let hardhat_config_content = HardhatConfigBuilder::new() + .zksolc_version(&zksolc_version) + .solidity_version(&solc_version) + .build() + .to_string_config(); + + // create parent directories + tokio::fs::create_dir_all(hardhat_config_path.parent().unwrap()) + .await + .map_err(ApiError::FailedToWriteFile)?; + + tokio::fs::write(hardhat_config_path, hardhat_config_content) + .await + .map_err(ApiError::FailedToWriteFile)?; + + // initialize the files + initialize_files(verification_request.contracts.clone(), workspace_path).await?; + + // Limit number of spawned processes. 
RAII released + let _permit = SPAWN_SEMAPHORE.acquire().await.expect("Expired semaphore"); + + let args = extract_verify_args(&verification_request); + let command = tokio::process::Command::new("npx") + .args(args) + .current_dir(workspace_path) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn(); + + let process = command.map_err(ApiError::FailedToExecuteCommand)?; + let output = process + .wait_with_output() + .await + .map_err(ApiError::FailedToReadOutput)?; + let status = output.status; + let message = String::from_utf8_lossy(&output.stdout).to_string(); + + // calling here explicitly to avoid dropping the AutoCleanUp struct + auto_clean_up.clean_up().await; + + if !status.success() { + return Ok(Json(VerifyResponse { + status: "Error".to_string(), + message: String::from_utf8_lossy(&output.stderr).to_string(), + })); + } + + Ok(Json(VerifyResponse { + status: "Success".to_string(), + message, + })) +} diff --git a/worker/src/dynamodb_client.rs b/worker/src/dynamodb_client.rs new file mode 100644 index 00000000..15da030c --- /dev/null +++ b/worker/src/dynamodb_client.rs @@ -0,0 +1,32 @@ +use crate::types::Item; +use aws_sdk_dynamodb::types::AttributeValue; +use aws_sdk_dynamodb::Client; + +#[derive(Clone)] +pub struct DynamoDBClient { + client: Client, + table_name: String, +} + +impl DynamoDBClient { + pub fn new(client: Client, table_name: &str) -> Self { + Self { + client, + table_name: table_name.into(), + } + } + + pub async fn delete_item(&self, id: String) { + // TODO: + } + + pub async fn get_item(&self, id: String) -> Item { + let result = self + .client + .get_item() + .table_name(self.table_name.clone()) + .key("ID", AttributeValue::S(id)) + .send() + .await?; + } +} diff --git a/worker/src/errors.rs b/worker/src/errors.rs new file mode 100644 index 00000000..0844cb00 --- /dev/null +++ b/worker/src/errors.rs @@ -0,0 +1,7 @@ +use aws_sdk_dynamodb::config::http::HttpResponse; +use aws_sdk_sqs::error::SdkError; +use aws_sdk_sqs::operation::delete_message::DeleteMessageError; +use aws_sdk_sqs::operation::receive_message::ReceiveMessageError; + +pub type ReceiveError = SdkError; +pub type DeleteError = SdkError; diff --git a/worker/src/main.rs b/worker/src/main.rs index ec4a3df8..fdb32ead 100644 --- a/worker/src/main.rs +++ b/worker/src/main.rs @@ -1,11 +1,23 @@ -use aws_config::profile::profile_file::ProfileFiles; +mod commands; +mod dynamodb_client; +mod errors; +mod sqs_client; +mod sqs_listener; +mod types; +mod utils; +mod worker; + use aws_config::BehaviorVersion; use aws_runtime::env_config::file::{EnvConfigFileKind, EnvConfigFiles}; +use std::ops::Deref; +use std::time::Duration; -const AWS_PROFILE_DEFAULT: &str = "dev"; -const QUEUE_URL_DEFAULT: &str = "https://sqs.ap-southeast-2.amazonaws.com/266735844848/zksync-sqs"; - +use crate::{sqs_client::SqsClient, sqs_listener::SqsListener}; +const AWS_PROFILE_DEFAULT: &str = "dev"; +// TODO: remove +pub(crate) const QUEUE_URL_DEFAULT: &str = + "https://sqs.ap-southeast-2.amazonaws.com/266735844848/zksync-sqs"; #[tokio::main] async fn main() { @@ -22,18 +34,19 @@ async fn main() { // Initialize SQS client let sqs_client = aws_sdk_sqs::Client::new(&config); - - // Example: Send a message to an SQS queue - let send_result = sqs_client - .send_message() - .queue_url(QUEUE_URL_DEFAULT) - .message_body("Hello from Rust!") - .send() - .await - .map_err(|err| println!("{}", err.to_string())) - .expect("Oops"); - - let receive = sqs - - println!("{:?}", send_result); + let sqs_client = SqsClient::new(sqs_client, 
QUEUE_URL_DEFAULT); + + let sqs_listener = SqsListener::new(sqs_client, Duration::from_secs(1)); + let sqs_receiver = sqs_listener.receiver(); + + while let Ok(message) = sqs_receiver.recv().await { + println!("{:?}", message); + if let Some(receipt_handle) = message.receipt_handle { + sqs_receiver + .delete_message(receipt_handle) + .await + .map_err(|err| println!("delete error: {}", err.to_string())) + .unwrap(); + } + } } diff --git a/worker/src/sqs_client.rs b/worker/src/sqs_client.rs new file mode 100644 index 00000000..3e4f6bdb --- /dev/null +++ b/worker/src/sqs_client.rs @@ -0,0 +1,218 @@ +use crate::errors::{DeleteError, ReceiveError}; +use aws_config::retry::ErrorKind; +use aws_sdk_sqs::operation::delete_message::DeleteMessageOutput; +use aws_sdk_sqs::operation::receive_message::ReceiveMessageOutput; +use aws_sdk_sqs::Client; +use std::sync::atomic::{AtomicU8, Ordering}; +use std::sync::Arc; +use std::time::Duration; +use tokio::sync::{oneshot, Mutex}; +use tokio::time::sleep; + +macro_rules! match_result { + ($err_type:ident, $result:expr) => { + match $result { + Ok(val) => Ok(Some(val)), + Err(err) => match err { + $err_type::ConstructionFailure(_) => Err(err), + $err_type::TimeoutError(_) => Ok(None), + $err_type::DispatchFailure(dispatch_err) => { + if dispatch_err.is_io() { + return Ok(None); + } + if dispatch_err.is_timeout() { + return Ok(None); + } + if dispatch_err.is_user() { + return Err($err_type::DispatchFailure(dispatch_err)); + } + if let Some(other) = dispatch_err.as_other() { + return match other { + ErrorKind::ClientError => Err($err_type::DispatchFailure(dispatch_err)), + _ => Ok(None), + }; + } + Err($err_type::DispatchFailure(dispatch_err)) + } + other => Err(other), + }, + Err(err) => Err(err.into()), + } + }; +} + +enum Action { + Default, + Receive(oneshot::Sender>), + Delete { + receipt_handle: String, + sender: oneshot::Sender>, + }, +} + +impl Default for Action { + fn default() -> Self { + Action::Default + } +} + +enum State { + Connected = 0, + Reconnecting = 1, +} + +#[derive(Clone)] +pub struct SqsClient { + client: Client, + queue_url: String, + pending_actions: Arc>>, + state: Arc, +} + +impl SqsClient { + pub fn new(client: Client, queue_url: impl Into) -> Self { + Self { + client, + queue_url: queue_url.into(), + pending_actions: Arc::new(Mutex::new(vec![])), + state: Arc::new(AtomicU8::new(State::Connected as u8)), + } + } + + async fn receive_attempt(&self) -> Result, ReceiveError> { + let result = self + .client + .receive_message() + .queue_url(self.queue_url.clone()) + .max_number_of_messages(1) + .send() + .await; + + match_result!(ReceiveError, result) + } + + async fn delete_attempt( + &self, + receipt_handle: impl Into, + ) -> Result, DeleteError> { + let result = self + .client + .delete_message() + .queue_url(self.queue_url.clone()) + .receipt_handle(receipt_handle) + .send() + .await; + + match_result!(DeleteError, result) + } + + async fn worker(self) { + loop { + let mut actions = self.pending_actions.lock().await; + + let mut pivot = 0; + for i in 0..actions.len() { + let action = std::mem::take(&mut actions[i]); + match action { + Action::Receive(sender) => match self.receive_attempt().await { + Ok(Some(val)) => { + self.state.store(State::Connected as u8, Ordering::Release); + if sender.send(Ok(val)).is_err() { + break; + } + } + Err(err) => { + if sender.send(Err(err)).is_err() { + break; + } + } + Ok(None) => { + // Keeping in the array to resend. 
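+                            // Completed sends are dropped from the vector;
+                            // attempts that only timed out are compacted to
+                            // the front via `pivot`, so the `truncate(pivot)`
+                            // below keeps just the still-pending actions for
+                            // the next retry tick.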
+ actions[pivot] = Action::Receive(sender); + pivot += 1; + } + }, + Action::Delete { + receipt_handle, + sender, + } => match self.delete_attempt(receipt_handle.clone()).await { + Ok(Some(val)) => { + self.state.store(State::Connected as u8, Ordering::Release); + if sender.send(Ok(val)).is_err() { + break; + } + } + Err(err) => { + if sender.send(Err(err)).is_err() { + break; + } + } + Ok(None) => { + actions[pivot] = Action::Delete { + receipt_handle, + sender, + }; + pivot += 1; + } + }, + Action::Default => unreachable!(), + }; + } + + actions.truncate(pivot); + drop(actions); + + sleep(Duration::from_secs(3)).await; + } + } + + pub async fn receive_message(&self) -> Result { + match self.state.load(Ordering::Acquire) { + 0 => match self.receive_attempt().await { + Ok(None) => self + .state + .store(State::Reconnecting as u8, Ordering::Release), + Ok(Some(val)) => return Ok(val), + Err(err) => return Err(err), + }, + 1 => {} + _ => unreachable!(), + }; + + // State::Reconnecting branch + let (sender, receiver) = oneshot::channel(); + self.pending_actions + .lock() + .await + .push(Action::Receive(sender)); + + receiver.await.unwrap() // TODO: for now + } + + pub async fn delete_message( + &self, + receipt_handle: impl Into, + ) -> Result<(), DeleteError> { + let receipt_handle = receipt_handle.into(); + match self.state.load(Ordering::Acquire) { + 0 => match self.delete_attempt(receipt_handle.clone()).await { + Ok(None) => self + .state + .store(State::Reconnecting as u8, Ordering::Release), + Ok(Some(_)) => return Ok(()), + Err(err) => return Err(err), + }, + 1 => {} + _ => unreachable!(), + }; + + // State::Reconnecting branch + let (sender, receiver) = oneshot::channel(); + self.pending_actions.lock().await.push(Action::Delete { + receipt_handle, + sender, + }); + + receiver.await.unwrap().map(|_| ()) // TODO: for now + } +} diff --git a/worker/src/sqs_listener.rs b/worker/src/sqs_listener.rs new file mode 100644 index 00000000..b0faca46 --- /dev/null +++ b/worker/src/sqs_listener.rs @@ -0,0 +1,83 @@ +use crate::errors::{DeleteError, ReceiveError}; +use async_channel::{Receiver, Recv, Sender}; +use aws_sdk_sqs::config::http::HttpResponse; +use aws_sdk_sqs::error::SdkError; +use aws_sdk_sqs::operation::receive_message::ReceiveMessageError; +use aws_sdk_sqs::types::Message; +use std::time::Duration; +use tokio::task::JoinHandle; +use tokio::time::sleep; + +use crate::sqs_client::SqsClient; + +pub struct SqsListener { + handle: JoinHandle>, + receiver: Receiver, + client: SqsClient, +} + +impl SqsListener { + pub fn new(client: SqsClient, poll_interval: Duration) -> Self { + // TODO: unbounded? 
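+        // A bounded channel is a backpressure choice: if every worker is
+        // busy, `send` in `listen` waits instead of buffering without limit.
+        // Unfetched messages simply stay on the SQS queue, and fetched but
+        // unacknowledged ones become visible again after the visibility
+        // timeout, so nothing is lost while the channel is full.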
+ let (sender, receiver) = async_channel::bounded(1000); + let handle = tokio::spawn(Self::listen(client.clone(), sender, poll_interval)); + + Self { + handle, + receiver, + client, + } + } + + async fn listen( + client: SqsClient, + sender: Sender, + poll_interval: Duration, + ) -> Result<(), SdkError> { + loop { + let response = client.receive_message().await?; + let messages = if let Some(messages) = response.messages { + messages + } else { + continue; + }; + + for message in messages { + if sender.send(message).await.is_err() { + return Ok(()); + } + } + + sleep(poll_interval).await; + } + } + + pub fn receiver(&self) -> SqsReceiver { + SqsReceiver { + client: self.client.clone(), + receiver: self.receiver.clone(), + } + } + + pub fn handle(self) -> JoinHandle> { + self.handle + } +} + +pub struct SqsReceiver { + client: SqsClient, + receiver: Receiver, +} + +impl SqsReceiver { + pub fn recv(&self) -> Recv<'_, Message> { + self.receiver.recv() + } + + pub async fn delete_message( + &self, + receipt_handle: impl Into, + ) -> Result<(), DeleteError> { + self.client.delete_message(receipt_handle).await + } +} diff --git a/worker/src/types.rs b/worker/src/types.rs new file mode 100644 index 00000000..aad8c479 --- /dev/null +++ b/worker/src/types.rs @@ -0,0 +1,132 @@ +// TODO: move to separate crate + +use aws_sdk_dynamodb::types::AttributeValue; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; +use std::os::macos::raw::stat; +use uuid::Uuid; + +#[derive(Serialize, Deserialize)] +#[serde(tag = "type")] +pub enum SqsMessage { + Compile { id: Uuid }, + Verify { id: Uuid }, +} + +#[derive(Debug, Clone, Serialize)] +pub enum Status { + // TODO: add FilesUploaded(?) + Pending, + Compiling, + Ready(String), + Failed(String), +} + +impl From<&Status> for u32 { + fn from(value: &Status) -> Self { + match value { + Status::Pending => 0, + Status::Compiling => 1, + Status::Ready(_) => 2, + Status::Failed(_) => 3, + } + } +} + +impl From for HashMap { + fn from(value: Status) -> Self { + match value.clone() { + Status::Pending | Status::Compiling => HashMap::from([( + "Status".into(), + AttributeValue::N(u32::from(&value).to_string()), + )]), + Status::Ready(val) | Status::Failed(val) => HashMap::from([ + ( + "Status".into(), + AttributeValue::N(u32::from(&value).to_string()), + ), + ("Data".into(), AttributeValue::S(val)), + ]), + } + } +} + +pub struct Item { + // TODO: uuid? 
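+    // Serialized through the From/TryFrom impls below, a stored record looks
+    // like { "ID": S(<uuid>), "Status": N(<0..3>), "Data": S(<payload>) },
+    // where Data is present only for the Ready/Failed variants.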
+    pub id: String,
+    pub status: Status,
+}
+
+impl From<Item> for HashMap<String, AttributeValue> {
+    fn from(value: Item) -> Self {
+        let mut item_map = HashMap::from([("ID".into(), AttributeValue::S(value.id))]);
+        item_map.extend(HashMap::from(value.status));
+
+        item_map
+    }
+}
+
+impl TryFrom<&HashMap<String, AttributeValue>> for Status {
+    // TODO: a proper error type
+    type Error = ();
+    fn try_from(value: &HashMap<String, AttributeValue>) -> Result<Self, Self::Error> {
+        let status = value.get("Status").ok_or(())?;
+        let status: u32 = status.as_n().map_err(|_| ())?.parse::<u32>().map_err(|_| ())?;
+        let status = match status {
+            0 => Status::Pending,
+            1 => Status::Compiling,
+            2 => {
+                let data = value.get("Data").ok_or(())?;
+                let data = data.as_s().map_err(|_| ())?;
+
+                Status::Ready(data.clone())
+            }
+            3 => {
+                let data = value.get("Data").ok_or(())?;
+                let data = data.as_s().map_err(|_| ())?;
+
+                Status::Failed(data.clone())
+            }
+            _ => return Err(()),
+        };
+
+        Ok(status)
+    }
+}
+
+impl TryFrom<HashMap<String, AttributeValue>> for Item {
+    // TODO: a proper error type
+    type Error = ();
+    fn try_from(value: HashMap<String, AttributeValue>) -> Result<Self, Self::Error> {
+        let id = value.get("ID").ok_or(())?;
+        let id = id.as_s().map_err(|_| ())?;
+
+        // Status (and its Data payload) are decoded by the TryFrom impl above.
+        let status = Status::try_from(&value)?;
+
+        Ok(Item {
+            id: id.clone(),
+            status,
+        })
+    }
+}
diff --git a/worker/src/utils/cleaner.rs b/worker/src/utils/cleaner.rs
new file mode 100644
index 00000000..e087bbbe
--- /dev/null
+++ b/worker/src/utils/cleaner.rs
@@ -0,0 +1,44 @@
+// NOTE: `tokio::fs` below requires tokio's `fs` feature.
+use std::path::Path;
+
+pub struct AutoCleanUp<'a> {
+    pub(crate) dirs: Vec<&'a str>,
+}
+
+impl Drop for AutoCleanUp<'_> {
+    fn drop(&mut self) {
+        self.clean_up_sync();
+    }
+}
+
+impl AutoCleanUp<'_> {
+    pub async fn clean_up(&self) {
+        for path in self.dirs.iter() {
+            println!("Removing path: {:?}", path);
+
+            // check if the path exists
+            if !Path::new(path).exists() {
+                continue;
+            }
+
+            if let Err(e) = tokio::fs::remove_dir_all(path).await {
+                tracing::info!("Failed to remove file: {:?}", e);
+            }
+        }
+    }
+
+    pub fn clean_up_sync(&self) {
+        for path in self.dirs.iter() {
+            println!("Removing path: {:?}", path);
+
+            // check if the path exists
+            if !Path::new(path).exists() {
+                continue;
+            }
+
+            if let Err(e) = std::fs::remove_dir_all(path) {
+                tracing::info!("Failed to remove file: {:?}", e);
+            }
+        }
+    }
+}
diff --git a/worker/src/utils/hardhat_config.rs b/worker/src/utils/hardhat_config.rs
new file mode 100644
index 00000000..2a191bbe
--- /dev/null
+++ b/worker/src/utils/hardhat_config.rs
@@ -0,0 +1,162 @@
+use crate::utils::lib::{DEFAULT_SOLIDITY_VERSION, DEFAULT_ZKSOLC_VERSION};
+use std::fmt::Formatter;
+
+const DEFAULT_CONTRACTS_LOCATION: &str = "./contracts";
+
+#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)]
+pub struct HardhatConfig {
+    pub zksolc: ZksolcConfig,
+    pub solidity: SolidityConfig,
+    pub paths: ProjectPathsUserConfig,
+}
+
+#[derive(serde::Serialize, serde::Deserialize, Clone, Debug, Default)]
+pub struct ZksolcConfig {
+    pub version: String,
+    pub settings: serde_json::Value,
+}
+
+#[derive(serde::Serialize, serde::Deserialize, Clone, Debug, Default)]
+pub
struct SolidityConfig { + pub version: String, +} + +#[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] +pub struct ProjectPathsUserConfig { + pub sources: String, +} + +impl std::fmt::Display for ProjectPathsUserConfig { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, r#"{{sources: "{}",}}"#, self.sources) + } +} + +impl Default for ProjectPathsUserConfig { + fn default() -> Self { + Self { + sources: DEFAULT_CONTRACTS_LOCATION.into(), + } + } +} + +#[derive(Default)] +pub struct HardhatConfigBuilder { + config: HardhatConfig, +} + +impl Default for HardhatConfig { + fn default() -> Self { + Self { + zksolc: ZksolcConfig { + version: DEFAULT_ZKSOLC_VERSION.to_string(), + settings: serde_json::json!({}), + }, + solidity: SolidityConfig { + version: DEFAULT_SOLIDITY_VERSION.to_string(), + }, + paths: ProjectPathsUserConfig::default(), + } + } +} + +impl HardhatConfig { + pub fn new() -> Self { + Self::default() + } + + pub fn to_string_config(&self) -> String { + let config_prefix_js = r#" +import { HardhatUserConfig } from "hardhat/config"; + +import "@matterlabs/hardhat-zksync-solc"; +import "@matterlabs/hardhat-zksync-verify"; + +export const zkSyncTestnet = process.env.NODE_ENV == "test" +? { + url: "http://127.0.0.1:8011", + ethNetwork: "http://127.0.0.1:8045", + zksync: true, + } +: { + url: "https://sepolia.era.zksync.dev", + ethNetwork: "sepolia", + zksync: true, + verifyURL: "https://explorer.sepolia.era.zksync.dev/contract_verification" + }; + +export const zkSyncMainnet = { + url: "https://mainnet.era.zksync.io", + ethNetwork: "mainnet", + zksync: true, + verifyURL: "https://zksync2-mainnet-explorer.zksync.io/contract_verification" + }; +"#; + + let config = format!( + r#"{} +const config: HardhatUserConfig = {{ + zksolc: {{ + version: "{}", + settings: {{}}, + }}, + defaultNetwork: "zkSyncTestnet", + networks: {{ + hardhat: {{ + zksync: false, + }}, + zkSyncTestnet, + zkSyncMainnet, + }}, + solidity: {{ + version: "{}", + }}, + paths: {}, +}}; + +export default config; +"#, + config_prefix_js, self.zksolc.version, self.solidity.version, self.paths + ); + + config + } +} + +impl HardhatConfigBuilder { + pub fn new() -> Self { + Self::default() + } + + pub fn zksolc_version(&mut self, version: &str) -> &mut Self { + self.config.zksolc.version = version.to_string(); + self + } + + pub fn solidity_version(&mut self, version: &str) -> &mut Self { + self.config.solidity.version = version.to_string(); + self + } + + pub fn paths_sources(&mut self, target_path: &str) -> &mut Self { + self.config.paths.sources = target_path.to_string(); + self + } + + pub fn build(&self) -> HardhatConfig { + self.config.clone() + } +} + +#[test] +fn test_paths_user_config_display() { + const SOURCES: &str = "./some/folder"; + + let expected = format!(r#"{{sources: "{}"}}"#, SOURCES); + let paths = ProjectPathsUserConfig { + sources: SOURCES.to_string(), + }; + let actual = format!("{}", paths); + + assert_eq!(expected, actual); +} diff --git a/worker/src/utils/lib.rs b/worker/src/utils/lib.rs new file mode 100644 index 00000000..7008c8cc --- /dev/null +++ b/worker/src/utils/lib.rs @@ -0,0 +1,248 @@ +use crate::errors::{ApiError, Result}; +use crate::handlers::types::{CompilationConfig, CompilationRequest, CompiledFile}; +use rocket::tokio; +use rocket::tokio::fs; +use solang_parser::diagnostics::{Diagnostic, ErrorType, Level}; +use solang_parser::pt::Loc; +use std::path::{Path, PathBuf}; +use uuid::Uuid; +use walkdir::WalkDir; + +pub const SOL_ROOT: &str = 
concat!(env!("CARGO_MANIFEST_DIR"), "/", "hardhat_env/workspaces/"); +pub const ZK_CACHE_ROOT: &str = concat!( + env!("CARGO_MANIFEST_DIR"), + "/", + "hardhat_env/workspaces/cache-zk/" +); +pub const HARDHAT_ENV_ROOT: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/", "hardhat_env/"); + +pub const ARTIFACTS_ROOT: &str = + concat!(env!("CARGO_MANIFEST_DIR"), "/", "hardhat_env/artifacts-zk"); + +pub const CARGO_MANIFEST_DIR: &str = env!("CARGO_MANIFEST_DIR"); + +pub const DURATION_TO_PURGE: u64 = 60 * 5; // 5 minutes + +pub const ZKSOLC_VERSIONS: [&str; 2] = ["1.4.1", "1.4.0"]; + +pub const DEFAULT_SOLIDITY_VERSION: &str = "0.8.24"; + +pub const DEFAULT_ZKSOLC_VERSION: &str = "1.4.1"; + +pub const ALLOWED_NETWORKS: [&str; 2] = ["sepolia", "mainnet"]; + +#[allow(dead_code)] +pub const TEMP_DIR: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/", "temp/"); + +pub fn get_file_ext(file_path: &str) -> String { + match file_path.split('.').last() { + Some(ext) => ext.to_string(), + None => { + debug!("LOG: File extension not found"); + "".to_string() + } + } +} + +pub fn check_file_ext(file_path: &str, ext: &str) -> Result<()> { + let actual_ext = get_file_ext(file_path); + if actual_ext == *ext { + Ok(()) + } else { + Err(ApiError::FileExtensionNotSupported(actual_ext)) + } +} + +pub fn path_buf_to_string(path_buf: PathBuf) -> Result { + path_buf + .to_str() + .ok_or(ApiError::FailedToParseString) + .map(|s| s.to_string()) +} + +pub async fn init_parent_directories(file_path: PathBuf) { + match file_path.parent() { + Some(parent) => match fs::create_dir_all(parent).await { + Ok(_) => { + debug!("LOG: Created directory: {:?}", parent); + } + Err(e) => { + debug!("LOG: Error creating directory: {:?}", e); + } + }, + None => { + debug!("LOG: Error creating directory"); + } + } +} + +pub fn status_code_to_message(status: Option) -> String { + match status { + Some(0) => "Success", + Some(_) => "CompilationFailed", + None => "UnknownError", + } + .to_string() +} + +pub fn get_file_path(version: &str, file_path: &str) -> PathBuf { + match get_file_ext(file_path).to_string() { + // Leaving this here for potential use with vyper + ext if ext == "sol" => { + let file_path = Path::new(SOL_ROOT).join(version).join(file_path); + let file_name = file_path.file_name().unwrap().to_str().unwrap(); + + // Trim .sol extension + let file_name_without_ext = file_name.trim_end_matches(".sol"); + + // make //.sol + file_path + .parent() + .unwrap() + .join(file_name_without_ext) + .join(file_name) + } + _ => Path::new(SOL_ROOT).join(version).join(file_path), + } +} + +pub fn timestamp() -> u64 { + chrono::Utc::now().timestamp() as u64 +} + +pub fn to_human_error( + Diagnostic { + loc, + level, + ty, + message, + notes, + }: Diagnostic, +) -> String { + let level = match level { + Level::Debug => "Debug", + Level::Info => "Info", + Level::Warning => "Warning", + Level::Error => "Error", + }; + + let loc = match loc { + Loc::Builtin => "Builtin".to_string(), + Loc::CommandLine => "CommandLine".to_string(), + Loc::Implicit => "Implicit".to_string(), + Loc::Codegen => "Codegen".to_string(), + Loc::File(_, start, end) => format!("{}:{}", start, end), + }; + + let ty = match ty { + ErrorType::None => "None", + ErrorType::ParserError => "ParserError", + ErrorType::SyntaxError => "SyntaxError", + ErrorType::DeclarationError => "DeclarationError", + ErrorType::CastError => "CastError", + ErrorType::TypeError => "TypeError", + ErrorType::Warning => "Warning", + }; + + let notes = notes + .iter() + .map(|note| note.message.clone()) + 
.collect::>() + .join("\n"); + + format!( + "level: {}, loc: {}, ty: {}, message: {}, notes: {}\n", + level, loc, ty, message, notes + ) +} + +pub fn to_human_error_batch(diagnostics: Vec) -> String { + diagnostics + .into_iter() + .map(to_human_error) + .collect::>() + .join("\n") +} + +pub async fn clean_up(paths: Vec) { + for path in paths { + let _ = fs::remove_dir_all(path).await; + } + + let _ = fs::remove_dir_all(ZK_CACHE_ROOT).await; +} + +pub fn generate_folder_name() -> String { + let uuid = Uuid::new_v4(); + uuid.to_string() +} + +pub fn list_files_in_directory>(path: P) -> Vec { + let mut file_paths = Vec::new(); + + for entry in WalkDir::new(path) { + match entry { + Ok(entry) => { + if entry.file_type().is_file() { + file_paths.push(entry.path().display().to_string()); + } + } + Err(e) => println!("Error reading directory: {}", e), + } + } + + file_paths +} + +pub fn generate_mock_compile_request() -> CompilationRequest { + CompilationRequest { + config: CompilationConfig { + version: "1.4.1".to_string(), + user_libraries: vec![], + }, + contracts: vec![CompiledFile { + file_name: "SimpleStorage.sol".to_string(), + file_content: generate_mock_solidity_file_content(), + is_contract: false, + }], + target_path: None, + } +} + +pub fn generate_mock_solidity_file_content() -> String { + r#" + pragma solidity ^0.8.0; + + contract SimpleStorage { + uint256 storedData; + + function set(uint256 x) public { + storedData = x; + } + + function get() public view returns (uint256) { + return storedData; + } + } + "# + .to_string() +} + +pub async fn initialize_files(files: Vec, file_path: &Path) -> Result<()> { + for file in files { + let file_path_str = format!("{}/{}", file_path.to_str().unwrap(), file.file_name); + let file_path = Path::new(&file_path_str); + + // create parent directories + tokio::fs::create_dir_all(file_path.parent().unwrap()) + .await + .map_err(ApiError::FailedToWriteFile)?; + + // write file + tokio::fs::write(file_path, file.file_content.clone()) + .await + .map_err(ApiError::FailedToWriteFile)?; + } + + Ok(()) +} diff --git a/worker/src/utils/mod.rs b/worker/src/utils/mod.rs new file mode 100644 index 00000000..8bc0af56 --- /dev/null +++ b/worker/src/utils/mod.rs @@ -0,0 +1,3 @@ +pub mod cleaner; +pub mod hardhat_config; +pub mod lib; diff --git a/worker/src/worker.rs b/worker/src/worker.rs new file mode 100644 index 00000000..78de6c12 --- /dev/null +++ b/worker/src/worker.rs @@ -0,0 +1,182 @@ +use crate::dynamodb_client::DynamoDBClient; +use crossbeam_queue::ArrayQueue; +use std::fmt::{Display, Formatter}; +use std::num::NonZeroUsize; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::Arc; +use std::time::Duration; +use tokio::sync::Mutex; +use tokio::task::JoinHandle; +use tokio::time::sleep; +use tracing::{error, info, warn}; +use uuid::Uuid; + +use crate::handlers; +use crate::handlers::types::{ApiCommand, ApiCommandResult}; +use crate::sqs_client::SqsClient; +use crate::sqs_listener::{SqsListener, SqsReceiver}; +use crate::types::SqsMessage; +use crate::utils::lib::{timestamp, DURATION_TO_PURGE}; + +pub type Timestamp = u64; +pub struct RunningWorker { + sqs_listener: SqsListener, + expiration_timestamps: Arc>, + num_workers: usize, + worker_threads: Vec>, +} + +impl RunningWorker { + pub fn new( + sqs_listener: SqsListener, + num_workers: usize, + expiration_timestamps: Arc>>, + ) -> Self { + let mut worker_threads = Vec::with_capacity(num_workers); + for _ in 0..num_workers { + // add to collection + let sqs_receiver = 
sqs_listener.receiver();
+            let expiration_timestamps = expiration_timestamps.clone();
+            worker_threads.push(tokio::spawn(async move {
+                RunningWorker::worker(sqs_receiver, expiration_timestamps).await;
+            }));
+        }
+
+        Self {
+            sqs_listener,
+            expiration_timestamps,
+            num_workers,
+            worker_threads,
+        }
+    }
+
+    async fn worker(
+        sqs_receiver: SqsReceiver,
+        expiration_timestamps: Arc<Mutex<Vec<(Uuid, Timestamp)>>>,
+    ) {
+        // TODO: process error
+        while let Ok(message) = sqs_receiver.recv().await {
+            let body = if let Some(body) = message.body {
+                body
+            } else {
+                continue;
+            };
+
+            let receipt_handle = if let Some(receipt_handle) = message.receipt_handle {
+                receipt_handle
+            } else {
+                warn!("Message has a body but no receipt handle");
+                continue;
+            };
+
+            let sqs_message = match serde_json::from_str::<SqsMessage>(&body) {
+                Ok(sqs_message) => sqs_message,
+                Err(err) => {
+                    error!("Could not deserialize message: {}", err.to_string());
+                    // Drop malformed messages so they are not redelivered forever.
+                    let _ = sqs_receiver.delete_message(receipt_handle).await;
+                    continue;
+                }
+            };
+
+            match sqs_message {
+                SqsMessage::Compile { id } => {}
+                SqsMessage::Verify { id } => {}
+            }
+        }
+    }
+}
+
+pub struct WorkerEngine {
+    sqs_client: SqsClient,
+    db_client: DynamoDBClient,
+    expiration_timestamps: Arc<Mutex<Vec<(Uuid, Timestamp)>>>,
+    is_supervisor_enabled: Arc<AtomicBool>,
+    running_workers: Vec<RunningWorker>,
+    supervisor_thread: Arc<Option<JoinHandle<()>>>,
+}
+
+impl WorkerEngine {
+    pub fn new(sqs_client: SqsClient, db_client: DynamoDBClient, supervisor_enabled: bool) -> Self {
+        let is_supervisor_enabled = Arc::new(AtomicBool::new(supervisor_enabled));
+        let expiration_timestamps = Arc::new(Mutex::new(vec![]));
+
+        WorkerEngine {
+            sqs_client,
+            db_client,
+            supervisor_thread: Arc::new(None),
+            expiration_timestamps,
+            running_workers: vec![],
+            is_supervisor_enabled,
+        }
+    }
+
+    pub fn start(&mut self, num_workers: NonZeroUsize) {
+        let sqs_listener = SqsListener::new(self.sqs_client.clone(), Duration::from_millis(500));
+        self.running_workers.push(RunningWorker::new(
+            sqs_listener,
+            num_workers.get(),
+            self.expiration_timestamps.clone(),
+        ));
+
+        if self.is_supervisor_enabled.load(Ordering::Acquire) && self.supervisor_thread.is_none() {
+            let expiration_timestamps = self.expiration_timestamps.clone();
+            // Clone before the spawn so the async block does not capture `self`.
+            let db_client = self.db_client.clone();
+
+            self.supervisor_thread = Arc::new(Some(tokio::spawn(async move {
+                WorkerEngine::supervisor(db_client, expiration_timestamps).await;
+            })));
+        }
+    }
+
+    // pub async fn enable_supervisor_thread(&mut self) {
+    //     if self.supervisor_thread.is_some() {
+    //         return;
+    //     }
+    //
+    //     self.is_supervisor_enabled.store(true, Ordering::Release);
+    //     let expiration_timestamps = self.expiration_timestamps.clone();
+    //
+    //     self.supervisor_thread = Arc::new(Some(tokio::spawn(async move {
+    //         WorkerEngine::supervisor(self.db_client.clone(), expiration_timestamps).await;
+    //     })));
+    // }
+
+    pub async fn supervisor(
+        db_client: DynamoDBClient,
+        expiration_timestamps: Arc<Mutex<Vec<(Uuid, Timestamp)>>>,
+    ) {
+        loop {
+            let now = timestamp();
+
+            let to_delete = {
+                let mut to_delete = vec![];
+                let mut expiration_timestamps = expiration_timestamps.lock().await;
+                expiration_timestamps.retain(|&(uuid, expiration)| {
+                    if expiration < now {
+                        to_delete.push(uuid);
+                        false
+                    } else {
+                        true
+                    }
+                });
+
+                to_delete
+            };
+
+            for uuid in to_delete {
+                db_client.delete_item(uuid.to_string()).await;
+            }
+
+            sleep(Duration::from_millis(2000)).await;
+        }
+    }
+
+    // pub async fn disable_supervisor_thread(&mut self) {
+    //     let mut is_enabled = self.is_supervisor_enabled.lock().await;
+    //     *is_enabled = false;
+    //
+    //     if let Ok(Some(join_handle)) = Arc::try_unwrap(self.supervisor_thread.clone()) {
+    //         let _ =
join_handle.await; + // } + // + // self.supervisor_thread = Arc::new(None); + // } +} From 6da3ebfd75ff53a9744910dc19bde4baf706ac95 Mon Sep 17 00:00:00 2001 From: taco-paco Date: Thu, 22 Aug 2024 19:18:21 +0700 Subject: [PATCH 03/21] compiles --- worker/Cargo.toml | 2 + worker/src/commands/compile.rs | 390 +++++++++++++++++------------ worker/src/commands/verify.rs | 366 +++++++++++++-------------- worker/src/dynamodb_client.rs | 12 +- worker/src/types.rs | 40 +-- worker/src/utils/cleaner.rs | 1 - worker/src/utils/hardhat_config.rs | 1 - worker/src/utils/lib.rs | 261 ++++++++----------- worker/src/worker.rs | 14 +- 9 files changed, 559 insertions(+), 528 deletions(-) diff --git a/worker/Cargo.toml b/worker/Cargo.toml index e1f2d66e..ec22e972 100644 --- a/worker/Cargo.toml +++ b/worker/Cargo.toml @@ -16,5 +16,7 @@ crossbeam-queue = "0.3.11" serde = { version = "1.0.152", features = ["derive"] } serde_json = "1.0.124" uuid = { version = "1.10.0", features = ["serde", "v4"] } +thiserror = "1.0.63" tracing = { version = "0.1.40", features = ["log"] } tracing-subscriber = { version = "0.3.18", default-features = false, features = ["fmt", "ansi"] } +log = "0.4.22" diff --git a/worker/src/commands/compile.rs b/worker/src/commands/compile.rs index 5c2bedaa..1c96830f 100644 --- a/worker/src/commands/compile.rs +++ b/worker/src/commands/compile.rs @@ -1,170 +1,252 @@ +use aws_sdk_s3::types::Object; +use std::path::Path; +use std::process::Stdio; +use tracing::{instrument, warn}; +use tracing::{error, info}; +use uuid::Uuid; +use std::io::BufWriter; +use std::io::{Write, IoSlice}; +use std::ops::Add; +use tokio::io::AsyncWriteExt; + use crate::dynamodb_client::DynamoDBClient; -use crate::errors::{ApiError, Result}; -use crate::handlers::process::{do_process_command, fetch_process_result}; -use crate::handlers::types::{ - ApiCommand, ApiCommandResult, CompilationRequest, CompileResponse, CompiledFile, -}; -use crate::handlers::SPAWN_SEMAPHORE; -use crate::rate_limiter::RateLimited; use crate::utils::cleaner::AutoCleanUp; use crate::utils::hardhat_config::HardhatConfigBuilder; use crate::utils::lib::{ - generate_folder_name, initialize_files, list_files_in_directory, status_code_to_message, + generate_folder_name, status_code_to_message, DEFAULT_SOLIDITY_VERSION, SOL_ROOT, ZKSOLC_VERSIONS, }; -use crate::worker::WorkerEngine; -use rocket::serde::json; -use rocket::serde::json::Json; -use rocket::{tokio, State}; -use std::path::Path; -use std::process::Stdio; -use tracing::instrument; -use tracing::{error, info}; -use uuid::Uuid; -pub async fn do_compile( - id: Uuid, - db_client: DynamoDBClient, - s3_client: aws, - compilation_request: CompilationRequest, -) -> Result> { - let zksolc_version = compilation_request.config.version; - - // check if the version is supported - if !ZKSOLC_VERSIONS.contains(&zksolc_version.as_str()) { - return Err(ApiError::VersionNotSupported(zksolc_version)); - } - - if compilation_request.contracts.is_empty() { - return Ok(Json(CompileResponse { - file_content: vec![], - status: status_code_to_message(Some(0)), - message: "Nothing to compile".into(), - })); +async fn list_all_keys( + client: &aws_sdk_s3::Client, + id: String, + bucket: &str, +) -> Result, ()> { + // TODO: errors + let mut objects = Vec::new(); + let mut continuation_token: Option = None; + + let id = id.clone().add("/"); + loop { + let mut request = client + .list_objects_v2() + .bucket(bucket) + .delimiter('/') + .prefix(id.clone()); + if let Some(token) = continuation_token { + request = 
request.continuation_token(token); + } + + let response = request.send().await.map_err(|_| ())?; + if let Some(contents) = response.contents { + objects.extend(contents); + } + + let is_truncated = if let Some(is_truncated) = response.is_truncated { + is_truncated + } else { + warn!("is_truncated empty"); + break; + }; + + if !is_truncated { + break; + } + + continuation_token = response.next_continuation_token; + if continuation_token.is_none() { + error!("continuation_token wasn't set!"); + break; + } } - let namespace = generate_folder_name(); - - // root directory for the contracts - let workspace_path_str = format!("{}/{}", SOL_ROOT, namespace); - let workspace_path = Path::new(&workspace_path_str); - - // root directory for the artifacts - let artifacts_path_str = format!("{}/{}", workspace_path_str, "artifacts-zk"); - let artifacts_path = Path::new(&artifacts_path_str); - - // root directory for user files (hardhat config, etc) - let user_files_path_str = workspace_path_str.clone(); - let hardhat_config_path = Path::new(&user_files_path_str).join("hardhat.config.ts"); - - // instantly create the directories - tokio::fs::create_dir_all(workspace_path) - .await - .map_err(ApiError::FailedToWriteFile)?; - tokio::fs::create_dir_all(artifacts_path) - .await - .map_err(ApiError::FailedToWriteFile)?; + Ok(objects) +} - // when the compilation is done, clean up the directories - // it will be called when the AutoCleanUp struct is dropped - let auto_clean_up = AutoCleanUp { - dirs: vec![workspace_path.to_str().unwrap()], +pub async fn compile( + id: Uuid, + db_client: DynamoDBClient, + s3_client: aws_sdk_s3::Client, +) -> Result<(), ()> { + // TODO: errors + let item = db_client.get_item(id.to_string()).await.unwrap(); + let item = match item { + Some(item) => item, + None => { + error!("No item id: {}", id); + return Err(()); + } }; - // write the hardhat config file - let mut hardhat_config_builder = HardhatConfigBuilder::new(); - hardhat_config_builder - .zksolc_version(&zksolc_version) - .solidity_version(DEFAULT_SOLIDITY_VERSION); - if let Some(target_path) = compilation_request.target_path { - hardhat_config_builder.paths_sources(&target_path); - } - - let hardhat_config_content = hardhat_config_builder.build().to_string_config(); - - // create parent directories - tokio::fs::create_dir_all(hardhat_config_path.parent().unwrap()) - .await - .map_err(ApiError::FailedToWriteFile)?; - - tokio::fs::write(hardhat_config_path, hardhat_config_content) - .await - .map_err(ApiError::FailedToWriteFile)?; - - // filter test files from compilation candidates - let contracts = compilation_request - .contracts - .into_iter() - .filter(|contract| !contract.file_name.ends_with("_test.sol")) - .collect(); - - // initialize the files - initialize_files(contracts, workspace_path).await?; - - // Limit number of spawned processes. 
RAII released - let _permit = SPAWN_SEMAPHORE.acquire().await.expect("Expired semaphore"); - - let command = tokio::process::Command::new("npx") - .arg("hardhat") - .arg("compile") - .current_dir(workspace_path) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()) - .spawn(); - let process = command.map_err(ApiError::FailedToExecuteCommand)?; - let output = process - .wait_with_output() - .await - .map_err(ApiError::FailedToReadOutput)?; - - let status = output.status; - let message = String::from_utf8_lossy(&output.stdout).to_string(); - - info!("Output: \n{:?}", String::from_utf8_lossy(&output.stdout)); - if !status.success() { - error!( - "Compilation error: {}", - String::from_utf8_lossy(&output.stderr) - ); - return Ok(Json(CompileResponse { - file_content: vec![], - message: format!( - "Failed to compile:\n{}", - String::from_utf8_lossy(&output.stderr) - ), - status: "Error".to_string(), - })); - } - - // fetch the files in the artifacts directory - let mut file_contents: Vec = vec![]; - let file_paths = list_files_in_directory(artifacts_path); - - for file_path in file_paths.iter() { - let file_content = tokio::fs::read_to_string(file_path) + let objects = list_all_keys(&s3_client, id.to_string(), "TODO").await?; + for object in objects { + let key = object.key().ok_or(())?; + let mut object = s3_client + .get_object() + .bucket("TODO") + .key(key) + .send() .await - .map_err(ApiError::FailedToReadFile)?; - let full_path = Path::new(file_path); - let relative_path = full_path.strip_prefix(artifacts_path).unwrap_or(full_path); - let relative_path_str = relative_path.to_str().unwrap(); - - // todo(varex83): is it the best way to check? - let is_contract = - !relative_path_str.ends_with(".dbg.json") && relative_path_str.ends_with(".json"); - - file_contents.push(CompiledFile { - file_name: relative_path_str.to_string(), - file_content, - is_contract, - }); + .map_err(|_| ())?; + + let mut byte_count = 0_usize; + let mut contents = Vec::new(); + while let Some(bytes) = object.body.try_next().await.map_err(|_| ())? 
{ + let bytes_len = bytes.len(); + std::io::Write::write_all(&mut contents, &bytes).map_err(|_| ()); + byte_count += bytes_len; + } } - // calling here explicitly to avoid dropping the AutoCleanUp struct - auto_clean_up.clean_up().await; - - Ok(Json(CompileResponse { - file_content: file_contents, - status: status_code_to_message(status.code()), - message, - })) + // TODO: + Ok(()) } +// +// pub async fn do_compile( +// id: Uuid, +// db_client: DynamoDBClient, +// s3_client: aws_sdk_s3::Client, +// compilation_request: CompilationRequest, +// ) -> Result> { +// // TODO: errors +// +// let zksolc_version = compilation_request.config.version; +// +// // check if the version is supported +// if !ZKSOLC_VERSIONS.contains(&zksolc_version.as_str()) { +// return Err(ApiError::VersionNotSupported(zksolc_version)); +// } +// +// if compilation_request.contracts.is_empty() { +// return Ok(Json(CompileResponse { +// file_content: vec![], +// status: status_code_to_message(Some(0)), +// message: "Nothing to compile".into(), +// })); +// } +// +// let namespace = generate_folder_name(); +// +// // root directory for the contracts +// let workspace_path_str = format!("{}/{}", SOL_ROOT, namespace); +// let workspace_path = Path::new(&workspace_path_str); +// +// // root directory for the artifacts +// let artifacts_path_str = format!("{}/{}", workspace_path_str, "artifacts-zk"); +// let artifacts_path = Path::new(&artifacts_path_str); +// +// // root directory for user files (hardhat config, etc) +// let user_files_path_str = workspace_path_str.clone(); +// let hardhat_config_path = Path::new(&user_files_path_str).join("hardhat.config.ts"); +// +// // instantly create the directories +// tokio::fs::create_dir_all(workspace_path) +// .await +// .map_err(ApiError::FailedToWriteFile)?; +// tokio::fs::create_dir_all(artifacts_path) +// .await +// .map_err(ApiError::FailedToWriteFile)?; +// +// // when the compilation is done, clean up the directories +// // it will be called when the AutoCleanUp struct is dropped +// let auto_clean_up = AutoCleanUp { +// dirs: vec![workspace_path.to_str().unwrap()], +// }; +// +// // write the hardhat config file +// let mut hardhat_config_builder = HardhatConfigBuilder::new(); +// hardhat_config_builder +// .zksolc_version(&zksolc_version) +// .solidity_version(DEFAULT_SOLIDITY_VERSION); +// if let Some(target_path) = compilation_request.target_path { +// hardhat_config_builder.paths_sources(&target_path); +// } +// +// let hardhat_config_content = hardhat_config_builder.build().to_string_config(); +// +// // create parent directories +// tokio::fs::create_dir_all(hardhat_config_path.parent().unwrap()) +// .await +// .map_err(ApiError::FailedToWriteFile)?; +// +// tokio::fs::write(hardhat_config_path, hardhat_config_content) +// .await +// .map_err(ApiError::FailedToWriteFile)?; +// +// // filter test files from compilation candidates +// let contracts = compilation_request +// .contracts +// .into_iter() +// .filter(|contract| !contract.file_name.ends_with("_test.sol")) +// .collect(); +// +// // initialize the files +// initialize_files(contracts, workspace_path).await?; +// +// // Limit number of spawned processes. 
RAII released +// let _permit = SPAWN_SEMAPHORE.acquire().await.expect("Expired semaphore"); +// +// let command = tokio::process::Command::new("npx") +// .arg("hardhat") +// .arg("compile") +// .current_dir(workspace_path) +// .stdout(Stdio::piped()) +// .stderr(Stdio::piped()) +// .spawn(); +// let process = command.map_err(ApiError::FailedToExecuteCommand)?; +// let output = process +// .wait_with_output() +// .await +// .map_err(ApiError::FailedToReadOutput)?; +// +// let status = output.status; +// let message = String::from_utf8_lossy(&output.stdout).to_string(); +// +// info!("Output: \n{:?}", String::from_utf8_lossy(&output.stdout)); +// if !status.success() { +// error!( +// "Compilation error: {}", +// String::from_utf8_lossy(&output.stderr) +// ); +// return Ok(Json(CompileResponse { +// file_content: vec![], +// message: format!( +// "Failed to compile:\n{}", +// String::from_utf8_lossy(&output.stderr) +// ), +// status: "Error".to_string(), +// })); +// } +// +// // fetch the files in the artifacts directory +// let mut file_contents: Vec = vec![]; +// let file_paths = list_files_in_directory(artifacts_path); +// +// for file_path in file_paths.iter() { +// let file_content = tokio::fs::read_to_string(file_path) +// .await +// .map_err(ApiError::FailedToReadFile)?; +// let full_path = Path::new(file_path); +// let relative_path = full_path.strip_prefix(artifacts_path).unwrap_or(full_path); +// let relative_path_str = relative_path.to_str().unwrap(); +// +// // todo(varex83): is it the best way to check? +// let is_contract = +// !relative_path_str.ends_with(".dbg.json") && relative_path_str.ends_with(".json"); +// +// file_contents.push(CompiledFile { +// file_name: relative_path_str.to_string(), +// file_content, +// is_contract, +// }); +// } +// +// // calling here explicitly to avoid dropping the AutoCleanUp struct +// auto_clean_up.clean_up().await; +// +// Ok(Json(CompileResponse { +// file_content: file_contents, +// status: status_code_to_message(status.code()), +// message, +// })) +// } diff --git a/worker/src/commands/verify.rs b/worker/src/commands/verify.rs index 85642f4c..a479eebd 100644 --- a/worker/src/commands/verify.rs +++ b/worker/src/commands/verify.rs @@ -1,183 +1,183 @@ -use crate::errors::{ApiError, Result}; -use crate::handlers::process::{do_process_command, fetch_process_result}; -use crate::handlers::types::{ApiCommand, ApiCommandResult, VerificationRequest, VerifyResponse}; -use crate::handlers::SPAWN_SEMAPHORE; -use crate::rate_limiter::RateLimited; -use crate::utils::cleaner::AutoCleanUp; -use crate::utils::hardhat_config::HardhatConfigBuilder; -use crate::utils::lib::{ - generate_folder_name, initialize_files, ALLOWED_NETWORKS, DEFAULT_SOLIDITY_VERSION, SOL_ROOT, - ZKSOLC_VERSIONS, -}; -use crate::worker::WorkerEngine; -use rocket::serde::{json, json::Json}; -use rocket::{tokio, State}; -use std::path::Path; -use std::process::Stdio; -use tracing::info; -use tracing::instrument; - -#[instrument] -#[post("/verify", format = "json", data = "")] -pub async fn verify( - verification_request_json: Json, - _rate_limited: RateLimited, -) -> Json { - info!("/verify"); - - do_verify(verification_request_json.0) - .await - .unwrap_or_else(|e| { - Json(VerifyResponse { - message: e.to_string(), - status: "Error".to_string(), - }) - }) -} - -#[instrument] -#[post("/verify-async", format = "json", data = "")] -pub fn verify_async( - verification_request_json: Json, - _rate_limited: RateLimited, - engine: &State, -) -> String { - info!("/verify-async",); - - 
do_process_command(ApiCommand::Verify(verification_request_json.0), engine) -} - -#[instrument] -#[get("/verify-result/")] -pub async fn get_verify_result(process_id: String, engine: &State) -> String { - info!("/verify-result/{:?}", process_id); - - fetch_process_result(process_id, engine, |result| match result { - ApiCommandResult::Verify(verification_result) => { - json::to_string(&verification_result).unwrap_or_default() - } - _ => String::from("Result not available"), - }) -} - -fn extract_verify_args(request: &VerificationRequest) -> Vec { - let mut args: Vec = vec!["hardhat".into(), "verify".into(), "--network".into()]; - if request.config.network == "sepolia" { - args.push("zkSyncTestnet".into()) - } else { - args.push("zkSyncMainnet".into()) - } - - if let Some(ref target_contract) = request.target_contract { - args.push("--contract".into()); - args.push(target_contract.clone()); - } - - args.push(request.config.contract_address.clone()); - args.extend(request.config.inputs.clone()); - - args -} - -pub async fn do_verify(verification_request: VerificationRequest) -> Result> { - let zksolc_version = verification_request.config.zksolc_version.clone(); - - // check if the version is supported - if !ZKSOLC_VERSIONS.contains(&zksolc_version.as_str()) { - return Err(ApiError::VersionNotSupported(zksolc_version)); - } - - let solc_version = verification_request - .config - .solc_version - .clone() - .unwrap_or(DEFAULT_SOLIDITY_VERSION.to_string()); - - let network = verification_request.config.network.clone(); - - // check if the network is supported - if !ALLOWED_NETWORKS.contains(&network.as_str()) { - return Err(ApiError::UnknownNetwork(network)); - } - - let namespace = generate_folder_name(); - - // root directory for the contracts - let workspace_path_str = format!("{}/{}", SOL_ROOT, namespace); - let workspace_path = Path::new(&workspace_path_str); - - // root directory for the artifacts - let artifacts_path_str = format!("{}/{}", workspace_path_str, "artifacts-zk"); - let artifacts_path = Path::new(&artifacts_path_str); - - // root directory for user files (hardhat config, etc) - let user_files_path_str = workspace_path_str.clone(); - let hardhat_config_path = Path::new(&user_files_path_str).join("hardhat.config.ts"); - - // instantly create the directories - tokio::fs::create_dir_all(workspace_path) - .await - .map_err(ApiError::FailedToWriteFile)?; - tokio::fs::create_dir_all(artifacts_path) - .await - .map_err(ApiError::FailedToWriteFile)?; - - // when the compilation is done, clean up the directories - // it will be called when the AutoCleanUp struct is dropped - let auto_clean_up = AutoCleanUp { - dirs: vec![workspace_path.to_str().unwrap()], - }; - - // write the hardhat config file - let hardhat_config_content = HardhatConfigBuilder::new() - .zksolc_version(&zksolc_version) - .solidity_version(&solc_version) - .build() - .to_string_config(); - - // create parent directories - tokio::fs::create_dir_all(hardhat_config_path.parent().unwrap()) - .await - .map_err(ApiError::FailedToWriteFile)?; - - tokio::fs::write(hardhat_config_path, hardhat_config_content) - .await - .map_err(ApiError::FailedToWriteFile)?; - - // initialize the files - initialize_files(verification_request.contracts.clone(), workspace_path).await?; - - // Limit number of spawned processes. 
RAII released - let _permit = SPAWN_SEMAPHORE.acquire().await.expect("Expired semaphore"); - - let args = extract_verify_args(&verification_request); - let command = tokio::process::Command::new("npx") - .args(args) - .current_dir(workspace_path) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()) - .spawn(); - - let process = command.map_err(ApiError::FailedToExecuteCommand)?; - let output = process - .wait_with_output() - .await - .map_err(ApiError::FailedToReadOutput)?; - let status = output.status; - let message = String::from_utf8_lossy(&output.stdout).to_string(); - - // calling here explicitly to avoid dropping the AutoCleanUp struct - auto_clean_up.clean_up().await; - - if !status.success() { - return Ok(Json(VerifyResponse { - status: "Error".to_string(), - message: String::from_utf8_lossy(&output.stderr).to_string(), - })); - } - - Ok(Json(VerifyResponse { - status: "Success".to_string(), - message, - })) -} +// use crate::errors::{ApiError, Result}; +// use crate::handlers::process::{do_process_command, fetch_process_result}; +// use crate::handlers::types::{ApiCommand, ApiCommandResult, VerificationRequest, VerifyResponse}; +// use crate::handlers::SPAWN_SEMAPHORE; +// use crate::rate_limiter::RateLimited; +// use crate::utils::cleaner::AutoCleanUp; +// use crate::utils::hardhat_config::HardhatConfigBuilder; +// use crate::utils::lib::{ +// generate_folder_name, initialize_files, ALLOWED_NETWORKS, DEFAULT_SOLIDITY_VERSION, SOL_ROOT, +// ZKSOLC_VERSIONS, +// }; +// use crate::worker::WorkerEngine; +// use rocket::serde::{json, json::Json}; +// use rocket::{tokio, State}; +// use std::path::Path; +// use std::process::Stdio; +// use tracing::info; +// use tracing::instrument; +// +// #[instrument] +// #[post("/verify", format = "json", data = "")] +// pub async fn verify( +// verification_request_json: Json, +// _rate_limited: RateLimited, +// ) -> Json { +// info!("/verify"); +// +// do_verify(verification_request_json.0) +// .await +// .unwrap_or_else(|e| { +// Json(VerifyResponse { +// message: e.to_string(), +// status: "Error".to_string(), +// }) +// }) +// } +// +// #[instrument] +// #[post("/verify-async", format = "json", data = "")] +// pub fn verify_async( +// verification_request_json: Json, +// _rate_limited: RateLimited, +// engine: &State, +// ) -> String { +// info!("/verify-async",); +// +// do_process_command(ApiCommand::Verify(verification_request_json.0), engine) +// } +// +// #[instrument] +// #[get("/verify-result/")] +// pub async fn get_verify_result(process_id: String, engine: &State) -> String { +// info!("/verify-result/{:?}", process_id); +// +// fetch_process_result(process_id, engine, |result| match result { +// ApiCommandResult::Verify(verification_result) => { +// json::to_string(&verification_result).unwrap_or_default() +// } +// _ => String::from("Result not available"), +// }) +// } +// +// fn extract_verify_args(request: &VerificationRequest) -> Vec { +// let mut args: Vec = vec!["hardhat".into(), "verify".into(), "--network".into()]; +// if request.config.network == "sepolia" { +// args.push("zkSyncTestnet".into()) +// } else { +// args.push("zkSyncMainnet".into()) +// } +// +// if let Some(ref target_contract) = request.target_contract { +// args.push("--contract".into()); +// args.push(target_contract.clone()); +// } +// +// args.push(request.config.contract_address.clone()); +// args.extend(request.config.inputs.clone()); +// +// args +// } +// +// pub async fn do_verify(verification_request: VerificationRequest) -> Result> { +// let 
zksolc_version = verification_request.config.zksolc_version.clone(); +// +// // check if the version is supported +// if !ZKSOLC_VERSIONS.contains(&zksolc_version.as_str()) { +// return Err(ApiError::VersionNotSupported(zksolc_version)); +// } +// +// let solc_version = verification_request +// .config +// .solc_version +// .clone() +// .unwrap_or(DEFAULT_SOLIDITY_VERSION.to_string()); +// +// let network = verification_request.config.network.clone(); +// +// // check if the network is supported +// if !ALLOWED_NETWORKS.contains(&network.as_str()) { +// return Err(ApiError::UnknownNetwork(network)); +// } +// +// let namespace = generate_folder_name(); +// +// // root directory for the contracts +// let workspace_path_str = format!("{}/{}", SOL_ROOT, namespace); +// let workspace_path = Path::new(&workspace_path_str); +// +// // root directory for the artifacts +// let artifacts_path_str = format!("{}/{}", workspace_path_str, "artifacts-zk"); +// let artifacts_path = Path::new(&artifacts_path_str); +// +// // root directory for user files (hardhat config, etc) +// let user_files_path_str = workspace_path_str.clone(); +// let hardhat_config_path = Path::new(&user_files_path_str).join("hardhat.config.ts"); +// +// // instantly create the directories +// tokio::fs::create_dir_all(workspace_path) +// .await +// .map_err(ApiError::FailedToWriteFile)?; +// tokio::fs::create_dir_all(artifacts_path) +// .await +// .map_err(ApiError::FailedToWriteFile)?; +// +// // when the compilation is done, clean up the directories +// // it will be called when the AutoCleanUp struct is dropped +// let auto_clean_up = AutoCleanUp { +// dirs: vec![workspace_path.to_str().unwrap()], +// }; +// +// // write the hardhat config file +// let hardhat_config_content = HardhatConfigBuilder::new() +// .zksolc_version(&zksolc_version) +// .solidity_version(&solc_version) +// .build() +// .to_string_config(); +// +// // create parent directories +// tokio::fs::create_dir_all(hardhat_config_path.parent().unwrap()) +// .await +// .map_err(ApiError::FailedToWriteFile)?; +// +// tokio::fs::write(hardhat_config_path, hardhat_config_content) +// .await +// .map_err(ApiError::FailedToWriteFile)?; +// +// // initialize the files +// initialize_files(verification_request.contracts.clone(), workspace_path).await?; +// +// // Limit number of spawned processes. 
RAII released +// let _permit = SPAWN_SEMAPHORE.acquire().await.expect("Expired semaphore"); +// +// let args = extract_verify_args(&verification_request); +// let command = tokio::process::Command::new("npx") +// .args(args) +// .current_dir(workspace_path) +// .stdout(Stdio::piped()) +// .stderr(Stdio::piped()) +// .spawn(); +// +// let process = command.map_err(ApiError::FailedToExecuteCommand)?; +// let output = process +// .wait_with_output() +// .await +// .map_err(ApiError::FailedToReadOutput)?; +// let status = output.status; +// let message = String::from_utf8_lossy(&output.stdout).to_string(); +// +// // calling here explicitly to avoid dropping the AutoCleanUp struct +// auto_clean_up.clean_up().await; +// +// if !status.success() { +// return Ok(Json(VerifyResponse { +// status: "Error".to_string(), +// message: String::from_utf8_lossy(&output.stderr).to_string(), +// })); +// } +// +// Ok(Json(VerifyResponse { +// status: "Success".to_string(), +// message, +// })) +// } diff --git a/worker/src/dynamodb_client.rs b/worker/src/dynamodb_client.rs index 15da030c..d165c980 100644 --- a/worker/src/dynamodb_client.rs +++ b/worker/src/dynamodb_client.rs @@ -20,13 +20,21 @@ impl DynamoDBClient { // TODO: } - pub async fn get_item(&self, id: String) -> Item { + // TODO: remove unwraps + pub async fn get_item(&self, id: String) -> Result, ()> { let result = self .client .get_item() .table_name(self.table_name.clone()) .key("ID", AttributeValue::S(id)) .send() - .await?; + .await + .unwrap(); + + if let Some(item) = result.item { + Ok(Some(item.try_into().unwrap())) + } else { + Ok(None) + } } } diff --git a/worker/src/types.rs b/worker/src/types.rs index aad8c479..efbbeb51 100644 --- a/worker/src/types.rs +++ b/worker/src/types.rs @@ -67,27 +67,27 @@ impl From for HashMap { } impl TryFrom<&HashMap> for Status { - // TODO: + // TODO: error type Error = (); fn try_from(value: &HashMap) -> Result { let status = value.get("Status").ok_or(())?; - let status: u32 = status.as_n().map_err(())?.parse::().map_err(())?; + let status: u32 = status.as_n().map_err(|_| ())?.parse::().map_err(|_| ())?; let status = match status { 0 => Status::Pending, 1 => Status::Compiling, 2 => { let data = value.get("Data").ok_or(())?; - let data = data.as_s().map_err(())?; + let data = data.as_s().map_err(|_|())?; Status::Ready(data.clone()) } 3 => { let data = value.get("Data").ok_or(())?; - let data = data.as_s().map_err(())?; + let data = data.as_s().map_err(|_|())?; Status::Failed(data.clone()) } - _ => return Err(()) + _ => return Err(()), }; Ok(status) @@ -95,38 +95,16 @@ impl TryFrom<&HashMap> for Status { } impl TryFrom> for Item { - // TODO; + // TODO: error type Error = (); fn try_from(value: HashMap) -> Result { let id = value.get("ID").ok_or(())?; - let id = id.as_s().map_err(())?; - - let status = value.get("Status").ok_or(())?; - let status: u32 = status.as_n().map_err(())?.parse::().map_err(())?; - let status= status.try_into()?; - - // TODO: move to TryFrom - // let status = match status { - // 0 => Status::Pending, - // 1 => Status::Compiling, - // 2 => { - // let data = value.get("Data").ok_or(())?; - // let data = data.as_s().map_err(())?; - // - // Status::Ready(data.clone()) - // } - // 3 => { - // let data = value.get("Data").ok_or(())?; - // let data = data.as_s().map_err(())?; - // - // Status::Failed(data.clone()) - // } - // _ => return Err(()) - // }; + let id = id.as_s().map_err(|_| ())?; + let status = (&value).try_into()?; Ok(Item { id: id.clone(), - status + status, }) } } diff --git 
a/worker/src/utils/cleaner.rs b/worker/src/utils/cleaner.rs index e087bbbe..b3e9e0bc 100644 --- a/worker/src/utils/cleaner.rs +++ b/worker/src/utils/cleaner.rs @@ -1,4 +1,3 @@ -use rocket::tokio; use std::path::Path; pub struct AutoCleanUp<'a> { diff --git a/worker/src/utils/hardhat_config.rs b/worker/src/utils/hardhat_config.rs index 2a191bbe..a63f0916 100644 --- a/worker/src/utils/hardhat_config.rs +++ b/worker/src/utils/hardhat_config.rs @@ -1,5 +1,4 @@ use crate::utils::lib::{DEFAULT_SOLIDITY_VERSION, DEFAULT_ZKSOLC_VERSION}; -use rocket::serde::json::serde_json; use std::fmt::Formatter; const DEFAULT_CONTRACTS_LOCATION: &str = "./contracts"; diff --git a/worker/src/utils/lib.rs b/worker/src/utils/lib.rs index 7008c8cc..f4f0e018 100644 --- a/worker/src/utils/lib.rs +++ b/worker/src/utils/lib.rs @@ -1,12 +1,6 @@ -use crate::errors::{ApiError, Result}; -use crate::handlers::types::{CompilationConfig, CompilationRequest, CompiledFile}; -use rocket::tokio; -use rocket::tokio::fs; -use solang_parser::diagnostics::{Diagnostic, ErrorType, Level}; -use solang_parser::pt::Loc; use std::path::{Path, PathBuf}; use uuid::Uuid; -use walkdir::WalkDir; +use log::debug; pub const SOL_ROOT: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/", "hardhat_env/workspaces/"); pub const ZK_CACHE_ROOT: &str = concat!( @@ -43,39 +37,6 @@ pub fn get_file_ext(file_path: &str) -> String { } } } - -pub fn check_file_ext(file_path: &str, ext: &str) -> Result<()> { - let actual_ext = get_file_ext(file_path); - if actual_ext == *ext { - Ok(()) - } else { - Err(ApiError::FileExtensionNotSupported(actual_ext)) - } -} - -pub fn path_buf_to_string(path_buf: PathBuf) -> Result { - path_buf - .to_str() - .ok_or(ApiError::FailedToParseString) - .map(|s| s.to_string()) -} - -pub async fn init_parent_directories(file_path: PathBuf) { - match file_path.parent() { - Some(parent) => match fs::create_dir_all(parent).await { - Ok(_) => { - debug!("LOG: Created directory: {:?}", parent); - } - Err(e) => { - debug!("LOG: Error creating directory: {:?}", e); - } - }, - None => { - debug!("LOG: Error creating directory"); - } - } -} - pub fn status_code_to_message(status: Option) -> String { match status { Some(0) => "Success", @@ -110,104 +71,104 @@ pub fn timestamp() -> u64 { chrono::Utc::now().timestamp() as u64 } -pub fn to_human_error( - Diagnostic { - loc, - level, - ty, - message, - notes, - }: Diagnostic, -) -> String { - let level = match level { - Level::Debug => "Debug", - Level::Info => "Info", - Level::Warning => "Warning", - Level::Error => "Error", - }; - - let loc = match loc { - Loc::Builtin => "Builtin".to_string(), - Loc::CommandLine => "CommandLine".to_string(), - Loc::Implicit => "Implicit".to_string(), - Loc::Codegen => "Codegen".to_string(), - Loc::File(_, start, end) => format!("{}:{}", start, end), - }; - - let ty = match ty { - ErrorType::None => "None", - ErrorType::ParserError => "ParserError", - ErrorType::SyntaxError => "SyntaxError", - ErrorType::DeclarationError => "DeclarationError", - ErrorType::CastError => "CastError", - ErrorType::TypeError => "TypeError", - ErrorType::Warning => "Warning", - }; - - let notes = notes - .iter() - .map(|note| note.message.clone()) - .collect::>() - .join("\n"); - - format!( - "level: {}, loc: {}, ty: {}, message: {}, notes: {}\n", - level, loc, ty, message, notes - ) -} - -pub fn to_human_error_batch(diagnostics: Vec) -> String { - diagnostics - .into_iter() - .map(to_human_error) - .collect::>() - .join("\n") -} - -pub async fn clean_up(paths: Vec) { - for path in paths { 
- let _ = fs::remove_dir_all(path).await; - } - - let _ = fs::remove_dir_all(ZK_CACHE_ROOT).await; -} +// pub fn to_human_error( +// Diagnostic { +// loc, +// level, +// ty, +// message, +// notes, +// }: Diagnostic, +// ) -> String { +// let level = match level { +// Level::Debug => "Debug", +// Level::Info => "Info", +// Level::Warning => "Warning", +// Level::Error => "Error", +// }; +// +// let loc = match loc { +// Loc::Builtin => "Builtin".to_string(), +// Loc::CommandLine => "CommandLine".to_string(), +// Loc::Implicit => "Implicit".to_string(), +// Loc::Codegen => "Codegen".to_string(), +// Loc::File(_, start, end) => format!("{}:{}", start, end), +// }; +// +// let ty = match ty { +// ErrorType::None => "None", +// ErrorType::ParserError => "ParserError", +// ErrorType::SyntaxError => "SyntaxError", +// ErrorType::DeclarationError => "DeclarationError", +// ErrorType::CastError => "CastError", +// ErrorType::TypeError => "TypeError", +// ErrorType::Warning => "Warning", +// }; +// +// let notes = notes +// .iter() +// .map(|note| note.message.clone()) +// .collect::>() +// .join("\n"); +// +// format!( +// "level: {}, loc: {}, ty: {}, message: {}, notes: {}\n", +// level, loc, ty, message, notes +// ) +// } + +// pub fn to_human_error_batch(diagnostics: Vec) -> String { +// diagnostics +// .into_iter() +// .map(to_human_error) +// .collect::>() +// .join("\n") +// } + +// pub async fn clean_up(paths: Vec) { +// for path in paths { +// let _ = fs::remove_dir_all(path).await; +// } +// +// let _ = fs::remove_dir_all(ZK_CACHE_ROOT).await; +// } pub fn generate_folder_name() -> String { let uuid = Uuid::new_v4(); uuid.to_string() } -pub fn list_files_in_directory>(path: P) -> Vec { - let mut file_paths = Vec::new(); - - for entry in WalkDir::new(path) { - match entry { - Ok(entry) => { - if entry.file_type().is_file() { - file_paths.push(entry.path().display().to_string()); - } - } - Err(e) => println!("Error reading directory: {}", e), - } - } - - file_paths -} - -pub fn generate_mock_compile_request() -> CompilationRequest { - CompilationRequest { - config: CompilationConfig { - version: "1.4.1".to_string(), - user_libraries: vec![], - }, - contracts: vec![CompiledFile { - file_name: "SimpleStorage.sol".to_string(), - file_content: generate_mock_solidity_file_content(), - is_contract: false, - }], - target_path: None, - } -} +// pub fn list_files_in_directory>(path: P) -> Vec { +// let mut file_paths = Vec::new(); +// +// for entry in WalkDir::new(path) { +// match entry { +// Ok(entry) => { +// if entry.file_type().is_file() { +// file_paths.push(entry.path().display().to_string()); +// } +// } +// Err(e) => println!("Error reading directory: {}", e), +// } +// } +// +// file_paths +// } + +// pub fn generate_mock_compile_request() -> CompilationRequest { +// CompilationRequest { +// config: CompilationConfig { +// version: "1.4.1".to_string(), +// user_libraries: vec![], +// }, +// contracts: vec![CompiledFile { +// file_name: "SimpleStorage.sol".to_string(), +// file_content: generate_mock_solidity_file_content(), +// is_contract: false, +// }], +// target_path: None, +// } +// } pub fn generate_mock_solidity_file_content() -> String { r#" @@ -228,21 +189,21 @@ pub fn generate_mock_solidity_file_content() -> String { .to_string() } -pub async fn initialize_files(files: Vec, file_path: &Path) -> Result<()> { - for file in files { - let file_path_str = format!("{}/{}", file_path.to_str().unwrap(), file.file_name); - let file_path = Path::new(&file_path_str); - - // create parent 
directories - tokio::fs::create_dir_all(file_path.parent().unwrap()) - .await - .map_err(ApiError::FailedToWriteFile)?; - - // write file - tokio::fs::write(file_path, file.file_content.clone()) - .await - .map_err(ApiError::FailedToWriteFile)?; - } - - Ok(()) -} +// pub async fn initialize_files(files: Vec, file_path: &Path) -> Result<()> { +// for file in files { +// let file_path_str = format!("{}/{}", file_path.to_str().unwrap(), file.file_name); +// let file_path = Path::new(&file_path_str); +// +// // create parent directories +// tokio::fs::create_dir_all(file_path.parent().unwrap()) +// .await +// .map_err(ApiError::FailedToWriteFile)?; +// +// // write file +// tokio::fs::write(file_path, file.file_content.clone()) +// .await +// .map_err(ApiError::FailedToWriteFile)?; +// } +// +// Ok(()) +// } diff --git a/worker/src/worker.rs b/worker/src/worker.rs index 78de6c12..fb40e88d 100644 --- a/worker/src/worker.rs +++ b/worker/src/worker.rs @@ -11,8 +11,6 @@ use tokio::time::sleep; use tracing::{error, info, warn}; use uuid::Uuid; -use crate::handlers; -use crate::handlers::types::{ApiCommand, ApiCommandResult}; use crate::sqs_client::SqsClient; use crate::sqs_listener::{SqsListener, SqsReceiver}; use crate::types::SqsMessage; @@ -21,7 +19,7 @@ use crate::utils::lib::{timestamp, DURATION_TO_PURGE}; pub type Timestamp = u64; pub struct RunningWorker { sqs_listener: SqsListener, - expiration_timestamps: Arc>, + expiration_timestamps: Arc>>, num_workers: usize, worker_threads: Vec>, } @@ -69,11 +67,12 @@ impl RunningWorker { continue; }; - let sqs_message = match serde_json::from_str::(body) { + let sqs_message = match serde_json::from_str::(&body) { Ok(sqs_message) => sqs_message, Err(err) => { error!("Could not deserialize message: {}", err.to_string()); let _ = sqs_receiver.delete_message(receipt_handle).await; + continue; } }; @@ -81,6 +80,8 @@ impl RunningWorker { SqsMessage::Compile { id } => {} SqsMessage::Verify { id } => {} } + + let _ = sqs_receiver.delete_message(receipt_handle).await; } } } @@ -117,11 +118,12 @@ impl WorkerEngine { self.expiration_timestamps.clone(), )); + // TODO: not protection really if self.is_supervisor_enabled.load(Ordering::Acquire) && self.supervisor_thread.is_none() { + let db_client = self.db_client.clone(); let expiration_timestamps = self.expiration_timestamps.clone(); - self.supervisor_thread = Arc::new(Some(tokio::spawn(async move { - WorkerEngine::supervisor(self.db_client.clone(), expiration_timestamps).await; + WorkerEngine::supervisor(db_client, expiration_timestamps).await; }))); } } From 8543cbed2d02a233d601b57d943ec1d88c899e99 Mon Sep 17 00:00:00 2001 From: taco-paco Date: Fri, 23 Aug 2024 11:55:44 +0700 Subject: [PATCH 04/21] feat: introduce worksapce and types crate --- .gitignore | 2 +- worker/Cargo.toml => Cargo.toml | 23 ++++++------- crates/lambdas/Cargo.toml | 32 +++++++++++++++++++ {lambdas => crates/lambdas}/README.md | 0 .../lambdas}/src/common/errors.rs | 0 {lambdas => crates/lambdas}/src/common/mod.rs | 0 .../lambdas}/src/common/utils.rs | 0 {lambdas => crates/lambdas}/src/compile.rs | 0 .../lambdas}/src/generate_presigned_urls.rs | 0 crates/types/Cargo.toml | 6 ++++ crates/types/src/lib.rs | 14 ++++++++ crates/worker/Cargo.toml | 22 +++++++++++++ .../worker}/src/commands/compile.rs | 0 {worker => crates/worker}/src/commands/mod.rs | 0 .../worker}/src/commands/verify.rs | 0 .../worker}/src/dynamodb_client.rs | 0 {worker => crates/worker}/src/errors.rs | 0 {worker => crates/worker}/src/main.rs | 0 {worker => 
crates/worker}/src/sqs_client.rs | 0 {worker => crates/worker}/src/sqs_listener.rs | 0 {worker => crates/worker}/src/types.rs | 0 .../worker}/src/utils/cleaner.rs | 0 .../worker}/src/utils/hardhat_config.rs | 0 {worker => crates/worker}/src/utils/lib.rs | 2 +- {worker => crates/worker}/src/utils/mod.rs | 0 {worker => crates/worker}/src/worker.rs | 0 lambdas/Cargo.toml | 30 ----------------- 27 files changed, 86 insertions(+), 45 deletions(-) rename worker/Cargo.toml => Cargo.toml (52%) create mode 100644 crates/lambdas/Cargo.toml rename {lambdas => crates/lambdas}/README.md (100%) rename {lambdas => crates/lambdas}/src/common/errors.rs (100%) rename {lambdas => crates/lambdas}/src/common/mod.rs (100%) rename {lambdas => crates/lambdas}/src/common/utils.rs (100%) rename {lambdas => crates/lambdas}/src/compile.rs (100%) rename {lambdas => crates/lambdas}/src/generate_presigned_urls.rs (100%) create mode 100644 crates/types/Cargo.toml create mode 100644 crates/types/src/lib.rs create mode 100644 crates/worker/Cargo.toml rename {worker => crates/worker}/src/commands/compile.rs (100%) rename {worker => crates/worker}/src/commands/mod.rs (100%) rename {worker => crates/worker}/src/commands/verify.rs (100%) rename {worker => crates/worker}/src/dynamodb_client.rs (100%) rename {worker => crates/worker}/src/errors.rs (100%) rename {worker => crates/worker}/src/main.rs (100%) rename {worker => crates/worker}/src/sqs_client.rs (100%) rename {worker => crates/worker}/src/sqs_listener.rs (100%) rename {worker => crates/worker}/src/types.rs (100%) rename {worker => crates/worker}/src/utils/cleaner.rs (100%) rename {worker => crates/worker}/src/utils/hardhat_config.rs (100%) rename {worker => crates/worker}/src/utils/lib.rs (99%) rename {worker => crates/worker}/src/utils/mod.rs (100%) rename {worker => crates/worker}/src/worker.rs (100%) delete mode 100644 lambdas/Cargo.toml diff --git a/.gitignore b/.gitignore index 1806dc5c..f8b720fe 100644 --- a/.gitignore +++ b/.gitignore @@ -24,4 +24,4 @@ api/hardhat_env/workspaces .DS_Store # credentials -/worker/credentials \ No newline at end of file +/crates/worker/credentials \ No newline at end of file diff --git a/worker/Cargo.toml b/Cargo.toml similarity index 52% rename from worker/Cargo.toml rename to Cargo.toml index ec22e972..d940460e 100644 --- a/worker/Cargo.toml +++ b/Cargo.toml @@ -1,22 +1,19 @@ -[package] -name = "worker" -version = "0.0.1" -edition = "2021" +[workspace] +members = [ + "crates/types", + "crates/lambdas", + "crates/worker", +] -[dependencies] -tokio = {version = "1.39.3", features = ["macros", "rt-multi-thread", "sync"]} +[workspace.dependencies] aws-config = "1.5.5" aws-sdk-s3 = "1.43.0" aws-sdk-sqs = "1.39.0" aws-sdk-dynamodb = "1.42.0" -aws-runtime = "1.4.0" -async-channel = "2.3.1" -chrono = "0.4.38" -crossbeam-queue = "0.3.11" -serde = { version = "1.0.152", features = ["derive"] } +tokio = {version = "1.39.3", features = ["macros"]} +serde = "1.0.207" serde_json = "1.0.124" -uuid = { version = "1.10.0", features = ["serde", "v4"] } thiserror = "1.0.63" tracing = { version = "0.1.40", features = ["log"] } tracing-subscriber = { version = "0.3.18", default-features = false, features = ["fmt", "ansi"] } -log = "0.4.22" +uuid = { version = "1.10.0", features = ["serde", "v4"] } diff --git a/crates/lambdas/Cargo.toml b/crates/lambdas/Cargo.toml new file mode 100644 index 00000000..7d901b38 --- /dev/null +++ b/crates/lambdas/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "zksync-lambdas" +version = "0.0.1" +edition = "2021" +authors 
= ["edwinswatpako@gmail.com"] + +[dependencies] +aws-config = {workspace = true} +aws-sdk-s3 = {workspace = true} +aws-sdk-sqs = {workspace = true} +aws-sdk-dynamodb = {workspace = true} +tokio = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +thiserror = { workspace = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true } +uuid = {workspace = true} + +lambda_runtime = "0.13.0" +lambda_http = "0.13.0" + + +[[bin]] +name = "generate-presigned-urls" +version = "0.0.1" +path = "src/generate_presigned_urls.rs" + +[[bin]] +name = "compile" +version = "0.0.1" +path = "src/compile.rs" diff --git a/lambdas/README.md b/crates/lambdas/README.md similarity index 100% rename from lambdas/README.md rename to crates/lambdas/README.md diff --git a/lambdas/src/common/errors.rs b/crates/lambdas/src/common/errors.rs similarity index 100% rename from lambdas/src/common/errors.rs rename to crates/lambdas/src/common/errors.rs diff --git a/lambdas/src/common/mod.rs b/crates/lambdas/src/common/mod.rs similarity index 100% rename from lambdas/src/common/mod.rs rename to crates/lambdas/src/common/mod.rs diff --git a/lambdas/src/common/utils.rs b/crates/lambdas/src/common/utils.rs similarity index 100% rename from lambdas/src/common/utils.rs rename to crates/lambdas/src/common/utils.rs diff --git a/lambdas/src/compile.rs b/crates/lambdas/src/compile.rs similarity index 100% rename from lambdas/src/compile.rs rename to crates/lambdas/src/compile.rs diff --git a/lambdas/src/generate_presigned_urls.rs b/crates/lambdas/src/generate_presigned_urls.rs similarity index 100% rename from lambdas/src/generate_presigned_urls.rs rename to crates/lambdas/src/generate_presigned_urls.rs diff --git a/crates/types/Cargo.toml b/crates/types/Cargo.toml new file mode 100644 index 00000000..1dc96814 --- /dev/null +++ b/crates/types/Cargo.toml @@ -0,0 +1,6 @@ +[package] +name = "types" +version = "0.1.0" +edition = "2021" + +[dependencies] diff --git a/crates/types/src/lib.rs b/crates/types/src/lib.rs new file mode 100644 index 00000000..7d12d9af --- /dev/null +++ b/crates/types/src/lib.rs @@ -0,0 +1,14 @@ +pub fn add(left: usize, right: usize) -> usize { + left + right +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn it_works() { + let result = add(2, 2); + assert_eq!(result, 4); + } +} diff --git a/crates/worker/Cargo.toml b/crates/worker/Cargo.toml new file mode 100644 index 00000000..987ad95d --- /dev/null +++ b/crates/worker/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "worker" +version = "0.0.1" +edition = "2021" + +[dependencies] +aws-config = { workspace = true } +aws-sdk-s3 = {workspace = true } +aws-sdk-sqs = {workspace = true} +aws-sdk-dynamodb = {workspace = true} +tokio = {workspace = true, features = ["rt-multi-thread", "sync"]} +serde = { workspace = true, features = ["derive"] } +serde_json = { workspace = true } +thiserror = { workspace = true } +tracing = { workspace = true } +tracing-subscriber = { workspace = true } +uuid = {workspace = true} + +aws-runtime = "1.4.0" +async-channel = "2.3.1" +chrono = "0.4.38" +crossbeam-queue = "0.3.11" diff --git a/worker/src/commands/compile.rs b/crates/worker/src/commands/compile.rs similarity index 100% rename from worker/src/commands/compile.rs rename to crates/worker/src/commands/compile.rs diff --git a/worker/src/commands/mod.rs b/crates/worker/src/commands/mod.rs similarity index 100% rename from worker/src/commands/mod.rs rename to crates/worker/src/commands/mod.rs diff --git 
a/worker/src/commands/verify.rs b/crates/worker/src/commands/verify.rs similarity index 100% rename from worker/src/commands/verify.rs rename to crates/worker/src/commands/verify.rs diff --git a/worker/src/dynamodb_client.rs b/crates/worker/src/dynamodb_client.rs similarity index 100% rename from worker/src/dynamodb_client.rs rename to crates/worker/src/dynamodb_client.rs diff --git a/worker/src/errors.rs b/crates/worker/src/errors.rs similarity index 100% rename from worker/src/errors.rs rename to crates/worker/src/errors.rs diff --git a/worker/src/main.rs b/crates/worker/src/main.rs similarity index 100% rename from worker/src/main.rs rename to crates/worker/src/main.rs diff --git a/worker/src/sqs_client.rs b/crates/worker/src/sqs_client.rs similarity index 100% rename from worker/src/sqs_client.rs rename to crates/worker/src/sqs_client.rs diff --git a/worker/src/sqs_listener.rs b/crates/worker/src/sqs_listener.rs similarity index 100% rename from worker/src/sqs_listener.rs rename to crates/worker/src/sqs_listener.rs diff --git a/worker/src/types.rs b/crates/worker/src/types.rs similarity index 100% rename from worker/src/types.rs rename to crates/worker/src/types.rs diff --git a/worker/src/utils/cleaner.rs b/crates/worker/src/utils/cleaner.rs similarity index 100% rename from worker/src/utils/cleaner.rs rename to crates/worker/src/utils/cleaner.rs diff --git a/worker/src/utils/hardhat_config.rs b/crates/worker/src/utils/hardhat_config.rs similarity index 100% rename from worker/src/utils/hardhat_config.rs rename to crates/worker/src/utils/hardhat_config.rs diff --git a/worker/src/utils/lib.rs b/crates/worker/src/utils/lib.rs similarity index 99% rename from worker/src/utils/lib.rs rename to crates/worker/src/utils/lib.rs index f4f0e018..74db6b2a 100644 --- a/worker/src/utils/lib.rs +++ b/crates/worker/src/utils/lib.rs @@ -1,6 +1,6 @@ use std::path::{Path, PathBuf}; +use tracing::debug; use uuid::Uuid; -use log::debug; pub const SOL_ROOT: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/", "hardhat_env/workspaces/"); pub const ZK_CACHE_ROOT: &str = concat!( diff --git a/worker/src/utils/mod.rs b/crates/worker/src/utils/mod.rs similarity index 100% rename from worker/src/utils/mod.rs rename to crates/worker/src/utils/mod.rs diff --git a/worker/src/worker.rs b/crates/worker/src/worker.rs similarity index 100% rename from worker/src/worker.rs rename to crates/worker/src/worker.rs diff --git a/lambdas/Cargo.toml b/lambdas/Cargo.toml deleted file mode 100644 index 8a420efe..00000000 --- a/lambdas/Cargo.toml +++ /dev/null @@ -1,30 +0,0 @@ -[package] -name = "zksync-lambdas" -version = "0.0.1" -edition = "2021" -authors = ["edwinswatpako@gmail.com"] - -[dependencies] -aws-config = "1.5.5" -aws-sdk-s3 = "1.43.0" -aws-sdk-dynamodb = "1.40.0" -aws-sdk-sqs = "1.37.0" -lambda_runtime = "0.13.0" -lambda_http = "0.13.0" -tokio = { version = "1.39.2", features = ["macros"] } -serde = "1.0.207" -serde_json = "1.0.124" -thiserror = "1.0.63" -tracing = { version = "0.1.40", features = ["log"] } -tracing-subscriber = { version = "0.3.18", default-features = false, features = ["fmt", "ansi"] } -uuid = { version = "1.10.0", features = ["serde", "v4"] } - -[[bin]] -name = "generate-presigned-urls" -version = "0.0.1" -path = "src/generate_presigned_urls.rs" - -[[bin]] -name = "compile" -version = "0.0.1" -path = "src/compile.rs" From 0e63e16207887f8a39649993225f9ed94ccca55c Mon Sep 17 00:00:00 2001 From: taco-paco Date: Fri, 23 Aug 2024 14:01:09 +0700 Subject: [PATCH 05/21] feat: introduce errors to types crate 
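
This commit moves the DynamoDB Item model and the SQS message envelope into the
shared `types` crate. For reviewers: with `#[serde(tag = "type")]` on SqsMessage
and `#[serde(flatten)]` on the inner request, a compile job travels as one flat
JSON object on the queue. A minimal sketch of the encoding (illustrative id
value; assumes serde_json is available next to the `types` crate):

    use types::{CompilationConfig, CompilationRequest, SqsMessage};

    fn main() {
        // Hypothetical payload; in practice the id comes from the compile lambda.
        let message = SqsMessage::Compile {
            request: CompilationRequest {
                id: "00000000-0000-0000-0000-000000000000".into(),
                config: CompilationConfig {
                    version: "1.4.1".into(),
                    user_libraries: vec![],
                    target_path: None,
                },
            },
        };

        // The tag and the flattened request share a single object:
        // {"type":"Compile","id":"00000000-0000-0000-0000-000000000000",
        //  "config":{"version":"1.4.1","user_libraries":[],"target_path":null}}
        println!("{}", serde_json::to_string(&message).unwrap());
    }

This is what lets the worker dispatch on the "type" field alone before it
touches DynamoDB or S3.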
--- Cargo.toml | 2 + api/src/handlers/types.rs | 1 - crates/lambdas/Cargo.toml | 2 + crates/lambdas/src/common/mod.rs | 57 ------- crates/lambdas/src/compile.rs | 30 ++-- crates/lambdas/src/generate_presigned_urls.rs | 2 +- crates/types/Cargo.toml | 6 +- crates/types/src/lib.rs | 155 +++++++++++++++++- crates/worker/Cargo.toml | 3 + crates/worker/src/dynamodb_client.rs | 2 +- crates/worker/src/main.rs | 2 + crates/worker/src/types.rs | 110 ------------- crates/worker/src/worker.rs | 2 +- 13 files changed, 179 insertions(+), 195 deletions(-) delete mode 100644 crates/worker/src/types.rs diff --git a/Cargo.toml b/Cargo.toml index d940460e..6ff3d014 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,3 +17,5 @@ thiserror = "1.0.63" tracing = { version = "0.1.40", features = ["log"] } tracing-subscriber = { version = "0.3.18", default-features = false, features = ["fmt", "ansi"] } uuid = { version = "1.10.0", features = ["serde", "v4"] } + +types = {version = "0.0.1", path = "crates/types"} \ No newline at end of file diff --git a/api/src/handlers/types.rs b/api/src/handlers/types.rs index ab655953..51dddd17 100644 --- a/api/src/handlers/types.rs +++ b/api/src/handlers/types.rs @@ -40,7 +40,6 @@ pub struct CompilationConfig { pub struct CompilationRequest { pub config: CompilationConfig, pub contracts: Vec, - pub target_path: Option, } #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] diff --git a/crates/lambdas/Cargo.toml b/crates/lambdas/Cargo.toml index 7d901b38..57db80d9 100644 --- a/crates/lambdas/Cargo.toml +++ b/crates/lambdas/Cargo.toml @@ -20,6 +20,8 @@ uuid = {workspace = true} lambda_runtime = "0.13.0" lambda_http = "0.13.0" +# Inner crates +types = {workspace = true} [[bin]] name = "generate-presigned-urls" diff --git a/crates/lambdas/src/common/mod.rs b/crates/lambdas/src/common/mod.rs index ae5f9ac3..6599b8ee 100644 --- a/crates/lambdas/src/common/mod.rs +++ b/crates/lambdas/src/common/mod.rs @@ -1,61 +1,4 @@ pub mod errors; pub mod utils; -use aws_sdk_dynamodb::types::AttributeValue; -use serde::Serialize; -use std::collections::HashMap; - pub const BUCKET_NAME_DEFAULT: &str = "zksync-compilation-s3"; - -#[derive(Debug, Clone, Serialize)] -pub enum Status { - // TODO: add FilesUploaded(?) - Pending, - Compiling, - Ready(String), - Failed(String), -} - -impl From<&Status> for u32 { - fn from(value: &Status) -> Self { - match value { - Status::Pending => 0, - Status::Compiling => 1, - Status::Ready(_) => 2, - Status::Failed(_) => 3, - } - } -} - -impl From for HashMap { - fn from(value: Status) -> Self { - match value.clone() { - Status::Pending | Status::Compiling => HashMap::from([( - "Status".into(), - AttributeValue::N(u32::from(&value).to_string()), - )]), - Status::Ready(val) | Status::Failed(val) => HashMap::from([ - ( - "Status".into(), - AttributeValue::N(u32::from(&value).to_string()), - ), - ("Data".into(), AttributeValue::S(val)), - ]), - } - } -} - -pub struct Item { - // TODO: uuid? 
- pub id: String, - pub status: Status, -} - -impl From for HashMap { - fn from(value: Item) -> Self { - let mut item_map = HashMap::from([("ID".into(), AttributeValue::S(value.id))]); - item_map.extend(HashMap::from(value.status)); - - item_map - } -} diff --git a/crates/lambdas/src/compile.rs b/crates/lambdas/src/compile.rs index 0df7bcfe..ed1e5477 100644 --- a/crates/lambdas/src/compile.rs +++ b/crates/lambdas/src/compile.rs @@ -3,12 +3,12 @@ use aws_sdk_dynamodb::{error::SdkError, operation::put_item::PutItemError}; use lambda_http::{ run, service_fn, Error as LambdaError, Request as LambdaRequest, Response as LambdaResponse, }; -use serde::Deserialize; use std::ops::Add; use tracing::{error, info}; +use types::{CompilationRequest, Item, SqsMessage, Status}; mod common; -use crate::common::{errors::Error, utils::extract_request, Item, Status, BUCKET_NAME_DEFAULT}; +use crate::common::{errors::Error, utils::extract_request, BUCKET_NAME_DEFAULT}; // TODO: remove on release const QUEUE_URL_DEFAULT: &str = "https://sqs.ap-southeast-2.amazonaws.com/266735844848/zksync-sqs"; @@ -16,12 +16,6 @@ const TABLE_NAME_DEFAULT: &str = "zksync-table"; const NO_OBJECTS_TO_COMPILE_ERROR: &str = "There are no objects to compile"; const RECOMPILATION_ATTEMPT_ERROR: &str = "Recompilation attemp"; - -#[derive(Debug, Deserialize)] -struct Request { - pub id: String, -} - // impl Deserialize for Response { // fn deserialize<'de, D>(deserializer: D) -> Result where D: Deserializer<'de> { // todo!() @@ -35,14 +29,14 @@ struct Request { // } async fn compile( - id: String, + request: CompilationRequest, dynamo_client: &aws_sdk_dynamodb::Client, table_name: &str, sqs_client: &aws_sdk_sqs::Client, queue_url: &str, ) -> Result<(), Error> { let item = Item { - id: id.clone(), + id: request.id.clone(), status: Status::Pending, }; @@ -58,7 +52,7 @@ async fn compile( Ok(val) => val, Err(SdkError::ServiceError(val)) => match val.err() { PutItemError::ConditionalCheckFailedException(_) => { - error!("Recompilation attempt, id: {}", id); + error!("Recompilation attempt, id: {}", request.id); let response = lambda_http::Response::builder() .status(400) .header("content-type", "text/html") @@ -72,10 +66,18 @@ async fn compile( Err(err) => return Err(Box::new(err).into()), }; + let message = SqsMessage::Compile { request }; + let message = match serde_json::to_string(&message) { + Ok(val) => val, + Err(err) => { + error!("Serialization failed, id: {:?}", message); + return Err(Box::new(err).into()); + } + }; let message_output = sqs_client .send_message() .queue_url(queue_url) - .message_body(id) + .message_body(message) .send() .await .map_err(Box::new)?; @@ -105,7 +107,7 @@ async fn process_request( s3_client: &aws_sdk_s3::Client, bucket_name: &str, ) -> Result, Error> { - let request = extract_request::(request)?; + let request = extract_request::(request)?; let objects = s3_client .list_objects_v2() @@ -128,7 +130,7 @@ async fn process_request( } info!("Compile"); - compile(request.id, dynamo_client, table_name, sqs_client, queue_url).await?; + compile(request, dynamo_client, table_name, sqs_client, queue_url).await?; let response = LambdaResponse::builder() .status(200) diff --git a/crates/lambdas/src/generate_presigned_urls.rs b/crates/lambdas/src/generate_presigned_urls.rs index 2683e4a2..0041809a 100644 --- a/crates/lambdas/src/generate_presigned_urls.rs +++ b/crates/lambdas/src/generate_presigned_urls.rs @@ -110,5 +110,5 @@ async fn main() -> Result<(), LambdaError> { Err(Error::LambdaError(err)) => Err(err), } })) 
-        .await
+    .await
 }
diff --git a/crates/types/Cargo.toml b/crates/types/Cargo.toml
index 1dc96814..d68de448 100644
--- a/crates/types/Cargo.toml
+++ b/crates/types/Cargo.toml
@@ -1,6 +1,10 @@
 [package]
 name = "types"
-version = "0.1.0"
+version = "0.0.1"
 edition = "2021"
 
 [dependencies]
+aws-sdk-dynamodb = {workspace = true}
+serde = {workspace = true}
+thiserror = {workspace = true}
+uuid = {workspace = true}
diff --git a/crates/types/src/lib.rs b/crates/types/src/lib.rs
index 7d12d9af..b6e324c5 100644
--- a/crates/types/src/lib.rs
+++ b/crates/types/src/lib.rs
@@ -1,14 +1,151 @@
-pub fn add(left: usize, right: usize) -> usize {
-    left + right
+use aws_sdk_dynamodb::types::AttributeValue;
+use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct CompilationConfig {
+    pub version: String,
+    #[serde(default)]
+    pub user_libraries: Vec<String>,
+    // TODO: reflect change in UI-code
+    pub target_path: Option<String>,
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct CompilationRequest {
+    pub id: String,
+    pub config: CompilationConfig,
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct VerifyConfig {
+    pub zksolc_version: String,
+    pub solc_version: Option<String>,
+    pub network: String,
+    pub contract_address: String,
+    pub inputs: Vec<String>,
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct VerificationRequest {
+    pub id: String,
+    pub config: VerifyConfig,
+}
+
+#[derive(Serialize, Deserialize, Debug)]
+#[serde(tag = "type")]
+pub enum SqsMessage {
+    Compile {
+        #[serde(flatten)]
+        request: CompilationRequest,
+    },
+    Verify {
+        #[serde(flatten)]
+        request: VerificationRequest,
+    },
+}
+
+#[derive(Debug, Clone, Serialize)]
+pub enum Status {
+    // TODO: add FilesUploaded(?)
+    Pending,
+    Compiling,
+    Ready(String),
+    Failed(String),
+}
+
+impl From<&Status> for u32 {
+    fn from(value: &Status) -> Self {
+        match value {
+            Status::Pending => 0,
+            Status::Compiling => 1,
+            Status::Ready(_) => 2,
+            Status::Failed(_) => 3,
+        }
+    }
+}
+
+impl From<Status> for HashMap<String, AttributeValue> {
+    fn from(value: Status) -> Self {
+        match value.clone() {
+            Status::Pending | Status::Compiling => HashMap::from([(
+                "Status".into(),
+                AttributeValue::N(u32::from(&value).to_string()),
+            )]),
+            Status::Ready(val) | Status::Failed(val) => HashMap::from([
+                (
+                    "Status".into(),
+                    AttributeValue::N(u32::from(&value).to_string()),
+                ),
+                ("Data".into(), AttributeValue::S(val)),
+            ]),
+        }
+    }
+}
+
+#[derive(thiserror::Error, Debug)]
+pub enum ItemError {
+    #[error("Invalid Item format")]
+    FormatError,
+    #[error(transparent)]
+    ParseError(#[from] std::num::ParseIntError),
+}
+
+pub struct Item {
+    // TODO: uuid?
+    pub id: String,
+    pub status: Status,
+}
+
+impl From<Item> for HashMap<String, AttributeValue> {
+    fn from(value: Item) -> Self {
+        let mut item_map = HashMap::from([("ID".into(), AttributeValue::S(value.id))]);
+        item_map.extend(HashMap::from(value.status));
+
+        item_map
+    }
+}
+
+impl TryFrom<&HashMap<String, AttributeValue>> for Status {
+    type Error = ItemError;
+    fn try_from(value: &HashMap<String, AttributeValue>) -> Result<Self, Self::Error> {
+        let status = value.get("Status").ok_or(ItemError::FormatError)?;
+        let status: u32 = status
+            .as_n()
+            .map_err(|_| ItemError::FormatError)?
+ .parse::()?; + let status = match status { + 0 => Status::Pending, + 1 => Status::Compiling, + 2 => { + let data = value.get("Data").ok_or(ItemError::FormatError)?; + let data = data.as_s().map_err(|_| ItemError::FormatError)?; + + Status::Ready(data.clone()) + } + 3 => { + let data = value.get("Data").ok_or(ItemError::FormatError)?; + let data = data.as_s().map_err(|_| ItemError::FormatError)?; + + Status::Failed(data.clone()) + } + _ => return Err(ItemError::FormatError), + }; + + Ok(status) + } } -#[cfg(test)] -mod tests { - use super::*; +impl TryFrom> for Item { + type Error = ItemError; + fn try_from(value: HashMap) -> Result { + let id = value.get("ID").ok_or(ItemError::FormatError)?; + let id = id.as_s().map_err(|_| ItemError::FormatError)?; + let status = (&value).try_into()?; - #[test] - fn it_works() { - let result = add(2, 2); - assert_eq!(result, 4); + Ok(Item { + id: id.clone(), + status, + }) } } diff --git a/crates/worker/Cargo.toml b/crates/worker/Cargo.toml index 987ad95d..be726bd1 100644 --- a/crates/worker/Cargo.toml +++ b/crates/worker/Cargo.toml @@ -20,3 +20,6 @@ aws-runtime = "1.4.0" async-channel = "2.3.1" chrono = "0.4.38" crossbeam-queue = "0.3.11" + +# Inner crates +types = {workspace = true} \ No newline at end of file diff --git a/crates/worker/src/dynamodb_client.rs b/crates/worker/src/dynamodb_client.rs index d165c980..8caecd8f 100644 --- a/crates/worker/src/dynamodb_client.rs +++ b/crates/worker/src/dynamodb_client.rs @@ -1,4 +1,4 @@ -use crate::types::Item; +use types::Item; use aws_sdk_dynamodb::types::AttributeValue; use aws_sdk_dynamodb::Client; diff --git a/crates/worker/src/main.rs b/crates/worker/src/main.rs index fdb32ead..a7404e1b 100644 --- a/crates/worker/src/main.rs +++ b/crates/worker/src/main.rs @@ -19,6 +19,8 @@ const AWS_PROFILE_DEFAULT: &str = "dev"; pub(crate) const QUEUE_URL_DEFAULT: &str = "https://sqs.ap-southeast-2.amazonaws.com/266735844848/zksync-sqs"; +// TODO: state synchronization + #[tokio::main] async fn main() { let profile_name = std::env::var("AWS_PROFILE").unwrap_or(AWS_PROFILE_DEFAULT.into()); diff --git a/crates/worker/src/types.rs b/crates/worker/src/types.rs deleted file mode 100644 index efbbeb51..00000000 --- a/crates/worker/src/types.rs +++ /dev/null @@ -1,110 +0,0 @@ -// TODO: move to separate crate - -use aws_sdk_dynamodb::types::AttributeValue; -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; -use std::os::macos::raw::stat; -use uuid::Uuid; - -#[derive(Serialize, Deserialize)] -#[serde(tag = "type")] -pub enum SqsMessage { - Compile { id: Uuid }, - Verify { id: Uuid }, -} - -#[derive(Debug, Clone, Serialize)] -pub enum Status { - // TODO: add FilesUploaded(?) - Pending, - Compiling, - Ready(String), - Failed(String), -} - -impl From<&Status> for u32 { - fn from(value: &Status) -> Self { - match value { - Status::Pending => 0, - Status::Compiling => 1, - Status::Ready(_) => 2, - Status::Failed(_) => 3, - } - } -} - -impl From for HashMap { - fn from(value: Status) -> Self { - match value.clone() { - Status::Pending | Status::Compiling => HashMap::from([( - "Status".into(), - AttributeValue::N(u32::from(&value).to_string()), - )]), - Status::Ready(val) | Status::Failed(val) => HashMap::from([ - ( - "Status".into(), - AttributeValue::N(u32::from(&value).to_string()), - ), - ("Data".into(), AttributeValue::S(val)), - ]), - } - } -} - -pub struct Item { - // TODO: uuid? 
- pub id: String, - pub status: Status, -} - -impl From for HashMap { - fn from(value: Item) -> Self { - let mut item_map = HashMap::from([("ID".into(), AttributeValue::S(value.id))]); - item_map.extend(HashMap::from(value.status)); - - item_map - } -} - -impl TryFrom<&HashMap> for Status { - // TODO: error - type Error = (); - fn try_from(value: &HashMap) -> Result { - let status = value.get("Status").ok_or(())?; - let status: u32 = status.as_n().map_err(|_| ())?.parse::().map_err(|_| ())?; - let status = match status { - 0 => Status::Pending, - 1 => Status::Compiling, - 2 => { - let data = value.get("Data").ok_or(())?; - let data = data.as_s().map_err(|_|())?; - - Status::Ready(data.clone()) - } - 3 => { - let data = value.get("Data").ok_or(())?; - let data = data.as_s().map_err(|_|())?; - - Status::Failed(data.clone()) - } - _ => return Err(()), - }; - - Ok(status) - } -} - -impl TryFrom> for Item { - // TODO: error - type Error = (); - fn try_from(value: HashMap) -> Result { - let id = value.get("ID").ok_or(())?; - let id = id.as_s().map_err(|_| ())?; - let status = (&value).try_into()?; - - Ok(Item { - id: id.clone(), - status, - }) - } -} diff --git a/crates/worker/src/worker.rs b/crates/worker/src/worker.rs index fb40e88d..519f4406 100644 --- a/crates/worker/src/worker.rs +++ b/crates/worker/src/worker.rs @@ -10,10 +10,10 @@ use tokio::task::JoinHandle; use tokio::time::sleep; use tracing::{error, info, warn}; use uuid::Uuid; +use types::SqsMessage; use crate::sqs_client::SqsClient; use crate::sqs_listener::{SqsListener, SqsReceiver}; -use crate::types::SqsMessage; use crate::utils::lib::{timestamp, DURATION_TO_PURGE}; pub type Timestamp = u64; From cc3e1c10056666d2e8dedbc1f5ed22ad8a1fa48e Mon Sep 17 00:00:00 2001 From: taco-paco Date: Sat, 24 Aug 2024 19:19:20 +0700 Subject: [PATCH 06/21] fix: error types + compilation --- crates/types/src/lib.rs | 13 ++++ crates/worker/Cargo.toml | 1 + crates/worker/src/commands/compile.rs | 91 +++++++++++++++++---------- crates/worker/src/commands/mod.rs | 4 +- crates/worker/src/dynamodb_client.rs | 27 +++++--- crates/worker/src/errors.rs | 60 +++++++++++++++++- crates/worker/src/main.rs | 1 - crates/worker/src/sqs_client.rs | 29 +++++---- crates/worker/src/sqs_listener.rs | 8 +-- crates/worker/src/worker.rs | 39 ++++++++++-- 10 files changed, 204 insertions(+), 69 deletions(-) diff --git a/crates/types/src/lib.rs b/crates/types/src/lib.rs index b6e324c5..2fbd720e 100644 --- a/crates/types/src/lib.rs +++ b/crates/types/src/lib.rs @@ -1,6 +1,8 @@ use aws_sdk_dynamodb::types::AttributeValue; use serde::{Deserialize, Serialize}; use std::collections::HashMap; +use std::fmt; +use std::fmt::Formatter; #[derive(Serialize, Deserialize, Clone, Debug)] pub struct CompilationConfig { @@ -54,6 +56,17 @@ pub enum Status { Failed(String), } +impl fmt::Display for Status { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + Status::Pending => write!(f, "Pending"), + Status::Compiling => write!(f, "Compiling"), + Status::Ready(msg) => write!(f, "Ready: {}", msg), + Status::Failed(msg) => write!(f, "Failed: {}", msg), + } + } +} + impl From<&Status> for u32 { fn from(value: &Status) -> Self { match value { diff --git a/crates/worker/Cargo.toml b/crates/worker/Cargo.toml index be726bd1..f3758310 100644 --- a/crates/worker/Cargo.toml +++ b/crates/worker/Cargo.toml @@ -17,6 +17,7 @@ tracing-subscriber = { workspace = true } uuid = {workspace = true} aws-runtime = "1.4.0" +aws-smithy-types = "1.2.2" async-channel = "2.3.1" chrono = "0.4.38" 
 crossbeam-queue = "0.3.11"
diff --git a/crates/worker/src/commands/compile.rs b/crates/worker/src/commands/compile.rs
index 1c96830f..2854d2c3 100644
--- a/crates/worker/src/commands/compile.rs
+++ b/crates/worker/src/commands/compile.rs
@@ -1,28 +1,27 @@
 use aws_sdk_s3::types::Object;
-use std::path::Path;
+use std::ops::Add;
 use std::process::Stdio;
-use tracing::{instrument, warn};
+use tracing::log::Level::Error;
+use tracing::warn;
 use tracing::{error, info};
+use types::{CompilationRequest, Item, Status};
 use uuid::Uuid;
-use std::io::BufWriter;
-use std::io::{Write, IoSlice};
-use std::ops::Add;
-use tokio::io::AsyncWriteExt;
 
 use crate::dynamodb_client::DynamoDBClient;
+use crate::errors::CompilationError::NoDBItemError;
+use crate::errors::{CompilationError, DBError, S3Error};
 use crate::utils::cleaner::AutoCleanUp;
 use crate::utils::hardhat_config::HardhatConfigBuilder;
 use crate::utils::lib::{
-    generate_folder_name, status_code_to_message,
-    DEFAULT_SOLIDITY_VERSION, SOL_ROOT, ZKSOLC_VERSIONS,
+    generate_folder_name, status_code_to_message, DEFAULT_SOLIDITY_VERSION, SOL_ROOT,
+    ZKSOLC_VERSIONS,
 };
 
 async fn list_all_keys(
     client: &aws_sdk_s3::Client,
     id: String,
     bucket: &str,
-) -> Result<Vec<Object>, ()> {
-    // TODO: errors
+) -> Result<Vec<Object>, S3Error> {
     let mut objects = Vec::new();
     let mut continuation_token: Option<String> = None;
@@ -37,7 +36,7 @@ async fn list_all_keys(
             request = request.continuation_token(token);
         }
 
-        let response = request.send().await.map_err(|_| ())?;
+        let response = request.send().await?;
         if let Some(contents) = response.contents {
             objects.extend(contents);
         }
@@ -63,41 +62,67 @@ async fn list_all_keys(
     Ok(objects)
 }
 
-pub async fn compile(
-    id: Uuid,
-    db_client: DynamoDBClient,
-    s3_client: aws_sdk_s3::Client,
-) -> Result<(), ()> {
-    // TODO: errors
-    let item = db_client.get_item(id.to_string()).await.unwrap();
-    let item = match item {
-        Some(item) => item,
-        None => {
-            error!("No item id: {}", id);
-            return Err(());
-        }
-    };
-
+async fn extract_files(
+    id: String,
+    s3_client: &aws_sdk_s3::Client,
+) -> Result<Vec<Vec<u8>>, S3Error> {
     let objects = list_all_keys(&s3_client, id.to_string(), "TODO").await?;
+
+    let mut files = vec![];
     for object in objects {
-        let key = object.key().ok_or(())?;
+        let key = object.key().ok_or(S3Error::InvalidObjectError)?;
+        let expected_size = object.size.ok_or(S3Error::InvalidObjectError)?;
+
         let mut object = s3_client
             .get_object()
-            .bucket("TODO")
+            .bucket("TODO:")
            .key(key)
            .send()
-            .await
-            .map_err(|_| ())?;
+            .await?;
 
-        let mut byte_count = 0_usize;
+        let mut byte_count = 0;
         let mut contents = Vec::new();
-        while let Some(bytes) = object.body.try_next().await.map_err(|_| ())? {
+        while let Some(bytes) = object.body.try_next().await? {
-            let bytes_len = bytes.len();
-            std::io::Write::write_all(&mut contents, &bytes).map_err(|_| ());
-            byte_count += bytes_len;
-        }
+            let bytes_len = bytes.len();
+
+            std::io::Write::write_all(&mut contents, &bytes)?;
+            byte_count += bytes_len;
+        }
+
+        if byte_count as i64 != expected_size {
+            error!("Fetched num bytes != expected size of file.");
+            return Err(S3Error::InvalidObjectError);
+        }
+
+        files.push(contents);
     }
 
+    Ok(files)
+}
+
+pub async fn compile(
+    request: CompilationRequest,
+    db_client: &DynamoDBClient,
+    s3_client: &aws_sdk_s3::Client,
+) -> Result<(), CompilationError> {
+    let item = db_client.get_item(request.id.clone()).await?;
+    let item: Item = match item {
+        Some(item) => item,
+        None => {
+            error!("No item id: {}", request.id);
+            return Err(NoDBItemError(request.id));
+        }
+    };
+
+    match item.status {
+        Status::Pending => {}
+        status => {
+            warn!("Item already processing: {}", status);
+            return Err(CompilationError::UnexpectedStatusError(status));
+        }
+    }
+
+    let files = extract_files(request.id, s3_client).await?;
+
     // TODO:
     Ok(())
 }
diff --git a/crates/worker/src/commands/mod.rs b/crates/worker/src/commands/mod.rs
index cd0dbc1c..48755763 100644
--- a/crates/worker/src/commands/mod.rs
+++ b/crates/worker/src/commands/mod.rs
@@ -1,2 +1,2 @@
-mod compile;
-mod verify;
+pub mod compile;
+pub mod verify;
diff --git a/crates/worker/src/dynamodb_client.rs b/crates/worker/src/dynamodb_client.rs
index 8caecd8f..777a1dc0 100644
--- a/crates/worker/src/dynamodb_client.rs
+++ b/crates/worker/src/dynamodb_client.rs
@@ -1,11 +1,12 @@
-use types::Item;
+use crate::errors::{DBDeleteError, DBError, DBGetError};
 use aws_sdk_dynamodb::types::AttributeValue;
 use aws_sdk_dynamodb::Client;
+use types::Item;
 
 #[derive(Clone)]
 pub struct DynamoDBClient {
-    client: Client,
-    table_name: String,
+    pub client: Client,
+    pub table_name: String,
 }
 
 impl DynamoDBClient {
@@ -16,23 +17,29 @@ impl DynamoDBClient {
         }
     }
 
-    pub async fn delete_item(&self, id: String) {
-        // TODO:
+    pub async fn delete_item(&self, id: String) -> Result<(), DBError> {
+        self.client
+            .delete_item()
+            .table_name(self.table_name.clone())
+            .key("ID", AttributeValue::S(id))
+            .send()
+            .await?;
+
+        Ok(())
     }
 
-    // TODO: remove unwraps
-    pub async fn get_item(&self, id: String) -> Result<Option<Item>, ()> {
+    pub async fn get_item(&self, id: String) -> Result<Option<Item>, DBError> {
         let result = self
             .client
             .get_item()
             .table_name(self.table_name.clone())
             .key("ID", AttributeValue::S(id))
             .send()
-            .await
-            .unwrap();
+            .await?;
 
         if let Some(item) = result.item {
-            Ok(Some(item.try_into().unwrap()))
+            // TODO: maybe change status when error?
+            Ok(Some(item.try_into()?))
         } else {
             Ok(None)
         }
     }
 }
diff --git a/crates/worker/src/errors.rs b/crates/worker/src/errors.rs
index 0844cb00..cd810b81 100644
--- a/crates/worker/src/errors.rs
+++ b/crates/worker/src/errors.rs
@@ -1,7 +1,63 @@
 use aws_sdk_dynamodb::config::http::HttpResponse;
+use aws_sdk_dynamodb::operation::delete_item::DeleteItemError;
+use aws_sdk_dynamodb::operation::get_item::GetItemError;
+use aws_sdk_s3::operation::get_object::GetObjectError;
+use aws_sdk_s3::operation::list_objects_v2::ListObjectsV2Error;
 use aws_sdk_sqs::error::SdkError;
 use aws_sdk_sqs::operation::delete_message::DeleteMessageError;
 use aws_sdk_sqs::operation::receive_message::ReceiveMessageError;
+use types::{ItemError, Status};
 
-pub type ReceiveError = SdkError<ReceiveMessageError, HttpResponse>;
-pub type DeleteError = SdkError<DeleteMessageError, HttpResponse>;
+// SQS related errors
+pub(crate) type SqsReceiveError = SdkError<ReceiveMessageError, HttpResponse>;
+pub(crate) type SqsDeleteError = SdkError<DeleteMessageError, HttpResponse>;
+
+// DynamoDB related errors
+pub(crate) type DBDeleteError = SdkError<DeleteItemError, HttpResponse>;
+pub(crate) type DBGetError = SdkError<GetItemError, HttpResponse>;
+
+// S3 related errors
+pub(crate) type S3ListObjectsError = SdkError<ListObjectsV2Error, HttpResponse>;
+pub(crate) type S3GetObjectError = SdkError<GetObjectError, HttpResponse>;
+
+#[derive(thiserror::Error, Debug)]
+pub enum DBError {
+    #[error(transparent)]
+    DeleteItemError(#[from] DBDeleteError),
+    #[error(transparent)]
+    GetItemError(#[from] DBGetError),
+    #[error(transparent)]
+    ItemFormatError(#[from] ItemError),
+}
+
+#[derive(thiserror::Error, Debug)]
+pub enum S3Error {
+    #[error("Invalid object")]
+    InvalidObjectError,
+    #[error(transparent)]
+    GetObjectError(#[from] S3GetObjectError),
+    #[error(transparent)]
+    ListObjectsError(#[from] S3ListObjectsError),
+    #[error(transparent)]
+    IoError(#[from] std::io::Error),
+    #[error(transparent)]
+    ByteStreamError(#[from] aws_smithy_types::byte_stream::error::Error),
+}
+
+#[derive(thiserror::Error, Debug)]
+pub enum CompilationError {
+    #[error(transparent)]
+    DBError(#[from] DBError),
+    #[error(transparent)]
+    S3Error(#[from] S3Error),
+    #[error("Item isn't in DB: {0}")]
+    NoDBItemError(String),
+    #[error("Unexpected status: {0}")]
+    UnexpectedStatusError(Status),
+}
+
+#[derive(thiserror::Error, Debug)]
+pub enum Error {
+    #[error(transparent)]
+    DBError(#[from] DBError),
+}
diff --git a/crates/worker/src/main.rs b/crates/worker/src/main.rs
index a7404e1b..5556e6af 100644
--- a/crates/worker/src/main.rs
+++ b/crates/worker/src/main.rs
@@ -3,7 +3,6 @@
 mod dynamodb_client;
 mod errors;
 mod sqs_client;
 mod sqs_listener;
-mod types;
 mod utils;
 mod worker;
 
diff --git a/crates/worker/src/sqs_client.rs b/crates/worker/src/sqs_client.rs
index 3e4f6bdb..83cc7109 100644
--- a/crates/worker/src/sqs_client.rs
+++ b/crates/worker/src/sqs_client.rs
@@ -1,4 +1,4 @@
-use crate::errors::{DeleteError, ReceiveError};
+use crate::errors::{SqsDeleteError, SqsReceiveError};
 use aws_config::retry::ErrorKind;
 use aws_sdk_sqs::operation::delete_message::DeleteMessageOutput;
 use aws_sdk_sqs::operation::receive_message::ReceiveMessageOutput;
@@ -43,10 +43,10 @@ macro_rules! match_result {
match_result { enum Action { Default, - Receive(oneshot::Sender>), + Receive(oneshot::Sender>), Delete { receipt_handle: String, - sender: oneshot::Sender>, + sender: oneshot::Sender>, }, } @@ -71,15 +71,20 @@ pub struct SqsClient { impl SqsClient { pub fn new(client: Client, queue_url: impl Into) -> Self { - Self { + let this = Self { client, queue_url: queue_url.into(), pending_actions: Arc::new(Mutex::new(vec![])), state: Arc::new(AtomicU8::new(State::Connected as u8)), - } + }; + + // TODO: improve the lauch + tokio::spawn(SqsClient::worker(this.clone())); + + this } - async fn receive_attempt(&self) -> Result, ReceiveError> { + async fn receive_attempt(&self) -> Result, SqsReceiveError> { let result = self .client .receive_message() @@ -88,13 +93,13 @@ impl SqsClient { .send() .await; - match_result!(ReceiveError, result) + match_result!(SqsReceiveError, result) } async fn delete_attempt( &self, receipt_handle: impl Into, - ) -> Result, DeleteError> { + ) -> Result, SqsDeleteError> { let result = self .client .delete_message() @@ -103,10 +108,12 @@ impl SqsClient { .send() .await; - match_result!(DeleteError, result) + match_result!(SqsDeleteError, result) } + // TODO: start async fn worker(self) { + // TODO: get the tasks through receiver + maintain the inner queue loop { let mut actions = self.pending_actions.lock().await; @@ -166,7 +173,7 @@ impl SqsClient { } } - pub async fn receive_message(&self) -> Result { + pub async fn receive_message(&self) -> Result { match self.state.load(Ordering::Acquire) { 0 => match self.receive_attempt().await { Ok(None) => self @@ -192,7 +199,7 @@ impl SqsClient { pub async fn delete_message( &self, receipt_handle: impl Into, - ) -> Result<(), DeleteError> { + ) -> Result<(), SqsDeleteError> { let receipt_handle = receipt_handle.into(); match self.state.load(Ordering::Acquire) { 0 => match self.delete_attempt(receipt_handle.clone()).await { diff --git a/crates/worker/src/sqs_listener.rs b/crates/worker/src/sqs_listener.rs index b0faca46..fdbc2914 100644 --- a/crates/worker/src/sqs_listener.rs +++ b/crates/worker/src/sqs_listener.rs @@ -1,4 +1,4 @@ -use crate::errors::{DeleteError, ReceiveError}; +use crate::errors::{SqsDeleteError, SqsReceiveError}; use async_channel::{Receiver, Recv, Sender}; use aws_sdk_sqs::config::http::HttpResponse; use aws_sdk_sqs::error::SdkError; @@ -11,7 +11,7 @@ use tokio::time::sleep; use crate::sqs_client::SqsClient; pub struct SqsListener { - handle: JoinHandle>, + handle: JoinHandle>, receiver: Receiver, client: SqsClient, } @@ -59,7 +59,7 @@ impl SqsListener { } } - pub fn handle(self) -> JoinHandle> { + pub fn handle(self) -> JoinHandle> { self.handle } } @@ -77,7 +77,7 @@ impl SqsReceiver { pub async fn delete_message( &self, receipt_handle: impl Into, - ) -> Result<(), DeleteError> { + ) -> Result<(), SqsDeleteError> { self.client.delete_message(receipt_handle).await } } diff --git a/crates/worker/src/worker.rs b/crates/worker/src/worker.rs index 519f4406..ec211ef5 100644 --- a/crates/worker/src/worker.rs +++ b/crates/worker/src/worker.rs @@ -1,3 +1,4 @@ +use crate::commands::compile::compile; use crate::dynamodb_client::DynamoDBClient; use crossbeam_queue::ArrayQueue; use std::fmt::{Display, Formatter}; @@ -9,8 +10,8 @@ use tokio::sync::Mutex; use tokio::task::JoinHandle; use tokio::time::sleep; use tracing::{error, info, warn}; -use uuid::Uuid; use types::SqsMessage; +use uuid::Uuid; use crate::sqs_client::SqsClient; use crate::sqs_listener::{SqsListener, SqsReceiver}; @@ -27,16 +28,27 @@ pub struct RunningWorker 
{ impl RunningWorker { pub fn new( sqs_listener: SqsListener, + db_client: DynamoDBClient, + s3_client: aws_sdk_s3::Client, num_workers: usize, expiration_timestamps: Arc>>, ) -> Self { let mut worker_threads = Vec::with_capacity(num_workers); for _ in 0..num_workers { - // add to collection + // Start worker let sqs_receiver = sqs_listener.receiver(); + let db_client_copy = db_client.clone(); + let s3_client_copy = s3_client.clone(); let expiration_timestamps = expiration_timestamps.clone(); + worker_threads.push(tokio::spawn(async move { - RunningWorker::worker(sqs_receiver, expiration_timestamps).await; + RunningWorker::worker( + sqs_receiver, + db_client_copy, + s3_client_copy, + expiration_timestamps, + ) + .await; })); } @@ -50,6 +62,8 @@ impl RunningWorker { async fn worker( sqs_receiver: SqsReceiver, + db_client: DynamoDBClient, + s3_client: aws_sdk_s3::Client, expiration_timestamps: Arc>>, ) { // TODO: process error @@ -77,8 +91,10 @@ impl RunningWorker { }; match sqs_message { - SqsMessage::Compile { id } => {} - SqsMessage::Verify { id } => {} + SqsMessage::Compile { request } => { + let _ = compile(request, &db_client, &s3_client).await; // TODO: + } + SqsMessage::Verify { request } => {} // TODO; } let _ = sqs_receiver.delete_message(receipt_handle).await; @@ -89,6 +105,7 @@ impl RunningWorker { pub struct WorkerEngine { sqs_client: SqsClient, db_client: DynamoDBClient, + s3_client: aws_sdk_s3::Client, expiration_timestamps: Arc>>, is_supervisor_enabled: Arc, running_workers: Vec, @@ -96,13 +113,19 @@ pub struct WorkerEngine { } impl WorkerEngine { - pub fn new(sqs_client: SqsClient, db_client: DynamoDBClient, supervisor_enabled: bool) -> Self { + pub fn new( + sqs_client: SqsClient, + db_client: DynamoDBClient, + s3_client: aws_sdk_s3::Client, + supervisor_enabled: bool, + ) -> Self { let is_supervisor_enabled = Arc::new(AtomicBool::new(supervisor_enabled)); let expiration_timestamps = Arc::new(Mutex::new(vec![])); WorkerEngine { sqs_client, db_client, + s3_client, supervisor_thread: Arc::new(None), expiration_timestamps, running_workers: vec![], @@ -112,8 +135,12 @@ impl WorkerEngine { pub fn start(&mut self, num_workers: NonZeroUsize) { let sqs_listener = SqsListener::new(self.sqs_client.clone(), Duration::from_millis(500)); + let s3_client = self.s3_client.clone(); + let db_client = self.db_client.clone(); self.running_workers.push(RunningWorker::new( sqs_listener, + db_client, + s3_client, num_workers.get(), self.expiration_timestamps.clone(), )); From 5b5684553195b416f70222d93b51d20bc1c2d036 Mon Sep 17 00:00:00 2001 From: taco-paco Date: Mon, 26 Aug 2024 09:49:45 +0700 Subject: [PATCH 07/21] feat: intermidiate compile changes --- Cargo.toml | 1 + crates/worker/src/commands/compile.rs | 327 ++++++++++++++------------ crates/worker/src/errors.rs | 9 +- 3 files changed, 184 insertions(+), 153 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 6ff3d014..8f7df719 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,6 +4,7 @@ members = [ "crates/lambdas", "crates/worker", ] +exclude = ["api"] [workspace.dependencies] aws-config = "1.5.5" diff --git a/crates/worker/src/commands/compile.rs b/crates/worker/src/commands/compile.rs index 2854d2c3..1e230180 100644 --- a/crates/worker/src/commands/compile.rs +++ b/crates/worker/src/commands/compile.rs @@ -1,10 +1,14 @@ +use aws_sdk_dynamodb::types::AttributeValue; use aws_sdk_s3::types::Object; use std::ops::Add; +use std::path::Path; use std::process::Stdio; +use aws_sdk_dynamodb::error::SdkError; +use 
aws_sdk_dynamodb::operation::update_item::UpdateItemError; use tracing::log::Level::Error; use tracing::warn; use tracing::{error, info}; -use types::{CompilationRequest, Item, Status}; +use types::{CompilationConfig, CompilationRequest, Item, Status}; use uuid::Uuid; use crate::dynamodb_client::DynamoDBClient; @@ -17,6 +21,11 @@ use crate::utils::lib::{ ZKSOLC_VERSIONS, }; +struct CompilationInput { + pub config: CompilationConfig, + pub contracts: Vec>, +} + async fn list_all_keys( client: &aws_sdk_s3::Client, id: String, @@ -117,161 +126,175 @@ pub async fn compile( Status::Pending => {} status => { warn!("Item already processing: {}", status); - return Err(CompilationError::UnexpectedStatusError(status)); + return Err(CompilationError::UnexpectedStatusError(status.to_string())); } } - let files = extract_files(request.id, s3_client).await?; + let files = extract_files(request.id.clone(), s3_client).await?; + + { + let new_item = Item { + id: item.id.clone(), + status: Status::Compiling, + }; + let db_update_result = db_client + .client + .update_item() + .table_name(db_client.table_name.clone()) + .update_expression("SET Status=1") + .key("ID", AttributeValue::S(request.id.clone())) + .condition_expression("Status=0") // TODO: check + .send() + .await; + match db_update_result { + Ok(_) => {}, + Err(SdkError::ServiceError(err)) => match err.err() { + UpdateItemError::ConditionalCheckFailedException(err) => { + return Err(CompilationError::UnexpectedStatusError("Concurrent status change from another instance".into())) + }, + _ => return Err(SdkError::ServiceError(err).into()) + } + Err(err) => Err(err).into(), + } + } + + let asd = do_compile(request.id,CompilationInput { + config: request.config, + contracts: files, + }) + .await; // TODO: Ok(()) } -// -// pub async fn do_compile( -// id: Uuid, -// db_client: DynamoDBClient, -// s3_client: aws_sdk_s3::Client, -// compilation_request: CompilationRequest, -// ) -> Result> { -// // TODO: errors -// -// let zksolc_version = compilation_request.config.version; -// -// // check if the version is supported -// if !ZKSOLC_VERSIONS.contains(&zksolc_version.as_str()) { -// return Err(ApiError::VersionNotSupported(zksolc_version)); -// } -// -// if compilation_request.contracts.is_empty() { -// return Ok(Json(CompileResponse { -// file_content: vec![], -// status: status_code_to_message(Some(0)), -// message: "Nothing to compile".into(), -// })); -// } -// -// let namespace = generate_folder_name(); -// -// // root directory for the contracts -// let workspace_path_str = format!("{}/{}", SOL_ROOT, namespace); -// let workspace_path = Path::new(&workspace_path_str); -// -// // root directory for the artifacts -// let artifacts_path_str = format!("{}/{}", workspace_path_str, "artifacts-zk"); -// let artifacts_path = Path::new(&artifacts_path_str); -// -// // root directory for user files (hardhat config, etc) -// let user_files_path_str = workspace_path_str.clone(); -// let hardhat_config_path = Path::new(&user_files_path_str).join("hardhat.config.ts"); -// -// // instantly create the directories -// tokio::fs::create_dir_all(workspace_path) -// .await -// .map_err(ApiError::FailedToWriteFile)?; -// tokio::fs::create_dir_all(artifacts_path) -// .await -// .map_err(ApiError::FailedToWriteFile)?; -// -// // when the compilation is done, clean up the directories -// // it will be called when the AutoCleanUp struct is dropped -// let auto_clean_up = AutoCleanUp { -// dirs: vec![workspace_path.to_str().unwrap()], -// }; -// -// // write the hardhat 
config file -// let mut hardhat_config_builder = HardhatConfigBuilder::new(); -// hardhat_config_builder -// .zksolc_version(&zksolc_version) -// .solidity_version(DEFAULT_SOLIDITY_VERSION); -// if let Some(target_path) = compilation_request.target_path { -// hardhat_config_builder.paths_sources(&target_path); -// } -// -// let hardhat_config_content = hardhat_config_builder.build().to_string_config(); -// -// // create parent directories -// tokio::fs::create_dir_all(hardhat_config_path.parent().unwrap()) -// .await -// .map_err(ApiError::FailedToWriteFile)?; -// -// tokio::fs::write(hardhat_config_path, hardhat_config_content) -// .await -// .map_err(ApiError::FailedToWriteFile)?; -// -// // filter test files from compilation candidates -// let contracts = compilation_request -// .contracts -// .into_iter() -// .filter(|contract| !contract.file_name.ends_with("_test.sol")) -// .collect(); -// -// // initialize the files -// initialize_files(contracts, workspace_path).await?; -// -// // Limit number of spawned processes. RAII released -// let _permit = SPAWN_SEMAPHORE.acquire().await.expect("Expired semaphore"); -// -// let command = tokio::process::Command::new("npx") -// .arg("hardhat") -// .arg("compile") -// .current_dir(workspace_path) -// .stdout(Stdio::piped()) -// .stderr(Stdio::piped()) -// .spawn(); -// let process = command.map_err(ApiError::FailedToExecuteCommand)?; -// let output = process -// .wait_with_output() -// .await -// .map_err(ApiError::FailedToReadOutput)?; -// -// let status = output.status; -// let message = String::from_utf8_lossy(&output.stdout).to_string(); -// -// info!("Output: \n{:?}", String::from_utf8_lossy(&output.stdout)); -// if !status.success() { -// error!( -// "Compilation error: {}", -// String::from_utf8_lossy(&output.stderr) -// ); -// return Ok(Json(CompileResponse { -// file_content: vec![], -// message: format!( -// "Failed to compile:\n{}", -// String::from_utf8_lossy(&output.stderr) -// ), -// status: "Error".to_string(), -// })); -// } -// -// // fetch the files in the artifacts directory -// let mut file_contents: Vec = vec![]; -// let file_paths = list_files_in_directory(artifacts_path); -// -// for file_path in file_paths.iter() { -// let file_content = tokio::fs::read_to_string(file_path) -// .await -// .map_err(ApiError::FailedToReadFile)?; -// let full_path = Path::new(file_path); -// let relative_path = full_path.strip_prefix(artifacts_path).unwrap_or(full_path); -// let relative_path_str = relative_path.to_str().unwrap(); -// -// // todo(varex83): is it the best way to check? 
-// let is_contract = -// !relative_path_str.ends_with(".dbg.json") && relative_path_str.ends_with(".json"); -// -// file_contents.push(CompiledFile { -// file_name: relative_path_str.to_string(), -// file_content, -// is_contract, -// }); -// } -// -// // calling here explicitly to avoid dropping the AutoCleanUp struct -// auto_clean_up.clean_up().await; -// -// Ok(Json(CompileResponse { -// file_content: file_contents, -// status: status_code_to_message(status.code()), -// message, -// })) -// } +pub async fn do_compile(namespace: String, compilation_request: CompilationInput) -> Result<(), CompilationError> { + let zksolc_version = compilation_request.config.version; + + // check if the version is supported + if !ZKSOLC_VERSIONS.contains(&zksolc_version.as_str()) { + return Err(CompilationError::VersionNotSupported(zksolc_version)); + } + + // root directory for the contracts + let workspace_path_str = format!("{}/{}", SOL_ROOT, namespace); + let workspace_path = Path::new(&workspace_path_str); + + // root directory for the artifacts + let artifacts_path_str = format!("{}/{}", workspace_path_str, "artifacts-zk"); + let artifacts_path = Path::new(&artifacts_path_str); + + // root directory for user files (hardhat config, etc) + let user_files_path_str = workspace_path_str.clone(); + let hardhat_config_path = Path::new(&user_files_path_str).join("hardhat.config.ts"); + + // instantly create the directories + tokio::fs::create_dir_all(workspace_path) + .await + .map_err(ApiError::FailedToWriteFile)?; + tokio::fs::create_dir_all(artifacts_path) + .await + .map_err(ApiError::FailedToWriteFile)?; + + // when the compilation is done, clean up the directories + // it will be called when the AutoCleanUp struct is dropped + let auto_clean_up = AutoCleanUp { + dirs: vec![workspace_path.to_str().unwrap()], + }; + + // write the hardhat config file + let mut hardhat_config_builder = HardhatConfigBuilder::new(); + hardhat_config_builder + .zksolc_version(&zksolc_version) + .solidity_version(DEFAULT_SOLIDITY_VERSION); + if let Some(target_path) = compilation_request.target_path { + hardhat_config_builder.paths_sources(&target_path); + } + + let hardhat_config_content = hardhat_config_builder.build().to_string_config(); + + // create parent directories + tokio::fs::create_dir_all(hardhat_config_path.parent().unwrap()) + .await + .map_err(ApiError::FailedToWriteFile)?; + + tokio::fs::write(hardhat_config_path, hardhat_config_content) + .await + .map_err(ApiError::FailedToWriteFile)?; + + // filter test files from compilation candidates + let contracts = compilation_request + .contracts + .into_iter() + .filter(|contract| !contract.file_name.ends_with("_test.sol")) + .collect(); + + // initialize the files + initialize_files(contracts, workspace_path).await?; + + // Limit number of spawned processes. 
RAII released
+    let _permit = SPAWN_SEMAPHORE.acquire().await.expect("Expired semaphore");
+
+    let command = tokio::process::Command::new("npx")
+        .arg("hardhat")
+        .arg("compile")
+        .current_dir(workspace_path)
+        .stdout(Stdio::piped())
+        .stderr(Stdio::piped())
+        .spawn();
+    let process = command.map_err(ApiError::FailedToExecuteCommand)?;
+    let output = process
+        .wait_with_output()
+        .await
+        .map_err(ApiError::FailedToReadOutput)?;
+
+    let status = output.status;
+    let message = String::from_utf8_lossy(&output.stdout).to_string();
+
+    info!("Output: \n{:?}", String::from_utf8_lossy(&output.stdout));
+    if !status.success() {
+        error!(
+            "Compilation error: {}",
+            String::from_utf8_lossy(&output.stderr)
+        );
+        return Ok(Json(CompileResponse {
+            file_content: vec![],
+            message: format!(
+                "Failed to compile:\n{}",
+                String::from_utf8_lossy(&output.stderr)
+            ),
+            status: "Error".to_string(),
+        }));
+    }
+
+    // fetch the files in the artifacts directory
+    let mut file_contents: Vec<CompiledFile> = vec![];
+    let file_paths = list_files_in_directory(artifacts_path);
+
+    for file_path in file_paths.iter() {
+        let file_content = tokio::fs::read_to_string(file_path)
+            .await
+            .map_err(ApiError::FailedToReadFile)?;
+        let full_path = Path::new(file_path);
+        let relative_path = full_path.strip_prefix(artifacts_path).unwrap_or(full_path);
+        let relative_path_str = relative_path.to_str().unwrap();
+
+        // todo(varex83): is it the best way to check?
+        let is_contract =
+            !relative_path_str.ends_with(".dbg.json") && relative_path_str.ends_with(".json");
+
+        file_contents.push(CompiledFile {
+            file_name: relative_path_str.to_string(),
+            file_content,
+            is_contract,
+        });
+    }
+
+    // calling here explicitly to avoid dropping the AutoCleanUp struct
+    auto_clean_up.clean_up().await;
+
+    Ok(Json(CompileResponse {
+        file_content: file_contents,
+        status: status_code_to_message(status.code()),
+        message,
+    }))
+}
diff --git a/crates/worker/src/errors.rs b/crates/worker/src/errors.rs
index cd810b81..dcaf3c5d 100644
--- a/crates/worker/src/errors.rs
+++ b/crates/worker/src/errors.rs
@@ -1,6 +1,7 @@
 use aws_sdk_dynamodb::config::http::HttpResponse;
 use aws_sdk_dynamodb::operation::delete_item::DeleteItemError;
 use aws_sdk_dynamodb::operation::get_item::GetItemError;
+use aws_sdk_dynamodb::operation::update_item::UpdateItemError;
 use aws_sdk_s3::operation::get_object::GetObjectError;
 use aws_sdk_s3::operation::list_objects_v2::ListObjectsV2Error;
 use aws_sdk_sqs::error::SdkError;
@@ -16,6 +17,8 @@ pub(crate) type SqsDeleteError = SdkError<DeleteMessageError, HttpResponse>;
 pub(crate) type DBDeleteError = SdkError<DeleteItemError, HttpResponse>;
 pub(crate) type DBGetError = SdkError<GetItemError, HttpResponse>;
 
+pub(crate) type DBUpdateError = SdkError<UpdateItemError, HttpResponse>;
+
 // S3 related errors
 pub(crate) type S3ListObjectsError = SdkError<ListObjectsV2Error, HttpResponse>;
 pub(crate) type S3GetObjectError = SdkError<GetObjectError, HttpResponse>;
@@ -28,6 +31,8 @@ pub enum DBError {
     GetItemError(#[from] DBGetError),
     #[error(transparent)]
     ItemFormatError(#[from] ItemError),
+    #[error(transparent)]
+    UpdateItemError(#[from] UpdateItemError)
 }
 
 #[derive(thiserror::Error, Debug)]
@@ -53,7 +58,9 @@ pub enum CompilationError {
     #[error("Item isn't in DB: {0}")]
     NoDBItemError(String),
     #[error("Unexpected status: {0}")]
-    UnexpectedStatusError(Status),
+    UnexpectedStatusError(String), // ignorable
+    #[error("Unsupported version: {0}")]
+    VersionNotSupported(String),
 }
 
 #[derive(thiserror::Error, Debug)]
From 93182573dc965fda4d6a8d73ea9d9adfe7d03c11 Mon Sep 17 00:00:00 2001
From: taco-paco
Date: Mon, 2 Sep 2024 12:35:15 +0900
Subject: [PATCH 08/21] feat: adapted compile.
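
Note: `Status` (and `Data`) are reserved words in DynamoDB expressions, so
update expressions that reference them directly are rejected at runtime.
A minimal sketch of the intended Pending -> Compiling transition using
expression attribute names, assuming the aws-sdk-dynamodb fluent builders
used elsewhere in this series (`mark_compiling` is a hypothetical helper,
not part of the patch):

    use aws_sdk_dynamodb::types::AttributeValue;

    use crate::dynamodb_client::DynamoDBClient;
    use crate::errors::DBError;

    // Flip an item from Pending (0) to Compiling (1); the condition
    // expression makes the call fail if another worker won the race.
    async fn mark_compiling(db_client: &DynamoDBClient, id: &str) -> Result<(), DBError> {
        db_client
            .client
            .update_item()
            .table_name(db_client.table_name.clone())
            .key("ID", AttributeValue::S(id.to_string()))
            // "#status" aliases the reserved word "Status".
            .update_expression("SET #status = :new")
            .condition_expression("#status = :current")
            .expression_attribute_names("#status", "Status")
            .expression_attribute_values(":new", AttributeValue::N("1".to_string()))
            .expression_attribute_values(":current", AttributeValue::N("0".to_string()))
            .send()
            .await?;

        Ok(())
    }
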
--- crates/types/src/item.rs | 139 ++++++++++++ crates/types/src/lib.rs | 124 +--------- crates/worker/Cargo.toml | 3 +- crates/worker/src/commands/compile.rs | 311 +++++++++++++++++--------- crates/worker/src/commands/mod.rs | 8 + crates/worker/src/dynamodb_client.rs | 4 +- crates/worker/src/errors.rs | 12 +- crates/worker/src/main.rs | 1 - crates/worker/src/sqs_client.rs | 1 - crates/worker/src/utils/lib.rs | 64 +++--- crates/worker/src/worker.rs | 2 - 11 files changed, 395 insertions(+), 274 deletions(-) create mode 100644 crates/types/src/item.rs diff --git a/crates/types/src/item.rs b/crates/types/src/item.rs new file mode 100644 index 00000000..7cd8bad4 --- /dev/null +++ b/crates/types/src/item.rs @@ -0,0 +1,139 @@ +use aws_sdk_dynamodb::types::AttributeValue; +use serde::Serialize; +use std::collections::HashMap; +use std::fmt; +use std::fmt::Formatter; + +#[derive(Debug, Clone, Serialize)] +pub enum Status { + // TODO: add FilesUploaded(?) + Pending, + Compiling, + Ready { + // TODO: Legacy support via enum here. + presigned_urls: Vec, + }, + Failed(String), +} + +impl fmt::Display for Status { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + Status::Pending => write!(f, "Pending"), + Status::Compiling => write!(f, "Compiling"), + Status::Ready {presigned_urls: _} => write!(f, "Ready"), + Status::Failed(msg) => write!(f, "Failed: {}", msg), + } + } +} + +impl From<&Status> for u32 { + fn from(value: &Status) -> Self { + match value { + Status::Pending => 0, + Status::Compiling => 1, + Status::Ready {presigned_urls: _} => 2, + Status::Failed(_) => 3, + } + } +} + +impl From for u32 { + fn from(value: Status) -> Self { + u32::from(&value) + } +} + +impl From for HashMap { + fn from(value: Status) -> Self { + match value.clone() { + Status::Pending | Status::Compiling => HashMap::from([( + "Status".into(), + AttributeValue::N(u32::from(&value).to_string()), + )]), + Status::Ready { presigned_urls } => HashMap::from([ + ( + "Status".into(), + AttributeValue::N(u32::from(&value).to_string()), + ), + ("Data".into(), AttributeValue::Ss(presigned_urls)), + ]), + Status::Failed(val) => HashMap::from([ + ( + "Status".into(), + AttributeValue::N(u32::from(&value).to_string()), + ), + ("Data".into(), AttributeValue::S(val)), + ]), + } + } +} + +#[derive(thiserror::Error, Debug)] +pub enum ItemError { + #[error("Invalid Item format")] + FormatError, + #[error(transparent)] + ParseError(#[from] std::num::ParseIntError), +} + +pub struct Item { + // TODO: uuid? + pub id: String, + pub status: Status, +} + +impl From for HashMap { + fn from(value: Item) -> Self { + let mut item_map = HashMap::from([("ID".into(), AttributeValue::S(value.id))]); + item_map.extend(HashMap::from(value.status)); + + item_map + } +} + +impl TryFrom<&HashMap> for Status { + type Error = ItemError; + fn try_from(value: &HashMap) -> Result { + let status = value.get("Status").ok_or(ItemError::FormatError)?; + let status: u32 = status + .as_n() + .map_err(|_| ItemError::FormatError)? 
+ .parse::()?; + let status = match status { + 0 => Status::Pending, + 1 => Status::Compiling, + 2 => { + let data = value.get("Data").ok_or(ItemError::FormatError)?; + let data = data.as_ss().map_err(|_| ItemError::FormatError)?; + + Status::Ready { + presigned_urls: data.clone(), + } + } + 3 => { + let data = value.get("Data").ok_or(ItemError::FormatError)?; + let data = data.as_s().map_err(|_| ItemError::FormatError)?; + + Status::Failed(data.clone()) + } + _ => return Err(ItemError::FormatError), + }; + + Ok(status) + } +} + +impl TryFrom> for Item { + type Error = ItemError; + fn try_from(value: HashMap) -> Result { + let id = value.get("ID").ok_or(ItemError::FormatError)?; + let id = id.as_s().map_err(|_| ItemError::FormatError)?; + let status = (&value).try_into()?; + + Ok(Item { + id: id.clone(), + status, + }) + } +} diff --git a/crates/types/src/lib.rs b/crates/types/src/lib.rs index 2fbd720e..61d3646a 100644 --- a/crates/types/src/lib.rs +++ b/crates/types/src/lib.rs @@ -1,8 +1,8 @@ -use aws_sdk_dynamodb::types::AttributeValue; +pub mod item; + use serde::{Deserialize, Serialize}; -use std::collections::HashMap; -use std::fmt; -use std::fmt::Formatter; + +pub const ARTIFACTS_FOLDER: &str = "artifacts"; #[derive(Serialize, Deserialize, Clone, Debug)] pub struct CompilationConfig { @@ -46,119 +46,3 @@ pub enum SqsMessage { request: VerificationRequest, }, } - -#[derive(Debug, Clone, Serialize)] -pub enum Status { - // TODO: add FilesUploaded(?) - Pending, - Compiling, - Ready(String), - Failed(String), -} - -impl fmt::Display for Status { - fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { - match self { - Status::Pending => write!(f, "Pending"), - Status::Compiling => write!(f, "Compiling"), - Status::Ready(msg) => write!(f, "Ready: {}", msg), - Status::Failed(msg) => write!(f, "Failed: {}", msg), - } - } -} - -impl From<&Status> for u32 { - fn from(value: &Status) -> Self { - match value { - Status::Pending => 0, - Status::Compiling => 1, - Status::Ready(_) => 2, - Status::Failed(_) => 3, - } - } -} - -impl From for HashMap { - fn from(value: Status) -> Self { - match value.clone() { - Status::Pending | Status::Compiling => HashMap::from([( - "Status".into(), - AttributeValue::N(u32::from(&value).to_string()), - )]), - Status::Ready(val) | Status::Failed(val) => HashMap::from([ - ( - "Status".into(), - AttributeValue::N(u32::from(&value).to_string()), - ), - ("Data".into(), AttributeValue::S(val)), - ]), - } - } -} - -#[derive(thiserror::Error, Debug)] -pub enum ItemError { - #[error("Invalid Item format")] - FormatError, - #[error(transparent)] - ParseError(#[from] std::num::ParseIntError), -} - -pub struct Item { - // TODO: uuid? - pub id: String, - pub status: Status, -} - -impl From for HashMap { - fn from(value: Item) -> Self { - let mut item_map = HashMap::from([("ID".into(), AttributeValue::S(value.id))]); - item_map.extend(HashMap::from(value.status)); - - item_map - } -} - -impl TryFrom<&HashMap> for Status { - type Error = ItemError; - fn try_from(value: &HashMap) -> Result { - let status = value.get("Status").ok_or(ItemError::FormatError)?; - let status: u32 = status - .as_n() - .map_err(|_| ItemError::FormatError)? 
- .parse::()?; - let status = match status { - 0 => Status::Pending, - 1 => Status::Compiling, - 2 => { - let data = value.get("Data").ok_or(ItemError::FormatError)?; - let data = data.as_s().map_err(|_| ItemError::FormatError)?; - - Status::Ready(data.clone()) - } - 3 => { - let data = value.get("Data").ok_or(ItemError::FormatError)?; - let data = data.as_s().map_err(|_| ItemError::FormatError)?; - - Status::Failed(data.clone()) - } - _ => return Err(ItemError::FormatError), - }; - - Ok(status) - } -} - -impl TryFrom> for Item { - type Error = ItemError; - fn try_from(value: HashMap) -> Result { - let id = value.get("ID").ok_or(ItemError::FormatError)?; - let id = id.as_s().map_err(|_| ItemError::FormatError)?; - let status = (&value).try_into()?; - - Ok(Item { - id: id.clone(), - status, - }) - } -} diff --git a/crates/worker/Cargo.toml b/crates/worker/Cargo.toml index f3758310..10eec4f5 100644 --- a/crates/worker/Cargo.toml +++ b/crates/worker/Cargo.toml @@ -20,7 +20,8 @@ aws-runtime = "1.4.0" aws-smithy-types = "1.2.2" async-channel = "2.3.1" chrono = "0.4.38" -crossbeam-queue = "0.3.11" +lazy_static = "1.5.0" +walkdir = "2.3.2" # Inner crates types = {workspace = true} \ No newline at end of file diff --git a/crates/worker/src/commands/compile.rs b/crates/worker/src/commands/compile.rs index 1e230180..cced5272 100644 --- a/crates/worker/src/commands/compile.rs +++ b/crates/worker/src/commands/compile.rs @@ -1,29 +1,123 @@ +use crate::commands::SPAWN_SEMAPHORE; +use aws_sdk_dynamodb::error::SdkError; +use aws_sdk_dynamodb::operation::update_item::UpdateItemError; use aws_sdk_dynamodb::types::AttributeValue; +use aws_sdk_s3::presigning::PresigningConfig; use aws_sdk_s3::types::Object; use std::ops::Add; use std::path::Path; use std::process::Stdio; -use aws_sdk_dynamodb::error::SdkError; -use aws_sdk_dynamodb::operation::update_item::UpdateItemError; -use tracing::log::Level::Error; +use std::time::Duration; use tracing::warn; use tracing::{error, info}; -use types::{CompilationConfig, CompilationRequest, Item, Status}; -use uuid::Uuid; +use types::item::{Item, Status}; +use types::{CompilationConfig, CompilationRequest, ARTIFACTS_FOLDER}; use crate::dynamodb_client::DynamoDBClient; -use crate::errors::CompilationError::NoDBItemError; use crate::errors::{CompilationError, DBError, S3Error}; use crate::utils::cleaner::AutoCleanUp; use crate::utils::hardhat_config::HardhatConfigBuilder; use crate::utils::lib::{ - generate_folder_name, status_code_to_message, DEFAULT_SOLIDITY_VERSION, SOL_ROOT, - ZKSOLC_VERSIONS, + initialize_files, list_files_in_directory, + DEFAULT_SOLIDITY_VERSION, SOL_ROOT, ZKSOLC_VERSIONS, }; -struct CompilationInput { +pub struct CompilationFile { + // legacy name. file_path really + pub file_name: String, + pub file_content: Vec, +} + +pub struct CompilationInput { pub config: CompilationConfig, - pub contracts: Vec>, + // legacy. 
files really + pub contracts: Vec, +} + +pub struct CompilationArtifact { + pub file_name: String, + pub file_content: Vec, + pub is_contract: bool, +} + +pub async fn compile( + request: CompilationRequest, + db_client: &DynamoDBClient, + s3_client: &aws_sdk_s3::Client, +) -> Result<(), CompilationError> { + let item = db_client.get_item(request.id.clone()).await?; + let item: Item = match item { + Some(item) => item, + None => { + error!("No item id: {}", request.id); + return Err(CompilationError::NoDBItemError(request.id)); + } + }; + + match item.status { + Status::Pending => {} + status => { + warn!("Item already processing: {}", status); + return Err(CompilationError::UnexpectedStatusError(status.to_string())); + } + } + + let files = extract_files(request.id.clone(), s3_client).await?; + { + let db_update_result = db_client + .client + .update_item() + .table_name(db_client.table_name.clone()) + .key("ID", AttributeValue::S(request.id.clone())) + .update_expression("SET Status = :newStatus") + .condition_expression("Status = :currentStatus") // TODO: check + .expression_attribute_values( + ":newStatus", + AttributeValue::N(u32::from(Status::Compiling).to_string()), + ) + .expression_attribute_values( + ":currentStatus", + AttributeValue::N(u32::from(Status::Pending).to_string()), + ) + .send() + .await; + match db_update_result { + Ok(_) => {} + Err(SdkError::ServiceError(err)) => match err.err() { + UpdateItemError::ConditionalCheckFailedException(_) => { + return Err(CompilationError::UnexpectedStatusError( + "Concurrent status change from another instance".into(), + )) + } + _ => return Err(DBError::from(SdkError::ServiceError(err)).into()), + }, + Err(err) => return Err(DBError::from(err).into()), + } + } + + match do_compile( + request.id.clone(), + CompilationInput { + config: request.config, + contracts: files, + }, + ) + .await + { + Ok(val) => Ok(on_compilation_success(&request.id, db_client, s3_client, val).await?), + Err(err) => match err { + CompilationError::CompilationFailureError(value) => { + Ok(on_compilation_failed(&request.id, db_client, value).await?) 
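+                // User-facing failures (broken code, unsupported compiler
+                // version) are persisted as Status::Failed rather than bubbled
+                // up, so the item never sits in Compiling forever; only
+                // infrastructure errors propagate to the caller.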
+ } + CompilationError::VersionNotSupported(value) => Ok(on_compilation_failed( + &request.id, + &db_client, + format!("Unsupported compiler version: {}", value), + ) + .await?), + _ => Err(err), + }, + } } async fn list_all_keys( @@ -74,7 +168,7 @@ async fn list_all_keys( async fn extract_files( id: String, s3_client: &aws_sdk_s3::Client, -) -> Result>, S3Error> { +) -> Result, S3Error> { let objects = list_all_keys(&s3_client, id.to_string(), "TODO").await?; let mut files = vec![]; @@ -102,72 +196,91 @@ async fn extract_files( return Err(S3Error::InvalidObjectError); } - files.push(contents); + files.push(CompilationFile { + file_content: contents, + file_name: key.to_string(), + }); } Ok(files) } -pub async fn compile( - request: CompilationRequest, +pub async fn on_compilation_success( + id: &str, db_client: &DynamoDBClient, s3_client: &aws_sdk_s3::Client, + compilation_artifacts: Vec, ) -> Result<(), CompilationError> { - let item = db_client.get_item(request.id.clone()).await?; - let item: Item = match item { - Some(item) => item, - None => { - error!("No item id: {}", request.id); - return Err(NoDBItemError(request.id)); - } - }; + let mut presigned_urls = Vec::with_capacity(compilation_artifacts.len()); + for el in compilation_artifacts { + let file_key = format!("{}/{}/{}", ARTIFACTS_FOLDER, id, el.file_name); + s3_client + .put_object() + .bucket("TODO") + .key(file_key.clone()) + .body(el.file_content.into()) + .send() + .await + .map_err(S3Error::from)?; - match item.status { - Status::Pending => {} - status => { - warn!("Item already processing: {}", status); - return Err(CompilationError::UnexpectedStatusError(status.to_string())); - } + let expires_in = PresigningConfig::expires_in(Duration::from_secs(5 * 60 * 60)).unwrap(); + let presigned_request = s3_client + .get_object() + .bucket("TODO") + .key(file_key) + .presigned(expires_in) + .await + .map_err(S3Error::from)?; + + presigned_urls.push(presigned_request.uri().to_string()); } - let files = extract_files(request.id.clone(), s3_client).await?; + db_client + .client + .update_item() + .table_name(db_client.table_name.clone()) + .key("ID", AttributeValue::S(id.to_string())) + .update_expression("SET Status = :newStatus") + .update_expression("SET Data = :data") + .expression_attribute_values( + ":newStatus", + AttributeValue::N(2.to_string()), // Ready + ) + .expression_attribute_values(":data", AttributeValue::Ss(presigned_urls)) + .send() + .await + .map_err(DBError::from)?; - { - let new_item = Item { - id: item.id.clone(), - status: Status::Compiling, - }; - let db_update_result = db_client - .client - .update_item() - .table_name(db_client.table_name.clone()) - .update_expression("SET Status=1") - .key("ID", AttributeValue::S(request.id.clone())) - .condition_expression("Status=0") // TODO: check - .send() - .await; - match db_update_result { - Ok(_) => {}, - Err(SdkError::ServiceError(err)) => match err.err() { - UpdateItemError::ConditionalCheckFailedException(err) => { - return Err(CompilationError::UnexpectedStatusError("Concurrent status change from another instance".into())) - }, - _ => return Err(SdkError::ServiceError(err).into()) - } - Err(err) => Err(err).into(), - } - } + Ok(()) +} - let asd = do_compile(request.id,CompilationInput { - config: request.config, - contracts: files, - }) - .await; +pub async fn on_compilation_failed( + id: &str, + db_client: &DynamoDBClient, + message: String, +) -> Result<(), DBError> { + db_client + .client + .update_item() + .table_name(db_client.table_name.clone()) + 
.key("ID", AttributeValue::S(id.to_string())) + .update_expression("SET Status = :newStatus") + .update_expression("SET Data = :data") + .expression_attribute_values( + ":newStatus", + AttributeValue::N(3.to_string()), // Failed + ) + .expression_attribute_values(":data", AttributeValue::S(message)) + .send() + .await?; - // TODO: Ok(()) } -pub async fn do_compile(namespace: String, compilation_request: CompilationInput) -> Result<(), CompilationError> { + +pub async fn do_compile( + namespace: String, + compilation_request: CompilationInput, +) -> Result, CompilationError> { let zksolc_version = compilation_request.config.version; // check if the version is supported @@ -188,12 +301,8 @@ pub async fn do_compile(namespace: String, compilation_request: CompilationInput let hardhat_config_path = Path::new(&user_files_path_str).join("hardhat.config.ts"); // instantly create the directories - tokio::fs::create_dir_all(workspace_path) - .await - .map_err(ApiError::FailedToWriteFile)?; - tokio::fs::create_dir_all(artifacts_path) - .await - .map_err(ApiError::FailedToWriteFile)?; + tokio::fs::create_dir_all(workspace_path).await?; + tokio::fs::create_dir_all(artifacts_path).await?; // when the compilation is done, clean up the directories // it will be called when the AutoCleanUp struct is dropped @@ -206,20 +315,15 @@ pub async fn do_compile(namespace: String, compilation_request: CompilationInput hardhat_config_builder .zksolc_version(&zksolc_version) .solidity_version(DEFAULT_SOLIDITY_VERSION); - if let Some(target_path) = compilation_request.target_path { + if let Some(target_path) = compilation_request.config.target_path { hardhat_config_builder.paths_sources(&target_path); } let hardhat_config_content = hardhat_config_builder.build().to_string_config(); // create parent directories - tokio::fs::create_dir_all(hardhat_config_path.parent().unwrap()) - .await - .map_err(ApiError::FailedToWriteFile)?; - - tokio::fs::write(hardhat_config_path, hardhat_config_content) - .await - .map_err(ApiError::FailedToWriteFile)?; + tokio::fs::create_dir_all(hardhat_config_path.parent().unwrap()).await?; + tokio::fs::write(hardhat_config_path, hardhat_config_content).await?; // filter test files from compilation candidates let contracts = compilation_request @@ -229,60 +333,50 @@ pub async fn do_compile(namespace: String, compilation_request: CompilationInput .collect(); // initialize the files - initialize_files(contracts, workspace_path).await?; + initialize_files(workspace_path, contracts).await?; // Limit number of spawned processes. 
RAII released let _permit = SPAWN_SEMAPHORE.acquire().await.expect("Expired semaphore"); - let command = tokio::process::Command::new("npx") + let process = tokio::process::Command::new("npx") .arg("hardhat") .arg("compile") .current_dir(workspace_path) .stdout(Stdio::piped()) .stderr(Stdio::piped()) - .spawn(); - let process = command.map_err(ApiError::FailedToExecuteCommand)?; - let output = process - .wait_with_output() - .await - .map_err(ApiError::FailedToReadOutput)?; + .spawn()?; + + let output = process.wait_with_output().await?; let status = output.status; let message = String::from_utf8_lossy(&output.stdout).to_string(); + info!("Output: \n{:?}", message); - info!("Output: \n{:?}", String::from_utf8_lossy(&output.stdout)); if !status.success() { - error!( - "Compilation error: {}", - String::from_utf8_lossy(&output.stderr) - ); - return Ok(Json(CompileResponse { - file_content: vec![], - message: format!( - "Failed to compile:\n{}", - String::from_utf8_lossy(&output.stderr) - ), - status: "Error".to_string(), - })); + let err_msg = String::from_utf8_lossy(&output.stderr); + error!("Compilation error: {}", err_msg); + // TODO: handle + return Err(CompilationError::CompilationFailureError(err_msg.to_string())); } // fetch the files in the artifacts directory - let mut file_contents: Vec = vec![]; - let file_paths = list_files_in_directory(artifacts_path); - + let mut file_contents: Vec = vec![]; + let file_paths = + list_files_in_directory(artifacts_path).expect("Unexpected error listing artifact"); for file_path in file_paths.iter() { - let file_content = tokio::fs::read_to_string(file_path) - .await - .map_err(ApiError::FailedToReadFile)?; + // TODO: change this - don't store files in RAM. copy 1-1 to S3 + let file_content = tokio::fs::read(file_path).await?; let full_path = Path::new(file_path); - let relative_path = full_path.strip_prefix(artifacts_path).unwrap_or(full_path); + + let relative_path = full_path + .strip_prefix(artifacts_path) + .expect("Unexpected prefix"); let relative_path_str = relative_path.to_str().unwrap(); - // todo(varex83): is it the best way to check? let is_contract = !relative_path_str.ends_with(".dbg.json") && relative_path_str.ends_with(".json"); - file_contents.push(CompiledFile { + file_contents.push(CompilationArtifact { file_name: relative_path_str.to_string(), file_content, is_contract, @@ -291,10 +385,5 @@ pub async fn do_compile(namespace: String, compilation_request: CompilationInput // calling here explicitly to avoid dropping the AutoCleanUp struct auto_clean_up.clean_up().await; - - Ok(Json(CompileResponse { - file_content: file_contents, - status: status_code_to_message(status.code()), - message, - })) + Ok(file_contents) } diff --git a/crates/worker/src/commands/mod.rs b/crates/worker/src/commands/mod.rs index 48755763..b540cd7d 100644 --- a/crates/worker/src/commands/mod.rs +++ b/crates/worker/src/commands/mod.rs @@ -1,2 +1,10 @@ +use lazy_static::lazy_static; +use tokio::sync::Semaphore; + pub mod compile; pub mod verify; + +const PROCESS_SPAWN_LIMIT: usize = 8; +lazy_static! 
{ + static ref SPAWN_SEMAPHORE: Semaphore = Semaphore::new(PROCESS_SPAWN_LIMIT); +} diff --git a/crates/worker/src/dynamodb_client.rs b/crates/worker/src/dynamodb_client.rs index 777a1dc0..6eaf13d5 100644 --- a/crates/worker/src/dynamodb_client.rs +++ b/crates/worker/src/dynamodb_client.rs @@ -1,7 +1,7 @@ -use crate::errors::{DBDeleteError, DBError, DBGetError}; +use crate::errors::{ DBError}; use aws_sdk_dynamodb::types::AttributeValue; use aws_sdk_dynamodb::Client; -use types::Item; +use types::item::Item; #[derive(Clone)] pub struct DynamoDBClient { diff --git a/crates/worker/src/errors.rs b/crates/worker/src/errors.rs index dcaf3c5d..6cffd6ef 100644 --- a/crates/worker/src/errors.rs +++ b/crates/worker/src/errors.rs @@ -4,10 +4,11 @@ use aws_sdk_dynamodb::operation::get_item::GetItemError; use aws_sdk_dynamodb::operation::update_item::UpdateItemError; use aws_sdk_s3::operation::get_object::GetObjectError; use aws_sdk_s3::operation::list_objects_v2::ListObjectsV2Error; +use aws_sdk_s3::operation::put_object::PutObjectError; use aws_sdk_sqs::error::SdkError; use aws_sdk_sqs::operation::delete_message::DeleteMessageError; use aws_sdk_sqs::operation::receive_message::ReceiveMessageError; -use types::{ItemError, Status}; +use types::item::{ItemError}; // SQS related errors pub(crate) type SqsReceiveError = SdkError; @@ -22,6 +23,7 @@ pub(crate) type DBUpdateError = SdkError; // S3 related errors pub(crate) type S3ListObjectsError = SdkError; pub(crate) type S3GetObjectError = SdkError; +pub(crate) type S3PutObjectError = SdkError; #[derive(thiserror::Error, Debug)] pub enum DBError { @@ -32,7 +34,7 @@ pub enum DBError { #[error(transparent)] ItemFormatError(#[from] ItemError), #[error(transparent)] - UpdateItemError(#[from] UpdateItemError) + UpdateItemError(#[from] DBUpdateError), } #[derive(thiserror::Error, Debug)] @@ -44,6 +46,8 @@ pub enum S3Error { #[error(transparent)] ListObjectsError(#[from] S3ListObjectsError), #[error(transparent)] + PutObjectError(#[from] S3PutObjectError), + #[error(transparent)] IoError(#[from] std::io::Error), #[error(transparent)] ByteStreamError(#[from] aws_smithy_types::byte_stream::error::Error), @@ -61,6 +65,10 @@ pub enum CompilationError { UnexpectedStatusError(String), // ignorable #[error("Unsupported version: {0}")] VersionNotSupported(String), + #[error(transparent)] + IoError(#[from] std::io::Error), + #[error("Failed to compile: {0}")] + CompilationFailureError(String), } #[derive(thiserror::Error, Debug)] diff --git a/crates/worker/src/main.rs b/crates/worker/src/main.rs index 5556e6af..1878e5fb 100644 --- a/crates/worker/src/main.rs +++ b/crates/worker/src/main.rs @@ -8,7 +8,6 @@ mod worker; use aws_config::BehaviorVersion; use aws_runtime::env_config::file::{EnvConfigFileKind, EnvConfigFiles}; -use std::ops::Deref; use std::time::Duration; use crate::{sqs_client::SqsClient, sqs_listener::SqsListener}; diff --git a/crates/worker/src/sqs_client.rs b/crates/worker/src/sqs_client.rs index 83cc7109..944fdae4 100644 --- a/crates/worker/src/sqs_client.rs +++ b/crates/worker/src/sqs_client.rs @@ -36,7 +36,6 @@ macro_rules! 
match_result { } other => Err(other), }, - Err(err) => Err(err.into()), } }; } diff --git a/crates/worker/src/utils/lib.rs b/crates/worker/src/utils/lib.rs index 74db6b2a..aea64b33 100644 --- a/crates/worker/src/utils/lib.rs +++ b/crates/worker/src/utils/lib.rs @@ -1,6 +1,8 @@ +use crate::commands::compile::CompilationFile; use std::path::{Path, PathBuf}; use tracing::debug; use uuid::Uuid; +use walkdir::WalkDir; pub const SOL_ROOT: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/", "hardhat_env/workspaces/"); pub const ZK_CACHE_ROOT: &str = concat!( @@ -138,22 +140,17 @@ pub fn generate_folder_name() -> String { uuid.to_string() } -// pub fn list_files_in_directory>(path: P) -> Vec { -// let mut file_paths = Vec::new(); -// -// for entry in WalkDir::new(path) { -// match entry { -// Ok(entry) => { -// if entry.file_type().is_file() { -// file_paths.push(entry.path().display().to_string()); -// } -// } -// Err(e) => println!("Error reading directory: {}", e), -// } -// } -// -// file_paths -// } +pub fn list_files_in_directory>(path: P) -> Result, walkdir::Error> { + let mut file_paths = Vec::new(); + for entry in WalkDir::new(path) { + let entry = entry?; + if entry.file_type().is_file() { + file_paths.push(entry.path().display().to_string()); + } + } + + Ok(file_paths) +} // pub fn generate_mock_compile_request() -> CompilationRequest { // CompilationRequest { @@ -189,21 +186,20 @@ pub fn generate_mock_solidity_file_content() -> String { .to_string() } -// pub async fn initialize_files(files: Vec, file_path: &Path) -> Result<()> { -// for file in files { -// let file_path_str = format!("{}/{}", file_path.to_str().unwrap(), file.file_name); -// let file_path = Path::new(&file_path_str); -// -// // create parent directories -// tokio::fs::create_dir_all(file_path.parent().unwrap()) -// .await -// .map_err(ApiError::FailedToWriteFile)?; -// -// // write file -// tokio::fs::write(file_path, file.file_content.clone()) -// .await -// .map_err(ApiError::FailedToWriteFile)?; -// } -// -// Ok(()) -// } +pub async fn initialize_files( + dst_dir: &Path, + files: Vec, +) -> Result<(), std::io::Error> { + for file in files { + let file_path_str = format!("{}/{}", dst_dir.to_str().unwrap(), file.file_name); + let file_path = Path::new(&file_path_str); + + // create parent directories + tokio::fs::create_dir_all(file_path.parent().unwrap()).await?; + + // write file + tokio::fs::write(file_path, file.file_content.clone()).await?; + } + + Ok(()) +} diff --git a/crates/worker/src/worker.rs b/crates/worker/src/worker.rs index ec211ef5..72c20ee7 100644 --- a/crates/worker/src/worker.rs +++ b/crates/worker/src/worker.rs @@ -1,7 +1,5 @@ use crate::commands::compile::compile; use crate::dynamodb_client::DynamoDBClient; -use crossbeam_queue::ArrayQueue; -use std::fmt::{Display, Formatter}; use std::num::NonZeroUsize; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; From e40583030a8cd07dff2875c212d06ec210e8c633 Mon Sep 17 00:00:00 2001 From: taco-paco Date: Mon, 2 Sep 2024 19:04:39 +0900 Subject: [PATCH 09/21] feat: s3 client added --- crates/types/src/item.rs | 4 +- crates/worker/Cargo.toml | 3 +- crates/worker/src/commands/compile.rs | 125 +++-------------------- crates/worker/src/dynamodb_client.rs | 2 +- crates/worker/src/errors.rs | 2 +- crates/worker/src/main.rs | 35 ++++--- crates/worker/src/s3_client.rs | 139 ++++++++++++++++++++++++++ crates/worker/src/worker.rs | 28 +++++- 8 files changed, 204 insertions(+), 134 deletions(-) create mode 100644 crates/worker/src/s3_client.rs diff --git 
a/crates/types/src/item.rs b/crates/types/src/item.rs index 7cd8bad4..7e573fc8 100644 --- a/crates/types/src/item.rs +++ b/crates/types/src/item.rs @@ -21,7 +21,7 @@ impl fmt::Display for Status { match self { Status::Pending => write!(f, "Pending"), Status::Compiling => write!(f, "Compiling"), - Status::Ready {presigned_urls: _} => write!(f, "Ready"), + Status::Ready { presigned_urls: _ } => write!(f, "Ready"), Status::Failed(msg) => write!(f, "Failed: {}", msg), } } @@ -32,7 +32,7 @@ impl From<&Status> for u32 { match value { Status::Pending => 0, Status::Compiling => 1, - Status::Ready {presigned_urls: _} => 2, + Status::Ready { presigned_urls: _ } => 2, Status::Failed(_) => 3, } } diff --git a/crates/worker/Cargo.toml b/crates/worker/Cargo.toml index 10eec4f5..890d7d4d 100644 --- a/crates/worker/Cargo.toml +++ b/crates/worker/Cargo.toml @@ -20,8 +20,9 @@ aws-runtime = "1.4.0" aws-smithy-types = "1.2.2" async-channel = "2.3.1" chrono = "0.4.38" +futures = "0.3.30" lazy_static = "1.5.0" walkdir = "2.3.2" # Inner crates -types = {workspace = true} \ No newline at end of file +types = {workspace = true} diff --git a/crates/worker/src/commands/compile.rs b/crates/worker/src/commands/compile.rs index cced5272..186ddcfd 100644 --- a/crates/worker/src/commands/compile.rs +++ b/crates/worker/src/commands/compile.rs @@ -3,8 +3,6 @@ use aws_sdk_dynamodb::error::SdkError; use aws_sdk_dynamodb::operation::update_item::UpdateItemError; use aws_sdk_dynamodb::types::AttributeValue; use aws_sdk_s3::presigning::PresigningConfig; -use aws_sdk_s3::types::Object; -use std::ops::Add; use std::path::Path; use std::process::Stdio; use std::time::Duration; @@ -14,12 +12,12 @@ use types::item::{Item, Status}; use types::{CompilationConfig, CompilationRequest, ARTIFACTS_FOLDER}; use crate::dynamodb_client::DynamoDBClient; -use crate::errors::{CompilationError, DBError, S3Error}; +use crate::errors::{CompilationError, DBError}; +use crate::s3_client::S3Client; use crate::utils::cleaner::AutoCleanUp; use crate::utils::hardhat_config::HardhatConfigBuilder; use crate::utils::lib::{ - initialize_files, list_files_in_directory, - DEFAULT_SOLIDITY_VERSION, SOL_ROOT, ZKSOLC_VERSIONS, + initialize_files, list_files_in_directory, DEFAULT_SOLIDITY_VERSION, SOL_ROOT, ZKSOLC_VERSIONS, }; pub struct CompilationFile { @@ -43,7 +41,7 @@ pub struct CompilationArtifact { pub async fn compile( request: CompilationRequest, db_client: &DynamoDBClient, - s3_client: &aws_sdk_s3::Client, + s3_client: &S3Client, ) -> Result<(), CompilationError> { let item = db_client.get_item(request.id.clone()).await?; let item: Item = match item { @@ -62,7 +60,8 @@ pub async fn compile( } } - let files = extract_files(request.id.clone(), s3_client).await?; + let dir = format!("{}/", request.id); + let files = s3_client.extract_files(&dir).await?; { let db_update_result = db_client .client @@ -102,7 +101,7 @@ pub async fn compile( contracts: files, }, ) - .await + .await { Ok(val) => Ok(on_compilation_success(&request.id, db_client, s3_client, val).await?), Err(err) => match err { @@ -114,123 +113,27 @@ pub async fn compile( &db_client, format!("Unsupported compiler version: {}", value), ) - .await?), + .await?), _ => Err(err), }, } } -async fn list_all_keys( - client: &aws_sdk_s3::Client, - id: String, - bucket: &str, -) -> Result, S3Error> { - let mut objects = Vec::new(); - let mut continuation_token: Option = None; - - let id = id.clone().add("/"); - loop { - let mut request = client - .list_objects_v2() - .bucket(bucket) - .delimiter('/') - 
.prefix(id.clone()); - if let Some(token) = continuation_token { - request = request.continuation_token(token); - } - - let response = request.send().await?; - if let Some(contents) = response.contents { - objects.extend(contents); - } - - let is_truncated = if let Some(is_truncated) = response.is_truncated { - is_truncated - } else { - warn!("is_truncated empty"); - break; - }; - - if !is_truncated { - break; - } - - continuation_token = response.next_continuation_token; - if continuation_token.is_none() { - error!("continuation_token wasn't set!"); - break; - } - } - - Ok(objects) -} - -async fn extract_files( - id: String, - s3_client: &aws_sdk_s3::Client, -) -> Result, S3Error> { - let objects = list_all_keys(&s3_client, id.to_string(), "TODO").await?; - - let mut files = vec![]; - for object in objects { - let key = object.key().ok_or(S3Error::InvalidObjectError)?; - let expected_size = object.size.ok_or(S3Error::InvalidObjectError)?; - - let mut object = s3_client - .get_object() - .bucket("TODO:") - .key(key) - .send() - .await?; - - let mut byte_count = 0; - let mut contents = Vec::new(); - while let Some(bytes) = object.body.try_next().await? { - let bytes_len = bytes.len(); - std::io::Write::write_all(&mut contents, &bytes)?; - byte_count += bytes_len; - } - - if byte_count as i64 != expected_size { - error!("Fetched num bytes != expected size of file."); - return Err(S3Error::InvalidObjectError); - } - - files.push(CompilationFile { - file_content: contents, - file_name: key.to_string(), - }); - } - - Ok(files) -} - pub async fn on_compilation_success( id: &str, db_client: &DynamoDBClient, - s3_client: &aws_sdk_s3::Client, + s3_client: &S3Client, compilation_artifacts: Vec, ) -> Result<(), CompilationError> { let mut presigned_urls = Vec::with_capacity(compilation_artifacts.len()); for el in compilation_artifacts { let file_key = format!("{}/{}/{}", ARTIFACTS_FOLDER, id, el.file_name); - s3_client - .put_object() - .bucket("TODO") - .key(file_key.clone()) - .body(el.file_content.into()) - .send() - .await - .map_err(S3Error::from)?; + s3_client.put_object(&file_key, el.file_content).await?; let expires_in = PresigningConfig::expires_in(Duration::from_secs(5 * 60 * 60)).unwrap(); let presigned_request = s3_client - .get_object() - .bucket("TODO") - .key(file_key) - .presigned(expires_in) - .await - .map_err(S3Error::from)?; + .get_object_presigned(&file_key, expires_in) + .await?; presigned_urls.push(presigned_request.uri().to_string()); } @@ -356,7 +259,9 @@ pub async fn do_compile( let err_msg = String::from_utf8_lossy(&output.stderr); error!("Compilation error: {}", err_msg); // TODO: handle - return Err(CompilationError::CompilationFailureError(err_msg.to_string())); + return Err(CompilationError::CompilationFailureError( + err_msg.to_string(), + )); } // fetch the files in the artifacts directory diff --git a/crates/worker/src/dynamodb_client.rs b/crates/worker/src/dynamodb_client.rs index 6eaf13d5..c6b0c72a 100644 --- a/crates/worker/src/dynamodb_client.rs +++ b/crates/worker/src/dynamodb_client.rs @@ -1,4 +1,4 @@ -use crate::errors::{ DBError}; +use crate::errors::DBError; use aws_sdk_dynamodb::types::AttributeValue; use aws_sdk_dynamodb::Client; use types::item::Item; diff --git a/crates/worker/src/errors.rs b/crates/worker/src/errors.rs index 6cffd6ef..17403bb9 100644 --- a/crates/worker/src/errors.rs +++ b/crates/worker/src/errors.rs @@ -8,7 +8,7 @@ use aws_sdk_s3::operation::put_object::PutObjectError; use aws_sdk_sqs::error::SdkError; use 
aws_sdk_sqs::operation::delete_message::DeleteMessageError; use aws_sdk_sqs::operation::receive_message::ReceiveMessageError; -use types::item::{ItemError}; +use types::item::ItemError; // SQS related errors pub(crate) type SqsReceiveError = SdkError; diff --git a/crates/worker/src/main.rs b/crates/worker/src/main.rs index 1878e5fb..e23257d5 100644 --- a/crates/worker/src/main.rs +++ b/crates/worker/src/main.rs @@ -1,6 +1,7 @@ mod commands; mod dynamodb_client; mod errors; +mod s3_client; mod sqs_client; mod sqs_listener; mod utils; @@ -8,14 +9,19 @@ mod worker; use aws_config::BehaviorVersion; use aws_runtime::env_config::file::{EnvConfigFileKind, EnvConfigFiles}; -use std::time::Duration; +use std::num::NonZeroUsize; -use crate::{sqs_client::SqsClient, sqs_listener::SqsListener}; +use crate::dynamodb_client::DynamoDBClient; +use crate::s3_client::S3Client; +use crate::sqs_client::SqsClient; +use crate::worker::WorkerEngine; const AWS_PROFILE_DEFAULT: &str = "dev"; // TODO: remove pub(crate) const QUEUE_URL_DEFAULT: &str = "https://sqs.ap-southeast-2.amazonaws.com/266735844848/zksync-sqs"; +const TABLE_NAME_DEFAULT: &str = "zksync-table"; +const BUCKET_NAME_DEFAULT: &str = "zksync-compilation-s3"; // TODO: state synchronization @@ -36,17 +42,16 @@ async fn main() { let sqs_client = aws_sdk_sqs::Client::new(&config); let sqs_client = SqsClient::new(sqs_client, QUEUE_URL_DEFAULT); - let sqs_listener = SqsListener::new(sqs_client, Duration::from_secs(1)); - let sqs_receiver = sqs_listener.receiver(); - - while let Ok(message) = sqs_receiver.recv().await { - println!("{:?}", message); - if let Some(receipt_handle) = message.receipt_handle { - sqs_receiver - .delete_message(receipt_handle) - .await - .map_err(|err| println!("delete error: {}", err.to_string())) - .unwrap(); - } - } + // Initialize DynamoDb client + let db_client = aws_sdk_dynamodb::Client::new(&config); + let db_client = DynamoDBClient::new(db_client, TABLE_NAME_DEFAULT); + + // Initialize S3 client + let s3_client = aws_sdk_s3::Client::new(&config); + let s3_client = S3Client::new(s3_client, BUCKET_NAME_DEFAULT); + + let mut engine = WorkerEngine::new(sqs_client, db_client, s3_client, true); + engine.start(NonZeroUsize::new(10).unwrap()); + + engine.wait().await; } diff --git a/crates/worker/src/s3_client.rs b/crates/worker/src/s3_client.rs new file mode 100644 index 00000000..dc8f66d2 --- /dev/null +++ b/crates/worker/src/s3_client.rs @@ -0,0 +1,139 @@ +use crate::commands::compile::CompilationFile; +use crate::errors::S3Error; +use aws_sdk_s3::presigning::{PresignedRequest, PresigningConfig}; +use aws_sdk_s3::types::Object; +use aws_sdk_s3::Client; +use aws_smithy_types::byte_stream::ByteStream; +use std::io::Write; +use tracing::{error, warn}; + +#[derive(Clone)] +pub struct S3Client { + pub client: Client, + pub bucket_name: String, +} + +impl S3Client { + pub fn new(client: Client, bucket_name: &str) -> Self { + Self { + bucket_name: bucket_name.to_string(), + client, + } + } + + pub async fn extract_files(&self, dir: &str) -> Result, S3Error> { + let objects = self.list_all_keys(dir).await?; + + let mut files = vec![]; + for object in objects { + let key = object.key().ok_or(S3Error::InvalidObjectError)?; + let expected_size = object.size.ok_or(S3Error::InvalidObjectError)?; + + let mut contents = Vec::with_capacity(expected_size as usize); + self.get_object_into(key, &mut contents).await?; + if contents.len() as i64 != expected_size { + error!("Fetched num bytes != expected size of file."); + return 
Err(S3Error::InvalidObjectError); + } + + files.push(CompilationFile { + file_content: contents, + file_name: key.to_string(), + }); + } + + Ok(files) + } + + pub async fn get_object_into(&self, key: &str, writer: &mut impl Write) -> Result<(), S3Error> { + let mut object = self + .client + .get_object() + .bucket(self.bucket_name.clone()) + .key(key) + .send() + .await?; + + while let Some(bytes) = object.body.try_next().await? { + writer.write_all(&bytes)?; + } + + Ok(()) + } + + pub async fn get_object(&self, key: &str) -> Result, S3Error> { + let mut contents = vec![]; + self.get_object_into(key, &mut contents).await?; + + Ok(contents) + } + + pub async fn get_object_presigned( + &self, + key: &str, + expires_in: PresigningConfig, + ) -> Result { + Ok(self + .client + .get_object() + .bucket(self.bucket_name.clone()) + .key(key.to_string()) + .presigned(expires_in) + .await + .map_err(S3Error::from)?) + } + + pub async fn put_object(&self, key: &str, data: impl Into) -> Result<(), S3Error> { + let _ = self + .client + .put_object() + .bucket(self.bucket_name.clone()) + .key(key.to_string()) + .body(data.into()) + .send() + .await?; + + Ok(()) + } + + pub async fn list_all_keys(&self, dir: &str) -> Result, S3Error> { + let mut objects = Vec::new(); + let mut continuation_token: Option = None; + + loop { + let mut request = self + .client + .list_objects_v2() + .bucket(self.bucket_name.clone()) + .delimiter('/') + .prefix(dir.to_string()); + if let Some(token) = continuation_token { + request = request.continuation_token(token); + } + + let response = request.send().await?; + if let Some(contents) = response.contents { + objects.extend(contents); + } + + let is_truncated = if let Some(is_truncated) = response.is_truncated { + is_truncated + } else { + warn!("is_truncated empty"); + break; + }; + + if !is_truncated { + break; + } + + continuation_token = response.next_continuation_token; + if continuation_token.is_none() { + error!("continuation_token wasn't set!"); + break; + } + } + + Ok(objects) + } +} diff --git a/crates/worker/src/worker.rs b/crates/worker/src/worker.rs index 72c20ee7..4e169124 100644 --- a/crates/worker/src/worker.rs +++ b/crates/worker/src/worker.rs @@ -1,5 +1,6 @@ use crate::commands::compile::compile; use crate::dynamodb_client::DynamoDBClient; +use crate::s3_client::S3Client; use std::num::NonZeroUsize; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; @@ -27,7 +28,7 @@ impl RunningWorker { pub fn new( sqs_listener: SqsListener, db_client: DynamoDBClient, - s3_client: aws_sdk_s3::Client, + s3_client: S3Client, num_workers: usize, expiration_timestamps: Arc>>, ) -> Self { @@ -61,7 +62,7 @@ impl RunningWorker { async fn worker( sqs_receiver: SqsReceiver, db_client: DynamoDBClient, - s3_client: aws_sdk_s3::Client, + s3_client: S3Client, expiration_timestamps: Arc>>, ) { // TODO: process error @@ -98,12 +99,16 @@ impl RunningWorker { let _ = sqs_receiver.delete_message(receipt_handle).await; } } + + pub async fn wait(self) { + futures::future::join_all(self.worker_threads).await; + } } pub struct WorkerEngine { sqs_client: SqsClient, db_client: DynamoDBClient, - s3_client: aws_sdk_s3::Client, + s3_client: S3Client, expiration_timestamps: Arc>>, is_supervisor_enabled: Arc, running_workers: Vec, @@ -114,7 +119,7 @@ impl WorkerEngine { pub fn new( sqs_client: SqsClient, db_client: DynamoDBClient, - s3_client: aws_sdk_s3::Client, + s3_client: S3Client, supervisor_enabled: bool, ) -> Self { let is_supervisor_enabled = 
Arc::new(AtomicBool::new(supervisor_enabled));
@@ -153,6 +158,21 @@ impl WorkerEngine {
         }
     }
 
+    pub async fn wait(self) {
+        let mut worker_futures = Vec::new();
+        for worker in self.running_workers {
+            worker_futures.push(worker.wait());
+        }
+
+        // Wait for all workers to finish
+        futures::future::join_all(worker_futures).await;
+
+        // Wait for the supervisor thread if it exists
+        if let Some(supervisor_thread) = Arc::try_unwrap(self.supervisor_thread).ok().flatten() {
+            supervisor_thread.await.expect("Supervisor thread panicked");
+        }
+    }
+
     // pub async fn enable_supervisor_thread(&mut self) {
     //     if self.supervisor_thread.is_some() {
     //         return;

From b7a87635f0508c9f39a1a4ffa7c3a14c4ecdd4bc Mon Sep 17 00:00:00 2001
From: taco-paco
Date: Mon, 2 Sep 2024 19:25:18 +0900
Subject: [PATCH 10/21] fix: lambdas compilation feat: intermediate copy of
 remnant files

---
 .gitignore                              |   3 +-
 crates/lambdas/src/compile.rs           |   2 +-
 crates/worker/compiled/combined.json    |   1 +
 crates/worker/hardhat_env/.gitignore    | 117 ++++++++++++++++++++++
 crates/worker/hardhat_env/package.json  |  24 +++++
 crates/worker/hardhat_env/tsconfig.json |  18 ++++
 crates/worker/scripts/deploy.js         |  57 ++++++++++++
 7 files changed, 220 insertions(+), 2 deletions(-)
 create mode 100644 crates/worker/compiled/combined.json
 create mode 100644 crates/worker/hardhat_env/.gitignore
 create mode 100644 crates/worker/hardhat_env/package.json
 create mode 100644 crates/worker/hardhat_env/tsconfig.json
 create mode 100644 crates/worker/scripts/deploy.js

diff --git a/.gitignore b/.gitignore
index f8b720fe..7143682f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,10 +16,11 @@ Cargo.lock
 # MSVC Windows builds of rustc generate these, which store debugging information
 *.pdb
 
-node_modules
 api/logs
 api/hardhat_env/workspaces
 
+node_modules
+
 # MacOS related
 .DS_Store
 
diff --git a/crates/lambdas/src/compile.rs b/crates/lambdas/src/compile.rs
index ed1e5477..85bd491d 100644
--- a/crates/lambdas/src/compile.rs
+++ b/crates/lambdas/src/compile.rs
@@ -5,7 +5,7 @@ use lambda_http::{
 };
 use std::ops::Add;
 use tracing::{error, info};
-use types::{CompilationRequest, Item, SqsMessage, Status};
+use types::{CompilationRequest, SqsMessage, item::{Item, Status}};
 
 mod common;
 use crate::common::{errors::Error, utils::extract_request, BUCKET_NAME_DEFAULT};
diff --git a/crates/worker/compiled/combined.json b/crates/worker/compiled/combined.json
new file mode 100644
index 00000000..854de0fc
--- /dev/null
+++ b/crates/worker/compiled/combined.json
@@ -0,0 +1 @@
+{"contracts":{"/home/edgar/development/nethermind/zksync-remix-plugin/examples/Greeter.sol:Greeter":{"abi":[{"inputs":[{"internalType":"string","name":"_greeting","type":"string"}],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[],"name":"greet","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"string","name":"_greeting","type":"string"}],"name":"setGreeting","outputs":[],"stateMutability":"nonpayable","type":"function"}],"factory-deps":{}}},"version":"0.8.19+commit.7dd6d404.Linux.g++","zk_version":"1.3.9"}
\ No newline at end of file
diff --git a/crates/worker/hardhat_env/.gitignore b/crates/worker/hardhat_env/.gitignore
new file mode 100644
index 00000000..4fd16a91
--- /dev/null
+++ b/crates/worker/hardhat_env/.gitignore
@@ -0,0 +1,117 @@
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+lerna-debug.log*
+.vscode
+
+# hardhat artifacts
+artifacts
+cache
+
+# zksync artifacts
+artifacts-zk
+cache-zk +hardhat-cache + +# Diagnostic reports (https://nodejs.org/api/report.html) +report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json + +# Runtime data +pids +*.pid +*.seed +*.pid.lock + +# Directory for instrumented libs generated by jscoverage/JSCover +lib-cov + +# Coverage directory used by tools like istanbul +coverage +*.lcov + +# nyc test coverage +.nyc_output + +# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files) +.grunt + +# Bower dependency directory (https://bower.io/) +bower_components + +# node-waf configuration +.lock-wscript + +# Compiled binary addons (https://nodejs.org/api/addons.html) +build/Release + +# Dependency directories +node_modules/ +jspm_packages/ + +# TypeScript v1 declaration files +typings/ + +# TypeScript cache +*.tsbuildinfo + +# Optional npm cache directory +.npm + +# Optional eslint cache +.eslintcache + +# Microbundle cache +.rpt2_cache/ +.rts2_cache_cjs/ +.rts2_cache_es/ +.rts2_cache_umd/ + +# Optional REPL history +.node_repl_history + +# Output of 'npm pack' +*.tgz + +# Yarn Integrity file +.yarn-integrity + +# dotenv environment variables file +.env +.env.test + +# parcel-bundler cache (https://parceljs.org/) +.cache + +# Next.js build output +.next + +# Nuxt.js build / generate output +.nuxt +dist + +# Gatsby files +.cache/ +# Comment in the public line in if your project uses Gatsby and *not* Next.js +# https://nextjs.org/blog/next-9-1#public-directory-support +# public + +# vuepress build output +.vuepress/dist + +# Serverless directories +.serverless/ + +# FuseBox cache +.fusebox/ + +# DynamoDB Local files +.dynamodb/ + +# TernJS port file +.tern-port + +# lock files +yarn.lock \ No newline at end of file diff --git a/crates/worker/hardhat_env/package.json b/crates/worker/hardhat_env/package.json new file mode 100644 index 00000000..57fd9af5 --- /dev/null +++ b/crates/worker/hardhat_env/package.json @@ -0,0 +1,24 @@ +{ + "name": "hardhat-env", + "version": "0.1.0", + "license": "MIT", + "scripts": { + "compile": "npx hardhat compile", + "verify": "npx hardhat verify" + }, + "devDependencies": { + "@types/node": "^18.11.17", + "ts-node": "^10.6.0", + "typescript": "^5.1.6" + }, + "dependencies": { + "hardhat": "^2.19.4", + "@matterlabs/hardhat-zksync-solc": "1.1.4", + "@matterlabs/hardhat-zksync-verify": "1.4.3", + "@matterlabs/zksync-contracts": "^0.6.1", + "@openzeppelin/contracts": "^4.6.0", + "@openzeppelin/contracts-upgradeable": "^4.6.0", + "@chainlink/contracts": "^1.0.0", + "@thirdweb-dev/contracts": "^3.11.2" + } +} \ No newline at end of file diff --git a/crates/worker/hardhat_env/tsconfig.json b/crates/worker/hardhat_env/tsconfig.json new file mode 100644 index 00000000..27149c97 --- /dev/null +++ b/crates/worker/hardhat_env/tsconfig.json @@ -0,0 +1,18 @@ +{ + "compilerOptions": { + "target": "es5", + "module": "commonjs", + "strict": true, + "esModuleInterop": true, + "moduleResolution": "node", + "forceConsistentCasingInFileNames": true, + "outDir": "dist" + }, + "include": [ + "./hardhat.config.ts", + "./scripts", + "./deploy", + "./test", + "typechain/**/*" + ] +} \ No newline at end of file diff --git a/crates/worker/scripts/deploy.js b/crates/worker/scripts/deploy.js new file mode 100644 index 00000000..d38de0fb --- /dev/null +++ b/crates/worker/scripts/deploy.js @@ -0,0 +1,57 @@ + + // Right click on the script name and hit "Run" to execute + (async () => { + try { + console.log('deploy to starknet...') + const compiledCairoContract = await remix.call('fileManager', 'readFile', 
'compiled_cairo_artifacts/contract.json'); + const compiledContract = starknet.json.parse(compiledCairoContract); + const NetworkBaseUrls = { + 'goerli-alpha': 'https://alpha4.starknet.io', + 'mainnet-alpha': 'https://alpha-mainnet.starknet.io' + } + + const payload = { + compiledContract: compiledContract, + transactionInputs: [], // if you have constructor args please add your args + network: 'goerli-alpha' // mainnet-alpha or goerli-alpha or devnet + }; + + const baseUrl = payload['network'] ? NetworkBaseUrls[payload['network']] : payload['baseUrl']; + + const response = await fetch(baseUrl + '/gateway/add_transaction', { + method: 'POST', + headers: { + accept: 'application/json', + }, + body: JSON.stringify({ + type: 'DEPLOY', + contract_address_salt: '0x01319c1c1f0400688eafde419346d0b9876cd3d6a4daaa9f4768a3f5a810c543', + contract_definition: payload.compiledContract.contract_definition, + constructor_calldata: payload.transactionInputs + }) + }); + + const responseData = await response.json(); + + // const methodResponse = await callContract({ + // contract_address: responseData.address, + // entry_point_selector: getSelectorFromName("YOUR_FUNCTION_NAME"), + // calldata: ["1"], + // }); + + // const result = methodResponse.result[0]; + // result contains the return value of the method you gave to callContract + if(response.status === 200) { + console.log('Deployed contract address: ', responseData.address) + console.log('Deployed contract transaction hash: ', responseData.transaction_hash) + console.log('Deployment successful.') + } else { + console.log('Deployed contract error: ', responseData) + console.log('Deployment failed.') + } + + } catch (exception) { + console.log(exception.message) + } + })() + \ No newline at end of file From 31cac651786b4dda831982819e58417b67716775 Mon Sep 17 00:00:00 2001 From: taco-paco Date: Tue, 3 Sep 2024 19:19:04 +0900 Subject: [PATCH 11/21] fix: some bugs. compilation works --- crates/worker/src/commands/compile.rs | 46 +++++++++++++++------------ crates/worker/src/s3_client.rs | 9 +++++- crates/worker/src/utils/lib.rs | 5 ++- crates/worker/src/worker.rs | 11 ++++++- 4 files changed, 45 insertions(+), 26 deletions(-) diff --git a/crates/worker/src/commands/compile.rs b/crates/worker/src/commands/compile.rs index 186ddcfd..ccae729e 100644 --- a/crates/worker/src/commands/compile.rs +++ b/crates/worker/src/commands/compile.rs @@ -68,8 +68,9 @@ pub async fn compile( .update_item() .table_name(db_client.table_name.clone()) .key("ID", AttributeValue::S(request.id.clone())) - .update_expression("SET Status = :newStatus") - .condition_expression("Status = :currentStatus") // TODO: check + .update_expression("SET #status = :newStatus") + .condition_expression("#status = :currentStatus") // TODO: check + .expression_attribute_names("#status", "Status") .expression_attribute_values( ":newStatus", AttributeValue::N(u32::from(Status::Compiling).to_string()), @@ -84,9 +85,10 @@ pub async fn compile( Ok(_) => {} Err(SdkError::ServiceError(err)) => match err.err() { UpdateItemError::ConditionalCheckFailedException(_) => { + error!("Conditional check not met"); return Err(CompilationError::UnexpectedStatusError( "Concurrent status change from another instance".into(), - )) + )); } _ => return Err(DBError::from(SdkError::ServiceError(err)).into()), }, @@ -138,13 +140,19 @@ pub async fn on_compilation_success( presigned_urls.push(presigned_request.uri().to_string()); } + if presigned_urls.is_empty() { + // TODO: AttributeValue::Ss doesn't allow empty arrays. 
Decide what to do. for now + presigned_urls.push("".to_string()); + } + db_client .client .update_item() .table_name(db_client.table_name.clone()) .key("ID", AttributeValue::S(id.to_string())) - .update_expression("SET Status = :newStatus") - .update_expression("SET Data = :data") + .update_expression("SET #status = :newStatus, #data = :data") + .expression_attribute_names("#status", "Status") + .expression_attribute_names("#data", "Data") .expression_attribute_values( ":newStatus", AttributeValue::N(2.to_string()), // Ready @@ -167,8 +175,9 @@ pub async fn on_compilation_failed( .update_item() .table_name(db_client.table_name.clone()) .key("ID", AttributeValue::S(id.to_string())) - .update_expression("SET Status = :newStatus") - .update_expression("SET Data = :data") + .update_expression("SET #status = :newStatus, #data = :data") + .expression_attribute_names("#status", "Status") + .expression_attribute_names("#data", "Data") .expression_attribute_values( ":newStatus", AttributeValue::N(3.to_string()), // Failed @@ -192,20 +201,15 @@ pub async fn do_compile( } // root directory for the contracts - let workspace_path_str = format!("{}/{}", SOL_ROOT, namespace); - let workspace_path = Path::new(&workspace_path_str); - + let workspace_path = Path::new(SOL_ROOT).join(namespace); // root directory for the artifacts - let artifacts_path_str = format!("{}/{}", workspace_path_str, "artifacts-zk"); - let artifacts_path = Path::new(&artifacts_path_str); - + let artifacts_path = workspace_path.join("artifacts-zk"); // root directory for user files (hardhat config, etc) - let user_files_path_str = workspace_path_str.clone(); - let hardhat_config_path = Path::new(&user_files_path_str).join("hardhat.config.ts"); + let hardhat_config_path = workspace_path.join("hardhat.config.ts"); // instantly create the directories - tokio::fs::create_dir_all(workspace_path).await?; - tokio::fs::create_dir_all(artifacts_path).await?; + tokio::fs::create_dir_all(&workspace_path).await?; + tokio::fs::create_dir_all(&artifacts_path).await?; // when the compilation is done, clean up the directories // it will be called when the AutoCleanUp struct is dropped @@ -236,7 +240,7 @@ pub async fn do_compile( .collect(); // initialize the files - initialize_files(workspace_path, contracts).await?; + initialize_files(&workspace_path, contracts).await?; // Limit number of spawned processes. RAII released let _permit = SPAWN_SEMAPHORE.acquire().await.expect("Expired semaphore"); @@ -244,7 +248,7 @@ pub async fn do_compile( let process = tokio::process::Command::new("npx") .arg("hardhat") .arg("compile") - .current_dir(workspace_path) + .current_dir(&workspace_path) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .spawn()?; @@ -267,14 +271,14 @@ pub async fn do_compile( // fetch the files in the artifacts directory let mut file_contents: Vec = vec![]; let file_paths = - list_files_in_directory(artifacts_path).expect("Unexpected error listing artifact"); + list_files_in_directory(&artifacts_path).expect("Unexpected error listing artifact"); for file_path in file_paths.iter() { // TODO: change this - don't store files in RAM. 
copy 1-1 to S3 let file_content = tokio::fs::read(file_path).await?; let full_path = Path::new(file_path); let relative_path = full_path - .strip_prefix(artifacts_path) + .strip_prefix(&artifacts_path) .expect("Unexpected prefix"); let relative_path_str = relative_path.to_str().unwrap(); diff --git a/crates/worker/src/s3_client.rs b/crates/worker/src/s3_client.rs index dc8f66d2..daccd94a 100644 --- a/crates/worker/src/s3_client.rs +++ b/crates/worker/src/s3_client.rs @@ -5,6 +5,7 @@ use aws_sdk_s3::types::Object; use aws_sdk_s3::Client; use aws_smithy_types::byte_stream::ByteStream; use std::io::Write; +use std::path::Path; use tracing::{error, warn}; #[derive(Clone)] @@ -36,9 +37,15 @@ impl S3Client { return Err(S3Error::InvalidObjectError); } + let file_path = Path::new(key) + .strip_prefix(dir) + .expect("Unreachable. list_all_keys bug."); files.push(CompilationFile { file_content: contents, - file_name: key.to_string(), + file_name: file_path + .to_str() + .expect("Unexpected encoding issue.") + .to_string(), }); } diff --git a/crates/worker/src/utils/lib.rs b/crates/worker/src/utils/lib.rs index aea64b33..3155d807 100644 --- a/crates/worker/src/utils/lib.rs +++ b/crates/worker/src/utils/lib.rs @@ -187,12 +187,11 @@ pub fn generate_mock_solidity_file_content() -> String { } pub async fn initialize_files( - dst_dir: &Path, + dst_dir: impl AsRef, files: Vec, ) -> Result<(), std::io::Error> { for file in files { - let file_path_str = format!("{}/{}", dst_dir.to_str().unwrap(), file.file_name); - let file_path = Path::new(&file_path_str); + let file_path = dst_dir.as_ref().join(file.file_name); // create parent directories tokio::fs::create_dir_all(file_path.parent().unwrap()).await?; diff --git a/crates/worker/src/worker.rs b/crates/worker/src/worker.rs index 4e169124..6aa07fa1 100644 --- a/crates/worker/src/worker.rs +++ b/crates/worker/src/worker.rs @@ -91,7 +91,16 @@ impl RunningWorker { match sqs_message { SqsMessage::Compile { request } => { - let _ = compile(request, &db_client, &s3_client).await; // TODO: + let result = compile(request, &db_client, &s3_client).await; // TODO: + match result { + Ok(()) => { + println!("Success"); // TODO: remove + } + Err(err) => { + println!("err: {:?}", err); + info!("err: {}", err.to_string()); + } + } } SqsMessage::Verify { request } => {} // TODO; } From 81f69ac962793f2cc203075f77ecb60145670f7d Mon Sep 17 00:00:00 2001 From: taco-paco Date: Wed, 4 Sep 2024 12:39:31 +0900 Subject: [PATCH 12/21] feat: compilation error handling --- crates/worker/src/commands/compile.rs | 3 +-- crates/worker/src/errors.rs | 2 +- crates/worker/src/worker.rs | 35 +++++++++++++++++++-------- 3 files changed, 27 insertions(+), 13 deletions(-) diff --git a/crates/worker/src/commands/compile.rs b/crates/worker/src/commands/compile.rs index ccae729e..d4b88404 100644 --- a/crates/worker/src/commands/compile.rs +++ b/crates/worker/src/commands/compile.rs @@ -69,7 +69,7 @@ pub async fn compile( .table_name(db_client.table_name.clone()) .key("ID", AttributeValue::S(request.id.clone())) .update_expression("SET #status = :newStatus") - .condition_expression("#status = :currentStatus") // TODO: check + .condition_expression("#status = :currentStatus") .expression_attribute_names("#status", "Status") .expression_attribute_values( ":newStatus", @@ -262,7 +262,6 @@ pub async fn do_compile( if !status.success() { let err_msg = String::from_utf8_lossy(&output.stderr); error!("Compilation error: {}", err_msg); - // TODO: handle return Err(CompilationError::CompilationFailureError( 
err_msg.to_string(), )); diff --git a/crates/worker/src/errors.rs b/crates/worker/src/errors.rs index 17403bb9..ea7c9441 100644 --- a/crates/worker/src/errors.rs +++ b/crates/worker/src/errors.rs @@ -62,7 +62,7 @@ pub enum CompilationError { #[error("Item isn't id DB: {0}")] NoDBItemError(String), #[error("Unexpected status: {0}")] - UnexpectedStatusError(String), // ignorable + UnexpectedStatusError(String), #[error("Unsupported version: {0}")] VersionNotSupported(String), #[error(transparent)] diff --git a/crates/worker/src/worker.rs b/crates/worker/src/worker.rs index 6aa07fa1..82bbb4b2 100644 --- a/crates/worker/src/worker.rs +++ b/crates/worker/src/worker.rs @@ -1,6 +1,3 @@ -use crate::commands::compile::compile; -use crate::dynamodb_client::DynamoDBClient; -use crate::s3_client::S3Client; use std::num::NonZeroUsize; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::Arc; @@ -12,6 +9,10 @@ use tracing::{error, info, warn}; use types::SqsMessage; use uuid::Uuid; +use crate::commands::compile::compile; +use crate::dynamodb_client::DynamoDBClient; +use crate::errors::CompilationError; +use crate::s3_client::S3Client; use crate::sqs_client::SqsClient; use crate::sqs_listener::{SqsListener, SqsReceiver}; use crate::utils::lib::{timestamp, DURATION_TO_PURGE}; @@ -93,13 +94,27 @@ impl RunningWorker { SqsMessage::Compile { request } => { let result = compile(request, &db_client, &s3_client).await; // TODO: match result { - Ok(()) => { - println!("Success"); // TODO: remove - } - Err(err) => { - println!("err: {:?}", err); - info!("err: {}", err.to_string()); - } + Ok(()) => {} + Err(err) => match err { + CompilationError::DBError(err) => { + warn!("compilation DBError: {}", err.to_string()); + continue; + } + CompilationError::S3Error(err) => { + warn!("compilation S3Error: {}", err.to_string()); + continue; + } + CompilationError::NoDBItemError(err) => { + warn!("{}", err.to_string()); + } + CompilationError::UnexpectedStatusError(err) => { + warn!("{}", err.to_string()); + } + CompilationError::IoError(err) => { + warn!("IOError: {}", err.to_string()); + } + _ => error!("Unexpected branch."), + }, } } SqsMessage::Verify { request } => {} // TODO; From 0d8cfebd40b10e2087289dc6786e7648e0554242 Mon Sep 17 00:00:00 2001 From: taco-paco Date: Thu, 5 Sep 2024 11:33:22 +0900 Subject: [PATCH 13/21] feat: sqs client refactoring --- crates/worker/src/main.rs | 3 +- crates/worker/src/sqs_client.rs | 224 ------------------------ crates/worker/src/sqs_client/mod.rs | 81 +++++++++ crates/worker/src/sqs_client/wrapper.rs | 164 +++++++++++++++++ crates/worker/src/sqs_listener.rs | 9 +- crates/worker/src/utils/cleaner.rs | 4 +- crates/worker/src/worker.rs | 6 +- 7 files changed, 257 insertions(+), 234 deletions(-) delete mode 100644 crates/worker/src/sqs_client.rs create mode 100644 crates/worker/src/sqs_client/mod.rs create mode 100644 crates/worker/src/sqs_client/wrapper.rs diff --git a/crates/worker/src/main.rs b/crates/worker/src/main.rs index e23257d5..f87d7d2b 100644 --- a/crates/worker/src/main.rs +++ b/crates/worker/src/main.rs @@ -13,6 +13,7 @@ use std::num::NonZeroUsize; use crate::dynamodb_client::DynamoDBClient; use crate::s3_client::S3Client; +use crate::sqs_client::wrapper::SqsClientWrapper; use crate::sqs_client::SqsClient; use crate::worker::WorkerEngine; @@ -40,7 +41,7 @@ async fn main() { // Initialize SQS client let sqs_client = aws_sdk_sqs::Client::new(&config); - let sqs_client = SqsClient::new(sqs_client, QUEUE_URL_DEFAULT); + let sqs_client = 
SqsClientWrapper::new(sqs_client, QUEUE_URL_DEFAULT); // Initialize DynamoDb client let db_client = aws_sdk_dynamodb::Client::new(&config); diff --git a/crates/worker/src/sqs_client.rs b/crates/worker/src/sqs_client.rs deleted file mode 100644 index 944fdae4..00000000 --- a/crates/worker/src/sqs_client.rs +++ /dev/null @@ -1,224 +0,0 @@ -use crate::errors::{SqsDeleteError, SqsReceiveError}; -use aws_config::retry::ErrorKind; -use aws_sdk_sqs::operation::delete_message::DeleteMessageOutput; -use aws_sdk_sqs::operation::receive_message::ReceiveMessageOutput; -use aws_sdk_sqs::Client; -use std::sync::atomic::{AtomicU8, Ordering}; -use std::sync::Arc; -use std::time::Duration; -use tokio::sync::{oneshot, Mutex}; -use tokio::time::sleep; - -macro_rules! match_result { - ($err_type:ident, $result:expr) => { - match $result { - Ok(val) => Ok(Some(val)), - Err(err) => match err { - $err_type::ConstructionFailure(_) => Err(err), - $err_type::TimeoutError(_) => Ok(None), - $err_type::DispatchFailure(dispatch_err) => { - if dispatch_err.is_io() { - return Ok(None); - } - if dispatch_err.is_timeout() { - return Ok(None); - } - if dispatch_err.is_user() { - return Err($err_type::DispatchFailure(dispatch_err)); - } - if let Some(other) = dispatch_err.as_other() { - return match other { - ErrorKind::ClientError => Err($err_type::DispatchFailure(dispatch_err)), - _ => Ok(None), - }; - } - Err($err_type::DispatchFailure(dispatch_err)) - } - other => Err(other), - }, - } - }; -} - -enum Action { - Default, - Receive(oneshot::Sender>), - Delete { - receipt_handle: String, - sender: oneshot::Sender>, - }, -} - -impl Default for Action { - fn default() -> Self { - Action::Default - } -} - -enum State { - Connected = 0, - Reconnecting = 1, -} - -#[derive(Clone)] -pub struct SqsClient { - client: Client, - queue_url: String, - pending_actions: Arc>>, - state: Arc, -} - -impl SqsClient { - pub fn new(client: Client, queue_url: impl Into) -> Self { - let this = Self { - client, - queue_url: queue_url.into(), - pending_actions: Arc::new(Mutex::new(vec![])), - state: Arc::new(AtomicU8::new(State::Connected as u8)), - }; - - // TODO: improve the lauch - tokio::spawn(SqsClient::worker(this.clone())); - - this - } - - async fn receive_attempt(&self) -> Result, SqsReceiveError> { - let result = self - .client - .receive_message() - .queue_url(self.queue_url.clone()) - .max_number_of_messages(1) - .send() - .await; - - match_result!(SqsReceiveError, result) - } - - async fn delete_attempt( - &self, - receipt_handle: impl Into, - ) -> Result, SqsDeleteError> { - let result = self - .client - .delete_message() - .queue_url(self.queue_url.clone()) - .receipt_handle(receipt_handle) - .send() - .await; - - match_result!(SqsDeleteError, result) - } - - // TODO: start - async fn worker(self) { - // TODO: get the tasks through receiver + maintain the inner queue - loop { - let mut actions = self.pending_actions.lock().await; - - let mut pivot = 0; - for i in 0..actions.len() { - let action = std::mem::take(&mut actions[i]); - match action { - Action::Receive(sender) => match self.receive_attempt().await { - Ok(Some(val)) => { - self.state.store(State::Connected as u8, Ordering::Release); - if sender.send(Ok(val)).is_err() { - break; - } - } - Err(err) => { - if sender.send(Err(err)).is_err() { - break; - } - } - Ok(None) => { - // Keeping in the array to resend. 
- actions[pivot] = Action::Receive(sender); - pivot += 1; - } - }, - Action::Delete { - receipt_handle, - sender, - } => match self.delete_attempt(receipt_handle.clone()).await { - Ok(Some(val)) => { - self.state.store(State::Connected as u8, Ordering::Release); - if sender.send(Ok(val)).is_err() { - break; - } - } - Err(err) => { - if sender.send(Err(err)).is_err() { - break; - } - } - Ok(None) => { - actions[pivot] = Action::Delete { - receipt_handle, - sender, - }; - pivot += 1; - } - }, - Action::Default => unreachable!(), - }; - } - - actions.truncate(pivot); - drop(actions); - - sleep(Duration::from_secs(3)).await; - } - } - - pub async fn receive_message(&self) -> Result { - match self.state.load(Ordering::Acquire) { - 0 => match self.receive_attempt().await { - Ok(None) => self - .state - .store(State::Reconnecting as u8, Ordering::Release), - Ok(Some(val)) => return Ok(val), - Err(err) => return Err(err), - }, - 1 => {} - _ => unreachable!(), - }; - - // State::Reconnecting branch - let (sender, receiver) = oneshot::channel(); - self.pending_actions - .lock() - .await - .push(Action::Receive(sender)); - - receiver.await.unwrap() // TODO: for now - } - - pub async fn delete_message( - &self, - receipt_handle: impl Into, - ) -> Result<(), SqsDeleteError> { - let receipt_handle = receipt_handle.into(); - match self.state.load(Ordering::Acquire) { - 0 => match self.delete_attempt(receipt_handle.clone()).await { - Ok(None) => self - .state - .store(State::Reconnecting as u8, Ordering::Release), - Ok(Some(_)) => return Ok(()), - Err(err) => return Err(err), - }, - 1 => {} - _ => unreachable!(), - }; - - // State::Reconnecting branch - let (sender, receiver) = oneshot::channel(); - self.pending_actions.lock().await.push(Action::Delete { - receipt_handle, - sender, - }); - - receiver.await.unwrap().map(|_| ()) // TODO: for now - } -} diff --git a/crates/worker/src/sqs_client/mod.rs b/crates/worker/src/sqs_client/mod.rs new file mode 100644 index 00000000..722d16c3 --- /dev/null +++ b/crates/worker/src/sqs_client/mod.rs @@ -0,0 +1,81 @@ +use aws_config::retry::ErrorKind; +use aws_sdk_sqs::operation::delete_message::DeleteMessageOutput; +use aws_sdk_sqs::operation::receive_message::ReceiveMessageOutput; +use aws_sdk_sqs::Client; + +use crate::errors::{SqsDeleteError, SqsReceiveError}; + +pub mod wrapper; + +macro_rules! 
match_result { + ($err_type:ident, $result:expr) => { + match $result { + Ok(val) => Ok(Some(val)), + Err(err) => match err { + $err_type::ConstructionFailure(_) => Err(err), + $err_type::TimeoutError(_) => Ok(None), + $err_type::DispatchFailure(dispatch_err) => { + if dispatch_err.is_io() { + return Ok(None); + } + if dispatch_err.is_timeout() { + return Ok(None); + } + if dispatch_err.is_user() { + return Err($err_type::DispatchFailure(dispatch_err)); + } + if let Some(other) = dispatch_err.as_other() { + return match other { + ErrorKind::ClientError => Err($err_type::DispatchFailure(dispatch_err)), + _ => Ok(None), + }; + } + Err($err_type::DispatchFailure(dispatch_err)) + } + other => Err(other), + }, + } + }; +} + +#[derive(Clone)] +pub struct SqsClient { + pub client: Client, + pub queue_url: String, +} + +impl SqsClient { + pub fn new(client: Client, queue_url: impl Into) -> Self { + Self { + client, + queue_url: queue_url.into(), + } + } + + async fn receive_attempt(&self) -> Result, SqsReceiveError> { + let result = self + .client + .receive_message() + .queue_url(self.queue_url.clone()) + .max_number_of_messages(1) + .send() + .await; + + match_result!(SqsReceiveError, result) + } + + async fn delete_attempt( + &self, + receipt_handle: impl Into, + ) -> Result, SqsDeleteError> { + let result = self + .client + .delete_message() + .queue_url(self.queue_url.clone()) + .receipt_handle(receipt_handle) + .send() + .await; + + match_result!(SqsDeleteError, result) + } +} diff --git a/crates/worker/src/sqs_client/wrapper.rs b/crates/worker/src/sqs_client/wrapper.rs new file mode 100644 index 00000000..e0856cb8 --- /dev/null +++ b/crates/worker/src/sqs_client/wrapper.rs @@ -0,0 +1,164 @@ +use crate::errors::{SqsDeleteError, SqsReceiveError}; +use crate::sqs_client::SqsClient; +use aws_sdk_sqs::operation::delete_message::DeleteMessageOutput; +use aws_sdk_sqs::operation::receive_message::ReceiveMessageOutput; +use aws_sdk_sqs::Client; +use std::sync::atomic::{AtomicU8, Ordering}; +use std::sync::Arc; +use std::time::Duration; +use tokio::sync::{mpsc, oneshot, Mutex}; +use tokio::time::sleep; + +enum Action { + Default, + Receive(oneshot::Sender>), + Delete { + receipt_handle: String, + sender: oneshot::Sender>, + }, +} + +impl Default for Action { + fn default() -> Self { + Action::Default + } +} + +enum State { + Connected = 0, + Reconnecting = 1, +} + +#[derive(Clone)] +pub struct SqsClientWrapper { + client: SqsClient, + actions_sender: mpsc::Sender, + state: Arc, +} + +impl SqsClientWrapper { + pub fn new(client: Client, queue_url: impl Into) -> Self { + let client = SqsClient::new(client, queue_url); + let state = Arc::new(AtomicU8::new(State::Connected as u8)); + let (sender, receiver) = mpsc::channel(1000); + + tokio::spawn(Self::worker(client.clone(), state.clone(), receiver)); + + Self { + client, + actions_sender: sender, + state, + } + } + + // TODO: start + async fn worker(client: SqsClient, state: Arc, mut receiver: mpsc::Receiver) { + let mut pending_actions = vec![]; + + while let Some(action) = receiver.recv().await { + pending_actions.push(action); + + while !pending_actions.is_empty() { + Self::resend_pending_actions(&mut pending_actions, &client, &state).await; + sleep(Duration::from_secs(3)).await; + } + } + } + + pub async fn resend_pending_actions( + pending_actions: &mut Vec, + client: &SqsClient, + state: &Arc, + ) { + let mut pivot = 0; + for i in 0..pending_actions.len() { + let action = std::mem::take(&mut pending_actions[i]); + match action { + 
Action::Receive(sender) => match client.receive_attempt().await { + Ok(Some(val)) => { + state.store(State::Connected as u8, Ordering::Release); + let _ = sender.send(Ok(val)); + } + Err(err) => { + let _ = sender.send(Err(err)); + } + Ok(None) => { + // Keeping in the array to resend. + pending_actions[pivot] = Action::Receive(sender); + pivot += 1; + } + }, + Action::Delete { + receipt_handle, + sender, + } => match client.delete_attempt(receipt_handle.clone()).await { + Ok(Some(val)) => { + state.store(State::Connected as u8, Ordering::Release); + let _ = sender.send(Ok(val)); + } + Err(err) => { + let _ = sender.send(Err(err)); + } + Ok(None) => { + pending_actions[pivot] = Action::Delete { + receipt_handle, + sender, + }; + pivot += 1; + } + }, + Action::Default => unreachable!(), + }; + } + + pending_actions.truncate(pivot); + } + + pub async fn receive_message(&self) -> Result { + match self.state.load(Ordering::Acquire) { + 0 => match self.client.receive_attempt().await { + Ok(None) => self + .state + .store(State::Reconnecting as u8, Ordering::Release), + Ok(Some(val)) => return Ok(val), + Err(err) => return Err(err), + }, + 1 => {} + _ => unreachable!(), + }; + + // State::Reconnecting branch + let (sender, receiver) = oneshot::channel(); + self.actions_sender.send(Action::Receive(sender)).await; + receiver.await.unwrap() // TODO: for now + } + + pub async fn delete_message( + &self, + receipt_handle: impl Into, + ) -> Result<(), SqsDeleteError> { + let receipt_handle = receipt_handle.into(); + match self.state.load(Ordering::Acquire) { + 0 => match self.client.delete_attempt(receipt_handle.clone()).await { + Ok(None) => self + .state + .store(State::Reconnecting as u8, Ordering::Release), + Ok(Some(_)) => return Ok(()), + Err(err) => return Err(err), + }, + 1 => {} + _ => unreachable!(), + }; + + // State::Reconnecting branch + let (sender, receiver) = oneshot::channel(); + self.actions_sender + .send(Action::Delete { + receipt_handle, + sender, + }) + .await; + + receiver.await.unwrap().map(|_| ()) // TODO: for now + } +} diff --git a/crates/worker/src/sqs_listener.rs b/crates/worker/src/sqs_listener.rs index fdbc2914..37e7af5d 100644 --- a/crates/worker/src/sqs_listener.rs +++ b/crates/worker/src/sqs_listener.rs @@ -8,16 +8,17 @@ use std::time::Duration; use tokio::task::JoinHandle; use tokio::time::sleep; +use crate::sqs_client::wrapper::SqsClientWrapper; use crate::sqs_client::SqsClient; pub struct SqsListener { handle: JoinHandle>, receiver: Receiver, - client: SqsClient, + client: SqsClientWrapper, } impl SqsListener { - pub fn new(client: SqsClient, poll_interval: Duration) -> Self { + pub fn new(client: SqsClientWrapper, poll_interval: Duration) -> Self { // TODO: unbounded? 
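        //  (a bounded channel also gives backpressure: messages parked in an
        //  unbounded buffer could outlive their SQS visibility timeout and be
        //  handed to another consumer while still queued here)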
let (sender, receiver) = async_channel::bounded(1000); let handle = tokio::spawn(Self::listen(client.clone(), sender, poll_interval)); @@ -30,7 +31,7 @@ impl SqsListener { } async fn listen( - client: SqsClient, + client: SqsClientWrapper, sender: Sender, poll_interval: Duration, ) -> Result<(), SdkError> { @@ -65,7 +66,7 @@ impl SqsListener { } pub struct SqsReceiver { - client: SqsClient, + client: SqsClientWrapper, receiver: Receiver, } diff --git a/crates/worker/src/utils/cleaner.rs b/crates/worker/src/utils/cleaner.rs index b3e9e0bc..52f07d39 100644 --- a/crates/worker/src/utils/cleaner.rs +++ b/crates/worker/src/utils/cleaner.rs @@ -11,7 +11,7 @@ impl Drop for AutoCleanUp<'_> { } impl AutoCleanUp<'_> { - pub async fn clean_up(&self) { + pub async fn clean_up(self) { for path in self.dirs.iter() { println!("Removing path: {:?}", path); @@ -26,7 +26,7 @@ impl AutoCleanUp<'_> { } } - pub fn clean_up_sync(&self) { + fn clean_up_sync(&mut self) { for path in self.dirs.iter() { println!("Removing path: {:?}", path); diff --git a/crates/worker/src/worker.rs b/crates/worker/src/worker.rs index 82bbb4b2..482a9f91 100644 --- a/crates/worker/src/worker.rs +++ b/crates/worker/src/worker.rs @@ -13,7 +13,7 @@ use crate::commands::compile::compile; use crate::dynamodb_client::DynamoDBClient; use crate::errors::CompilationError; use crate::s3_client::S3Client; -use crate::sqs_client::SqsClient; +use crate::sqs_client::wrapper::SqsClientWrapper; use crate::sqs_listener::{SqsListener, SqsReceiver}; use crate::utils::lib::{timestamp, DURATION_TO_PURGE}; @@ -130,7 +130,7 @@ impl RunningWorker { } pub struct WorkerEngine { - sqs_client: SqsClient, + sqs_client: SqsClientWrapper, db_client: DynamoDBClient, s3_client: S3Client, expiration_timestamps: Arc>>, @@ -141,7 +141,7 @@ pub struct WorkerEngine { impl WorkerEngine { pub fn new( - sqs_client: SqsClient, + sqs_client: SqsClientWrapper, db_client: DynamoDBClient, s3_client: S3Client, supervisor_enabled: bool, From 6ef60447664be9360cc8b795bd171962f3692ff9 Mon Sep 17 00:00:00 2001 From: taco-paco Date: Thu, 5 Sep 2024 12:28:23 +0900 Subject: [PATCH 14/21] fix: some logic fixes --- crates/worker/src/errors.rs | 8 +++++ crates/worker/src/sqs_client/wrapper.rs | 44 ++++++++++++++++++------- crates/worker/src/sqs_listener.rs | 19 ++++++++--- crates/worker/src/worker.rs | 11 ++++--- 4 files changed, 61 insertions(+), 21 deletions(-) diff --git a/crates/worker/src/errors.rs b/crates/worker/src/errors.rs index ea7c9441..8c35cebb 100644 --- a/crates/worker/src/errors.rs +++ b/crates/worker/src/errors.rs @@ -25,6 +25,14 @@ pub(crate) type S3ListObjectsError = SdkError; pub(crate) type S3GetObjectError = SdkError; pub(crate) type S3PutObjectError = SdkError; +#[derive(thiserror::Error, Debug)] +pub enum SqsError { + #[error(transparent)] + ReceiveError(#[from] SqsReceiveError), + #[error(transparent)] + DeleteError(#[from] SqsDeleteError), +} + #[derive(thiserror::Error, Debug)] pub enum DBError { #[error(transparent)] diff --git a/crates/worker/src/sqs_client/wrapper.rs b/crates/worker/src/sqs_client/wrapper.rs index e0856cb8..63a798c2 100644 --- a/crates/worker/src/sqs_client/wrapper.rs +++ b/crates/worker/src/sqs_client/wrapper.rs @@ -6,8 +6,9 @@ use aws_sdk_sqs::Client; use std::sync::atomic::{AtomicU8, Ordering}; use std::sync::Arc; use std::time::Duration; +use tokio::select; use tokio::sync::{mpsc, oneshot, Mutex}; -use tokio::time::sleep; +use tokio::time::{sleep, Instant}; enum Action { Default, @@ -53,14 +54,35 @@ impl SqsClientWrapper { // TODO: start 
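    // The reworked loop below parks actions that hit transient connectivity
    // failures in `pending_actions` and retries them every SLEEP_DURATION,
    // so callers only ever observe a final success or a hard error.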
async fn worker(client: SqsClient, state: Arc, mut receiver: mpsc::Receiver) { + const SLEEP_DURATION: Duration = Duration::from_secs(3); let mut pending_actions = vec![]; - while let Some(action) = receiver.recv().await { - pending_actions.push(action); + loop { + if pending_actions.is_empty() { + if let Some(action) = receiver.recv().await { + pending_actions.push(action); + } else { + return; + } + } + + Self::resend_pending_actions(&mut pending_actions, &client, &state).await; + + let start_time = Instant::now(); + let value = select! { + value = receiver.recv() => value, + _ = sleep(SLEEP_DURATION) => continue, + }; - while !pending_actions.is_empty() { - Self::resend_pending_actions(&mut pending_actions, &client, &state).await; - sleep(Duration::from_secs(3)).await; + if let Some(action) = value { + pending_actions.push(action); + } else { + return; + } + + let elapsed = start_time.elapsed(); + if let Some(remaining_sleep) = SLEEP_DURATION.checked_sub(elapsed) { + sleep(remaining_sleep).await; } } } @@ -80,7 +102,7 @@ impl SqsClientWrapper { let _ = sender.send(Ok(val)); } Err(err) => { - let _ = sender.send(Err(err)); + let _ = sender.send(Err(err)); } Ok(None) => { // Keeping in the array to resend. @@ -97,7 +119,7 @@ impl SqsClientWrapper { let _ = sender.send(Ok(val)); } Err(err) => { - let _ = sender.send(Err(err)); + let _ = sender.send(Err(err)); } Ok(None) => { pending_actions[pivot] = Action::Delete { @@ -136,14 +158,14 @@ impl SqsClientWrapper { pub async fn delete_message( &self, receipt_handle: impl Into, - ) -> Result<(), SqsDeleteError> { + ) -> Result { let receipt_handle = receipt_handle.into(); match self.state.load(Ordering::Acquire) { 0 => match self.client.delete_attempt(receipt_handle.clone()).await { Ok(None) => self .state .store(State::Reconnecting as u8, Ordering::Release), - Ok(Some(_)) => return Ok(()), + Ok(Some(value)) => return Ok(value), Err(err) => return Err(err), }, 1 => {} @@ -159,6 +181,6 @@ impl SqsClientWrapper { }) .await; - receiver.await.unwrap().map(|_| ()) // TODO: for now + receiver.await.unwrap() // TODO: for now } } diff --git a/crates/worker/src/sqs_listener.rs b/crates/worker/src/sqs_listener.rs index 37e7af5d..2375d634 100644 --- a/crates/worker/src/sqs_listener.rs +++ b/crates/worker/src/sqs_listener.rs @@ -1,5 +1,5 @@ use crate::errors::{SqsDeleteError, SqsReceiveError}; -use async_channel::{Receiver, Recv, Sender}; +use async_channel::{Receiver, Recv, Sender, TrySendError}; use aws_sdk_sqs::config::http::HttpResponse; use aws_sdk_sqs::error::SdkError; use aws_sdk_sqs::operation::receive_message::ReceiveMessageError; @@ -34,7 +34,7 @@ impl SqsListener { client: SqsClientWrapper, sender: Sender, poll_interval: Duration, - ) -> Result<(), SdkError> { + ) -> Result<(), SqsReceiveError> { loop { let response = client.receive_message().await?; let messages = if let Some(messages) = response.messages { @@ -44,8 +44,16 @@ impl SqsListener { }; for message in messages { - if sender.send(message).await.is_err() { - return Ok(()); + match sender.try_send(message) { + Ok(()) => {}, + Err(err) => match err { + TrySendError::Full(_) => { + // If the channel is full ignoring the message. + // The reason is possibility of "visibility timeout" expiration + // leading to other instance fetching the message not only us. 
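+                        // (SQS re-delivers any message that is not deleted
+                        // before its visibility timeout lapses, so a message
+                        // dropped here reappears on a later poll instead of
+                        // being lost.)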
+ }, + TrySendError::Closed(_) => return Ok(()) + } } } @@ -79,6 +87,7 @@ impl SqsReceiver { &self, receipt_handle: impl Into, ) -> Result<(), SqsDeleteError> { - self.client.delete_message(receipt_handle).await + let _ = self.client.delete_message(receipt_handle).await?; + Ok(()) } } diff --git a/crates/worker/src/worker.rs b/crates/worker/src/worker.rs index 482a9f91..0781afd3 100644 --- a/crates/worker/src/worker.rs +++ b/crates/worker/src/worker.rs @@ -68,16 +68,17 @@ impl RunningWorker { ) { // TODO: process error while let Ok(message) = sqs_receiver.recv().await { - let body = if let Some(body) = message.body { - body + let receipt_handle = if let Some(receipt_handle) = message.receipt_handle { + receipt_handle } else { continue; }; - let receipt_handle = if let Some(receipt_handle) = message.receipt_handle { - receipt_handle + let body = if let Some(body) = message.body { + body } else { - warn!("Has body but not handle"); + warn!("Has handle but not body"); + let _ = sqs_receiver.delete_message(receipt_handle).await; continue; }; From d0ed48c5e1881767be1b4819ad9389ed974e9143 Mon Sep 17 00:00:00 2001 From: taco-paco Date: Thu, 5 Sep 2024 13:21:43 +0900 Subject: [PATCH 15/21] refactor: worker engine --- crates/worker/src/main.rs | 10 +- crates/worker/src/sqs_client/wrapper.rs | 2 +- crates/worker/src/sqs_listener.rs | 12 +- crates/worker/src/worker.rs | 160 +++++++++--------------- 4 files changed, 70 insertions(+), 114 deletions(-) diff --git a/crates/worker/src/main.rs b/crates/worker/src/main.rs index f87d7d2b..2ec4c2f7 100644 --- a/crates/worker/src/main.rs +++ b/crates/worker/src/main.rs @@ -15,7 +15,7 @@ use crate::dynamodb_client::DynamoDBClient; use crate::s3_client::S3Client; use crate::sqs_client::wrapper::SqsClientWrapper; use crate::sqs_client::SqsClient; -use crate::worker::WorkerEngine; +use crate::worker::EngineBuilder; const AWS_PROFILE_DEFAULT: &str = "dev"; // TODO: remove @@ -24,7 +24,7 @@ pub(crate) const QUEUE_URL_DEFAULT: &str = const TABLE_NAME_DEFAULT: &str = "zksync-table"; const BUCKET_NAME_DEFAULT: &str = "zksync-compilation-s3"; -// TODO: state synchronization +// TODO: state synchronization for purging #[tokio::main] async fn main() { @@ -51,8 +51,8 @@ async fn main() { let s3_client = aws_sdk_s3::Client::new(&config); let s3_client = S3Client::new(s3_client, BUCKET_NAME_DEFAULT); - let mut engine = WorkerEngine::new(sqs_client, db_client, s3_client, true); - engine.start(NonZeroUsize::new(10).unwrap()); + let mut engine = EngineBuilder::new(sqs_client, db_client, s3_client, true); + let running_engine = engine.start(NonZeroUsize::new(10).unwrap()); - engine.wait().await; + running_engine.wait().await; } diff --git a/crates/worker/src/sqs_client/wrapper.rs b/crates/worker/src/sqs_client/wrapper.rs index 63a798c2..c760d017 100644 --- a/crates/worker/src/sqs_client/wrapper.rs +++ b/crates/worker/src/sqs_client/wrapper.rs @@ -10,7 +10,7 @@ use tokio::select; use tokio::sync::{mpsc, oneshot, Mutex}; use tokio::time::{sleep, Instant}; -enum Action { +pub enum Action { Default, Receive(oneshot::Sender>), Delete { diff --git a/crates/worker/src/sqs_listener.rs b/crates/worker/src/sqs_listener.rs index 2375d634..c14754ad 100644 --- a/crates/worker/src/sqs_listener.rs +++ b/crates/worker/src/sqs_listener.rs @@ -1,15 +1,11 @@ use crate::errors::{SqsDeleteError, SqsReceiveError}; use async_channel::{Receiver, Recv, Sender, TrySendError}; -use aws_sdk_sqs::config::http::HttpResponse; -use aws_sdk_sqs::error::SdkError; -use 
aws_sdk_sqs::operation::receive_message::ReceiveMessageError; use aws_sdk_sqs::types::Message; use std::time::Duration; use tokio::task::JoinHandle; use tokio::time::sleep; use crate::sqs_client::wrapper::SqsClientWrapper; -use crate::sqs_client::SqsClient; pub struct SqsListener { handle: JoinHandle>, @@ -19,8 +15,8 @@ pub struct SqsListener { impl SqsListener { pub fn new(client: SqsClientWrapper, poll_interval: Duration) -> Self { - // TODO: unbounded? - let (sender, receiver) = async_channel::bounded(1000); + // Low channel capacity in order not to hit SQS "visibility timeout". + let (sender, receiver) = async_channel::bounded(20); let handle = tokio::spawn(Self::listen(client.clone(), sender, poll_interval)); Self { @@ -49,8 +45,8 @@ impl SqsListener { Err(err) => match err { TrySendError::Full(_) => { // If the channel is full ignoring the message. - // The reason is possibility of "visibility timeout" expiration - // leading to other instance fetching the message not only us. + // The reason is possibility of SQS "visibility timeout" expiration + // leading to other instance fetching the same message, not only us. }, TrySendError::Closed(_) => return Ok(()) } diff --git a/crates/worker/src/worker.rs b/crates/worker/src/worker.rs index 0781afd3..a4dbb904 100644 --- a/crates/worker/src/worker.rs +++ b/crates/worker/src/worker.rs @@ -18,21 +18,63 @@ use crate::sqs_listener::{SqsListener, SqsReceiver}; use crate::utils::lib::{timestamp, DURATION_TO_PURGE}; pub type Timestamp = u64; -pub struct RunningWorker { + +pub struct EngineBuilder { + sqs_client: SqsClientWrapper, + db_client: DynamoDBClient, + s3_client: S3Client, + is_supervisor_enabled: bool, + running_workers: Vec, +} + +impl EngineBuilder { + pub fn new( + sqs_client: SqsClientWrapper, + db_client: DynamoDBClient, + s3_client: S3Client, + supervisor_enabled: bool, + ) -> Self { + EngineBuilder { + sqs_client, + db_client, + s3_client, + running_workers: vec![], + is_supervisor_enabled: supervisor_enabled, + } + } + + pub fn start(self, num_workers: NonZeroUsize) -> RunningEngine { + let sqs_listener = SqsListener::new(self.sqs_client, Duration::from_millis(500)); + let s3_client = self.s3_client.clone(); + let db_client = self.db_client.clone(); + + RunningEngine::new( + sqs_listener, + db_client, + s3_client, + num_workers.get(), + self.is_supervisor_enabled, + ) + } +} + +pub struct RunningEngine { sqs_listener: SqsListener, expiration_timestamps: Arc>>, num_workers: usize, worker_threads: Vec>, + supervisor_thread: Option>, } -impl RunningWorker { +impl RunningEngine { pub fn new( sqs_listener: SqsListener, db_client: DynamoDBClient, s3_client: S3Client, num_workers: usize, - expiration_timestamps: Arc>>, + enable_supervisor: bool, ) -> Self { + let expiration_timestamps= Arc::new(Mutex::new(vec![])); let mut worker_threads = Vec::with_capacity(num_workers); for _ in 0..num_workers { // Start worker @@ -42,7 +84,7 @@ impl RunningWorker { let expiration_timestamps = expiration_timestamps.clone(); worker_threads.push(tokio::spawn(async move { - RunningWorker::worker( + RunningEngine::worker( sqs_receiver, db_client_copy, s3_client_copy, @@ -52,10 +94,21 @@ impl RunningWorker { })); } + let supervisor_thread = if enable_supervisor { + let db_client = db_client.clone(); + let expiration_timestamps = expiration_timestamps.clone(); + Some(tokio::spawn(async move { + RunningEngine::supervisor(db_client, expiration_timestamps).await; + })) + } else { + None + }; + Self { sqs_listener, expiration_timestamps, num_workers, + 
supervisor_thread, worker_threads, } } @@ -125,92 +178,6 @@ impl RunningWorker { } } - pub async fn wait(self) { - futures::future::join_all(self.worker_threads).await; - } -} - -pub struct WorkerEngine { - sqs_client: SqsClientWrapper, - db_client: DynamoDBClient, - s3_client: S3Client, - expiration_timestamps: Arc>>, - is_supervisor_enabled: Arc, - running_workers: Vec, - supervisor_thread: Arc>>, -} - -impl WorkerEngine { - pub fn new( - sqs_client: SqsClientWrapper, - db_client: DynamoDBClient, - s3_client: S3Client, - supervisor_enabled: bool, - ) -> Self { - let is_supervisor_enabled = Arc::new(AtomicBool::new(supervisor_enabled)); - let expiration_timestamps = Arc::new(Mutex::new(vec![])); - - WorkerEngine { - sqs_client, - db_client, - s3_client, - supervisor_thread: Arc::new(None), - expiration_timestamps, - running_workers: vec![], - is_supervisor_enabled, - } - } - - pub fn start(&mut self, num_workers: NonZeroUsize) { - let sqs_listener = SqsListener::new(self.sqs_client.clone(), Duration::from_millis(500)); - let s3_client = self.s3_client.clone(); - let db_client = self.db_client.clone(); - self.running_workers.push(RunningWorker::new( - sqs_listener, - db_client, - s3_client, - num_workers.get(), - self.expiration_timestamps.clone(), - )); - - // TODO: not protection really - if self.is_supervisor_enabled.load(Ordering::Acquire) && self.supervisor_thread.is_none() { - let db_client = self.db_client.clone(); - let expiration_timestamps = self.expiration_timestamps.clone(); - self.supervisor_thread = Arc::new(Some(tokio::spawn(async move { - WorkerEngine::supervisor(db_client, expiration_timestamps).await; - }))); - } - } - - pub async fn wait(self) { - let mut worker_futures = Vec::new(); - for worker in self.running_workers { - worker_futures.push(worker.wait()); - } - - // Wait for all workers to finish - futures::future::join_all(worker_futures).await; - - // Wait for the supervisor thread if it exists - if let Some(supervisor_thread) = Arc::try_unwrap(self.supervisor_thread).ok().flatten() { - supervisor_thread.await.expect("Supervisor thread panicked"); - } - } - - // pub async fn enable_supervisor_thread(&mut self) { - // if self.supervisor_thread.is_some() { - // return; - // } - // - // self.is_supervisor_enabled.store(true, Ordering::Release); - // let expiration_timestamps = self.expiration_timestamps.clone(); - // - // self.supervisor_thread = Arc::new(Some(tokio::spawn(async move { - // WorkerEngine::supervisor(self.db_client.clone(), expiration_timestamps).await; - // }))); - // } - pub async fn supervisor( db_client: DynamoDBClient, expiration_timestamps: Arc>>, @@ -241,14 +208,7 @@ impl WorkerEngine { } } - // pub async fn disable_supervisor_thread(&mut self) { - // let mut is_enabled = self.is_supervisor_enabled.lock().await; - // *is_enabled = false; - // - // if let Ok(Some(join_handle)) = Arc::try_unwrap(self.supervisor_thread.clone()) { - // let _ = join_handle.await; - // } - // - // self.supervisor_thread = Arc::new(None); - // } + pub async fn wait(self) { + futures::future::join_all(self.worker_threads).await; + } } From 2e0bf494a7761d3c9db1e6c282bbc625c9d8ac5c Mon Sep 17 00:00:00 2001 From: taco-paco Date: Thu, 5 Sep 2024 17:33:34 +0900 Subject: [PATCH 16/21] feat: added purgatory struct for deleting data from S3 in the future --- crates/types/src/item.rs | 1 + crates/worker/src/dynamodb_client.rs | 2 +- crates/worker/src/errors.rs | 20 +++- crates/worker/src/main.rs | 14 ++- crates/worker/src/purgatory.rs | 118 ++++++++++++++++++++++++ 
crates/worker/src/sqs_client/wrapper.rs | 2 +- crates/worker/src/sqs_listener.rs | 8 +- crates/worker/src/worker.rs | 113 ++++++----------------- 8 files changed, 179 insertions(+), 99 deletions(-) create mode 100644 crates/worker/src/purgatory.rs diff --git a/crates/types/src/item.rs b/crates/types/src/item.rs index 7e573fc8..d709fff7 100644 --- a/crates/types/src/item.rs +++ b/crates/types/src/item.rs @@ -81,6 +81,7 @@ pub struct Item { // TODO: uuid? pub id: String, pub status: Status, + // TODO: type: Compiling/Verifying } impl From for HashMap { diff --git a/crates/worker/src/dynamodb_client.rs b/crates/worker/src/dynamodb_client.rs index c6b0c72a..0c4b59c8 100644 --- a/crates/worker/src/dynamodb_client.rs +++ b/crates/worker/src/dynamodb_client.rs @@ -38,7 +38,7 @@ impl DynamoDBClient { .await?; if let Some(item) = result.item { - // TODO: maybe change status when error? + // TODO: maybe change status or delete when error? Ok(Some(item.try_into()?)) } else { Ok(None) diff --git a/crates/worker/src/errors.rs b/crates/worker/src/errors.rs index 8c35cebb..31a88ccb 100644 --- a/crates/worker/src/errors.rs +++ b/crates/worker/src/errors.rs @@ -8,6 +8,7 @@ use aws_sdk_s3::operation::put_object::PutObjectError; use aws_sdk_sqs::error::SdkError; use aws_sdk_sqs::operation::delete_message::DeleteMessageError; use aws_sdk_sqs::operation::receive_message::ReceiveMessageError; +use tracing::{error}; use types::item::ItemError; // SQS related errors @@ -63,9 +64,9 @@ pub enum S3Error { #[derive(thiserror::Error, Debug)] pub enum CompilationError { - #[error(transparent)] + #[error("DBError: {0}")] DBError(#[from] DBError), - #[error(transparent)] + #[error("S3Error: {0}")] S3Error(#[from] S3Error), #[error("Item isn't id DB: {0}")] NoDBItemError(String), @@ -73,12 +74,25 @@ pub enum CompilationError { UnexpectedStatusError(String), #[error("Unsupported version: {0}")] VersionNotSupported(String), - #[error(transparent)] + #[error("IoError: {0}")] IoError(#[from] std::io::Error), #[error("Failed to compile: {0}")] CompilationFailureError(String), } +impl CompilationError { + pub fn recoverable(&self) -> bool { + match self { + CompilationError::DBError(_) => true, + CompilationError::S3Error(_) => true, + CompilationError::NoDBItemError(_) => false, + CompilationError::UnexpectedStatusError(_) => false, + CompilationError::IoError(_) => false, + _ => false, + } + } +} + #[derive(thiserror::Error, Debug)] pub enum Error { #[error(transparent)] diff --git a/crates/worker/src/main.rs b/crates/worker/src/main.rs index 2ec4c2f7..39ee2bcf 100644 --- a/crates/worker/src/main.rs +++ b/crates/worker/src/main.rs @@ -1,6 +1,7 @@ mod commands; mod dynamodb_client; mod errors; +mod purgatory; mod s3_client; mod sqs_client; mod sqs_listener; @@ -12,20 +13,19 @@ use aws_runtime::env_config::file::{EnvConfigFileKind, EnvConfigFiles}; use std::num::NonZeroUsize; use crate::dynamodb_client::DynamoDBClient; +use crate::purgatory::State; use crate::s3_client::S3Client; use crate::sqs_client::wrapper::SqsClientWrapper; -use crate::sqs_client::SqsClient; use crate::worker::EngineBuilder; const AWS_PROFILE_DEFAULT: &str = "dev"; -// TODO: remove + +// TODO: remove all of the below. Impl cli. 
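+//
+// A possible shape for that CLI, sketched with clap's derive API; the flag and
+// env-var names below are hypothetical, not something this patch introduces:
+//
+//     #[derive(clap::Parser)]
+//     struct Args {
+//         #[arg(long, env = "QUEUE_URL")]
+//         queue_url: String,
+//         #[arg(long, env = "TABLE_NAME")]
+//         table_name: String,
+//         #[arg(long, env = "BUCKET_NAME")]
+//         bucket_name: String,
+//     }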
 pub(crate) const QUEUE_URL_DEFAULT: &str =
     "https://sqs.ap-southeast-2.amazonaws.com/266735844848/zksync-sqs";
 const TABLE_NAME_DEFAULT: &str = "zksync-table";
 const BUCKET_NAME_DEFAULT: &str = "zksync-compilation-s3";
 
-// TODO: state synchronization for purging
-
 #[tokio::main]
 async fn main() {
     let profile_name = std::env::var("AWS_PROFILE").unwrap_or(AWS_PROFILE_DEFAULT.into());
@@ -51,8 +51,12 @@ async fn main() {
     let s3_client = aws_sdk_s3::Client::new(&config);
     let s3_client = S3Client::new(s3_client, BUCKET_NAME_DEFAULT);
 
-    let mut engine = EngineBuilder::new(sqs_client, db_client, s3_client, true);
+    let state = State::load().await;
+
+    let engine = EngineBuilder::new(sqs_client, db_client, s3_client, state);
     let running_engine = engine.start(NonZeroUsize::new(10).unwrap());
 
     running_engine.wait().await;
+
+    // TODO: transfer metrics.
 }
diff --git a/crates/worker/src/purgatory.rs b/crates/worker/src/purgatory.rs
new file mode 100644
index 00000000..5f76c45d
--- /dev/null
+++ b/crates/worker/src/purgatory.rs
@@ -0,0 +1,118 @@
+use std::collections::HashMap;
+use std::marker::PhantomData;
+use std::ptr::NonNull;
+use std::sync::Arc;
+use tokio::{sync::Mutex, task::JoinHandle};
+use types::item::Status;
+use uuid::Uuid;
+use types::SqsMessage;
+
+pub type Timestamp = u64;
+
+#[derive(Clone)]
+pub struct Purgatory {
+    inner: Arc<Mutex<Inner>>,
+}
+
+impl Purgatory {
+    pub fn new(state: State) -> Self {
+        let mut handle = NonNull::dangling();
+        let this = Self {
+            inner: Arc::new(Mutex::new(Inner::new(handle, state))),
+        };
+
+        let initialized_handle = tokio::spawn(this.clone().daemon());
+        unsafe {
+            *handle.as_mut() = initialized_handle;
+        }
+
+        this
+    }
+
+    pub async fn purge(&mut self) {
+        self.inner.lock().await.purge()
+    }
+
+    pub async fn add_task(&mut self, _: &SqsMessage) {
+        todo!()
+    }
+
+    // TODO: args: status, id
+    pub async fn update_task(&mut self) {
+
+    }
+
+    async fn daemon(self) {
+        todo!()
+    }
+}
+
+struct Inner {
+    state: State,
+
+    // No aliases possible since only we own the data
+    handle: NonNull<JoinHandle<()>>,
+    _marker: PhantomData<JoinHandle<()>>,
+}
+
+unsafe impl Send for Inner {}
+
+impl Drop for Inner {
+    fn drop(&mut self) {
+        unsafe {
+            self.handle.as_ref().abort();
+        }
+    }
+}
+
+impl Inner {
+    fn new(handle: NonNull<JoinHandle<()>>, state: State) -> Self {
+        Self { handle, state, _marker: PhantomData }
+    }
+
+    pub fn purge(&mut self) {
+        todo!()
+    }
+
+    // TODO: replace with Self::purge
+    // pub async fn supervisor(
+    //     db_client: DynamoDBClient,
+    //     expiration_timestamps: Arc<Mutex<Vec<(Uuid, Timestamp)>>>,
+    // ) {
+    //     loop {
+    //         let now = timestamp();
+    //
+    //         let to_delete = {
+    //             let mut to_delete = vec![];
+    //             let mut expiration_timestamps = expiration_timestamps.lock().await;
+    //             expiration_timestamps.retain(|&(uuid, expiration)| {
+    //                 if expiration < now {
+    //                     to_delete.push(uuid);
+    //                     false
+    //                 } else {
+    //                     true
+    //                 }
+    //             });
+    //
+    //             to_delete
+    //         };
+    //
+    //         for uuid in to_delete {
+    //             db_client.delete_item(uuid.to_string()).await;
+    //         }
+    //
+    //         sleep(Duration::from_millis(2000)).await;
+    //     }
+    // }
+}
+
+pub struct State {
+    expiration_timestamps: Vec<(Uuid, Timestamp)>,
+    task_status: HashMap<Uuid, Status>,
+}
+
+impl State {
+    pub async fn load() -> State {
+        todo!()
+    }
+}
diff --git a/crates/worker/src/sqs_client/wrapper.rs b/crates/worker/src/sqs_client/wrapper.rs
index c760d017..3266a65a 100644
--- a/crates/worker/src/sqs_client/wrapper.rs
+++ b/crates/worker/src/sqs_client/wrapper.rs
@@ -7,7 +7,7 @@ use std::sync::atomic::{AtomicU8, Ordering};
 use std::sync::Arc;
 use std::time::Duration;
 use tokio::select;
-use
tokio::sync::{mpsc, oneshot, Mutex}; +use tokio::sync::{mpsc, oneshot}; use tokio::time::{sleep, Instant}; pub enum Action { diff --git a/crates/worker/src/sqs_listener.rs b/crates/worker/src/sqs_listener.rs index c14754ad..c88e7308 100644 --- a/crates/worker/src/sqs_listener.rs +++ b/crates/worker/src/sqs_listener.rs @@ -41,15 +41,15 @@ impl SqsListener { for message in messages { match sender.try_send(message) { - Ok(()) => {}, + Ok(()) => {} Err(err) => match err { TrySendError::Full(_) => { // If the channel is full ignoring the message. // The reason is possibility of SQS "visibility timeout" expiration // leading to other instance fetching the same message, not only us. - }, - TrySendError::Closed(_) => return Ok(()) - } + } + TrySendError::Closed(_) => return Ok(()), + }, } } diff --git a/crates/worker/src/worker.rs b/crates/worker/src/worker.rs index a4dbb904..98e2e016 100644 --- a/crates/worker/src/worker.rs +++ b/crates/worker/src/worker.rs @@ -1,29 +1,22 @@ use std::num::NonZeroUsize; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::Arc; use std::time::Duration; use tokio::sync::Mutex; use tokio::task::JoinHandle; -use tokio::time::sleep; -use tracing::{error, info, warn}; +use tracing::{error, warn}; use types::SqsMessage; -use uuid::Uuid; use crate::commands::compile::compile; use crate::dynamodb_client::DynamoDBClient; -use crate::errors::CompilationError; +use crate::purgatory::{Purgatory, State}; use crate::s3_client::S3Client; use crate::sqs_client::wrapper::SqsClientWrapper; use crate::sqs_listener::{SqsListener, SqsReceiver}; -use crate::utils::lib::{timestamp, DURATION_TO_PURGE}; - -pub type Timestamp = u64; pub struct EngineBuilder { sqs_client: SqsClientWrapper, db_client: DynamoDBClient, s3_client: S3Client, - is_supervisor_enabled: bool, + state: State, running_workers: Vec, } @@ -32,38 +25,35 @@ impl EngineBuilder { sqs_client: SqsClientWrapper, db_client: DynamoDBClient, s3_client: S3Client, - supervisor_enabled: bool, + state: State, ) -> Self { EngineBuilder { sqs_client, db_client, s3_client, + state, running_workers: vec![], - is_supervisor_enabled: supervisor_enabled, } } pub fn start(self, num_workers: NonZeroUsize) -> RunningEngine { let sqs_listener = SqsListener::new(self.sqs_client, Duration::from_millis(500)); - let s3_client = self.s3_client.clone(); - let db_client = self.db_client.clone(); RunningEngine::new( sqs_listener, - db_client, - s3_client, + self.db_client, + self.s3_client, + self.state, num_workers.get(), - self.is_supervisor_enabled, ) } } pub struct RunningEngine { sqs_listener: SqsListener, - expiration_timestamps: Arc>>, + purgatory: Purgatory, num_workers: usize, worker_threads: Vec>, - supervisor_thread: Option>, } impl RunningEngine { @@ -71,44 +61,34 @@ impl RunningEngine { sqs_listener: SqsListener, db_client: DynamoDBClient, s3_client: S3Client, + state: State, num_workers: usize, - enable_supervisor: bool, ) -> Self { - let expiration_timestamps= Arc::new(Mutex::new(vec![])); + let purgatory = Purgatory::new(state); + let mut worker_threads = Vec::with_capacity(num_workers); for _ in 0..num_workers { // Start worker let sqs_receiver = sqs_listener.receiver(); let db_client_copy = db_client.clone(); let s3_client_copy = s3_client.clone(); - let expiration_timestamps = expiration_timestamps.clone(); + let purgatory_copy = purgatory.clone(); worker_threads.push(tokio::spawn(async move { RunningEngine::worker( sqs_receiver, db_client_copy, s3_client_copy, - expiration_timestamps, + purgatory_copy, ) .await; })); } - 
let supervisor_thread = if enable_supervisor { - let db_client = db_client.clone(); - let expiration_timestamps = expiration_timestamps.clone(); - Some(tokio::spawn(async move { - RunningEngine::supervisor(db_client, expiration_timestamps).await; - })) - } else { - None - }; - Self { sqs_listener, - expiration_timestamps, + purgatory, num_workers, - supervisor_thread, worker_threads, } } @@ -117,7 +97,7 @@ impl RunningEngine { sqs_receiver: SqsReceiver, db_client: DynamoDBClient, s3_client: S3Client, - expiration_timestamps: Arc>>, + mut purgatory: Purgatory ) { // TODO: process error while let Ok(message) = sqs_receiver.recv().await { @@ -144,31 +124,22 @@ impl RunningEngine { } }; + purgatory.add_task(&sqs_message).await; match sqs_message { SqsMessage::Compile { request } => { let result = compile(request, &db_client, &s3_client).await; // TODO: match result { - Ok(()) => {} - Err(err) => match err { - CompilationError::DBError(err) => { - warn!("compilation DBError: {}", err.to_string()); + Ok(()) => purgatory.update_task().await, + Err(err) => { + if err.recoverable() { + warn!("recoverable error after compilation: {}", err); continue; + } else { + // delete from SQS + warn!("unrecoverable error after compilation: {}", err); + purgatory.update_task().await; } - CompilationError::S3Error(err) => { - warn!("compilation S3Error: {}", err.to_string()); - continue; - } - CompilationError::NoDBItemError(err) => { - warn!("{}", err.to_string()); - } - CompilationError::UnexpectedStatusError(err) => { - warn!("{}", err.to_string()); - } - CompilationError::IoError(err) => { - warn!("IOError: {}", err.to_string()); - } - _ => error!("Unexpected branch."), - }, + } } } SqsMessage::Verify { request } => {} // TODO; @@ -178,37 +149,9 @@ impl RunningEngine { } } - pub async fn supervisor( - db_client: DynamoDBClient, - expiration_timestamps: Arc>>, - ) { - loop { - let now = timestamp(); - - let to_delete = { - let mut to_delete = vec![]; - let mut expiration_timestamps = expiration_timestamps.lock().await; - expiration_timestamps.retain(|&(uuid, expiration)| { - if expiration < now { - to_delete.push(uuid); - false - } else { - true - } - }); - - to_delete - }; - - for uuid in to_delete { - db_client.delete_item(uuid.to_string()).await; - } - - sleep(Duration::from_millis(2000)).await; - } - } - pub async fn wait(self) { futures::future::join_all(self.worker_threads).await; } } + +// what are we purging? 
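A note on the handle wiring in purgatory.rs above: Purgatory::new spawns the daemon task and then writes the resulting JoinHandle back through a NonNull pointer that starts out dangling. Below is a minimal sketch of the same spawn-then-store shape built only on safe primitives; the Arc<Mutex<Option<JoinHandle<()>>>> slot and the explicit shutdown method are assumptions of this sketch, not code from the series.

// Sketch only: spawn the daemon first, then publish its handle through a
// shared slot instead of writing through a raw pointer.
use std::sync::{Arc, Mutex};
use tokio::task::JoinHandle;

#[derive(Clone)]
pub struct Purgatory {
    // Empty until the daemon task is spawned.
    handle: Arc<Mutex<Option<JoinHandle<()>>>>,
}

impl Purgatory {
    pub fn new() -> Self {
        let this = Self {
            handle: Arc::new(Mutex::new(None)),
        };

        // Spawn the background task, then store its handle; no writes to
        // uninitialized memory are involved.
        let spawned = tokio::spawn(this.clone().daemon());
        *this.handle.lock().unwrap() = Some(spawned);

        this
    }

    // Aborts the daemon; callable from any clone.
    pub fn shutdown(&self) {
        if let Some(handle) = self.handle.lock().unwrap().take() {
            handle.abort();
        }
    }

    async fn daemon(self) {
        // The periodic purge loop would run here.
    }
}

Because the daemon future owns a clone of Purgatory, a Drop-based abort can never fire while the task is alive, which is why this sketch exposes shutdown explicitly rather than relying on Drop as the patch's Inner does.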
From a4122495ce32f6278d871de886c9ae54caba3671 Mon Sep 17 00:00:00 2001 From: taco-paco Date: Fri, 6 Sep 2024 18:09:57 +0900 Subject: [PATCH 17/21] refactor: PR comments fixes --- crates/lambdas/src/compile.rs | 2 +- crates/worker/scripts/deploy.js | 57 --------------------------- crates/worker/src/commands/compile.rs | 33 +++++++++------- crates/worker/src/errors.rs | 18 ++++----- crates/worker/src/purgatory.rs | 12 +++--- crates/worker/src/s3_client.rs | 2 +- crates/worker/src/utils/lib.rs | 2 +- crates/worker/src/worker.rs | 25 ++++++------ 8 files changed, 51 insertions(+), 100 deletions(-) delete mode 100644 crates/worker/scripts/deploy.js diff --git a/crates/lambdas/src/compile.rs b/crates/lambdas/src/compile.rs index 85bd491d..5fefc802 100644 --- a/crates/lambdas/src/compile.rs +++ b/crates/lambdas/src/compile.rs @@ -49,7 +49,7 @@ async fn compile( .await; match result { - Ok(val) => val, + Ok(value) => value, Err(SdkError::ServiceError(val)) => match val.err() { PutItemError::ConditionalCheckFailedException(_) => { error!("Recompilation attempt, id: {}", request.id); diff --git a/crates/worker/scripts/deploy.js b/crates/worker/scripts/deploy.js deleted file mode 100644 index d38de0fb..00000000 --- a/crates/worker/scripts/deploy.js +++ /dev/null @@ -1,57 +0,0 @@ - - // Right click on the script name and hit "Run" to execute - (async () => { - try { - console.log('deploy to starknet...') - const compiledCairoContract = await remix.call('fileManager', 'readFile', 'compiled_cairo_artifacts/contract.json'); - const compiledContract = starknet.json.parse(compiledCairoContract); - const NetworkBaseUrls = { - 'goerli-alpha': 'https://alpha4.starknet.io', - 'mainnet-alpha': 'https://alpha-mainnet.starknet.io' - } - - const payload = { - compiledContract: compiledContract, - transactionInputs: [], // if you have constructor args please add your args - network: 'goerli-alpha' // mainnet-alpha or goerli-alpha or devnet - }; - - const baseUrl = payload['network'] ? 
NetworkBaseUrls[payload['network']] : payload['baseUrl']; - - const response = await fetch(baseUrl + '/gateway/add_transaction', { - method: 'POST', - headers: { - accept: 'application/json', - }, - body: JSON.stringify({ - type: 'DEPLOY', - contract_address_salt: '0x01319c1c1f0400688eafde419346d0b9876cd3d6a4daaa9f4768a3f5a810c543', - contract_definition: payload.compiledContract.contract_definition, - constructor_calldata: payload.transactionInputs - }) - }); - - const responseData = await response.json(); - - // const methodResponse = await callContract({ - // contract_address: responseData.address, - // entry_point_selector: getSelectorFromName("YOUR_FUNCTION_NAME"), - // calldata: ["1"], - // }); - - // const result = methodResponse.result[0]; - // result contains the return value of the method you gave to callContract - if(response.status === 200) { - console.log('Deployed contract address: ', responseData.address) - console.log('Deployed contract transaction hash: ', responseData.transaction_hash) - console.log('Deployment successful.') - } else { - console.log('Deployed contract error: ', responseData) - console.log('Deployment failed.') - } - - } catch (exception) { - console.log(exception.message) - } - })() - \ No newline at end of file diff --git a/crates/worker/src/commands/compile.rs b/crates/worker/src/commands/compile.rs index d4b88404..5befeb12 100644 --- a/crates/worker/src/commands/compile.rs +++ b/crates/worker/src/commands/compile.rs @@ -21,8 +21,7 @@ use crate::utils::lib::{ }; pub struct CompilationFile { - // legacy name. file_path really - pub file_name: String, + pub file_path: String, pub file_content: Vec, } @@ -83,21 +82,23 @@ pub async fn compile( .await; match db_update_result { Ok(_) => {} - Err(SdkError::ServiceError(err)) => match err.err() { - UpdateItemError::ConditionalCheckFailedException(_) => { - error!("Conditional check not met"); - return Err(CompilationError::UnexpectedStatusError( - "Concurrent status change from another instance".into(), - )); + Err(SdkError::ServiceError(err)) => { + return match err.err() { + UpdateItemError::ConditionalCheckFailedException(_) => { + error!("Conditional check not met"); + Err(CompilationError::UnexpectedStatusError( + "Concurrent status change from another instance".into(), + )) + } + _ => Err(DBError::from(SdkError::ServiceError(err)).into()), } - _ => return Err(DBError::from(SdkError::ServiceError(err)).into()), - }, + } Err(err) => return Err(DBError::from(err).into()), } } match do_compile( - request.id.clone(), + &request.id, CompilationInput { config: request.config, contracts: files, @@ -105,7 +106,7 @@ pub async fn compile( ) .await { - Ok(val) => Ok(on_compilation_success(&request.id, db_client, s3_client, val).await?), + Ok(value) => Ok(on_compilation_success(&request.id, db_client, s3_client, value).await?), Err(err) => match err { CompilationError::CompilationFailureError(value) => { Ok(on_compilation_failed(&request.id, db_client, value).await?) 
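The compile flow above drives an item through Pending → Compiling and finishes it as Ready { presigned_urls } or Failed. A sketch of the consumer side of that state machine follows; it assumes DynamoDBClient::get_item has roughly the signature Result<Option<Item>, DBError> (consistent with its usage elsewhere in the series, but not shown verbatim) and that a one-second poll interval is acceptable.

// Sketch: polling the compilation status machine from the consumer side.
use std::time::Duration;

use types::item::{Item, Status};

use crate::dynamodb_client::DynamoDBClient;

pub async fn wait_for_artifacts(db_client: &DynamoDBClient, id: &str) -> Option<Vec<String>> {
    loop {
        // Errors and missing items are both treated as "nothing to wait for".
        let item: Item = db_client.get_item(id.to_string()).await.ok()??;
        match item.status {
            // Queued or in progress: check again after a short pause.
            Status::Pending | Status::Compiling => {
                tokio::time::sleep(Duration::from_secs(1)).await
            }
            // Done: artifacts are fetched through the presigned S3 URLs.
            Status::Ready { presigned_urls } => return Some(presigned_urls),
            Status::Failed(message) => {
                tracing::warn!("compilation failed: {}", message);
                return None;
            }
        }
    }
}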
@@ -127,12 +128,14 @@ pub async fn on_compilation_success(
     s3_client: &S3Client,
     compilation_artifacts: Vec<CompiledFile>,
 ) -> Result<(), CompilationError> {
+    const DOWNLOAD_URL_EXPIRATION: Duration = Duration::from_secs(5 * 60 * 60);
+
     let mut presigned_urls = Vec::with_capacity(compilation_artifacts.len());
     for el in compilation_artifacts {
         let file_key = format!("{}/{}/{}", ARTIFACTS_FOLDER, id, el.file_name);
         s3_client.put_object(&file_key, el.file_content).await?;
 
-        let expires_in = PresigningConfig::expires_in(Duration::from_secs(5 * 60 * 60)).unwrap();
+        let expires_in = PresigningConfig::expires_in(DOWNLOAD_URL_EXPIRATION).unwrap();
         let presigned_request = s3_client
             .get_object_presigned(&file_key, expires_in)
             .await?;
@@ -190,7 +193,7 @@ pub async fn on_compilation_failed(
 }
 
 pub async fn do_compile(
-    namespace: String,
+    namespace: &str,
     compilation_request: CompilationInput,
 ) -> Result<Vec<CompiledFile>, CompilationError> {
     let zksolc_version = compilation_request.config.version;
@@ -236,7 +239,7 @@
     let contracts = compilation_request
         .contracts
         .into_iter()
-        .filter(|contract| !contract.file_name.ends_with("_test.sol"))
+        .filter(|contract| !contract.file_path.ends_with("_test.sol"))
         .collect();
 
     // initialize the files
diff --git a/crates/worker/src/errors.rs b/crates/worker/src/errors.rs
index 31a88ccb..96e9ab08 100644
--- a/crates/worker/src/errors.rs
+++ b/crates/worker/src/errors.rs
@@ -8,7 +8,7 @@ use aws_sdk_s3::operation::put_object::PutObjectError;
 use aws_sdk_sqs::error::SdkError;
 use aws_sdk_sqs::operation::delete_message::DeleteMessageError;
 use aws_sdk_sqs::operation::receive_message::ReceiveMessageError;
-use tracing::{error};
+use tracing::error;
 use types::item::ItemError;
 
 // SQS related errors
@@ -28,9 +28,9 @@ pub(crate) type S3PutObjectError = SdkError<PutObjectError>;
 
 #[derive(thiserror::Error, Debug)]
 pub enum SqsError {
-    #[error(transparent)]
+    #[error("SqsReceiveError: {0}")]
     ReceiveError(#[from] SqsReceiveError),
-    #[error(transparent)]
+    #[error("SqsDeleteError: {0}")]
     DeleteError(#[from] SqsDeleteError),
 }
 
@@ -83,12 +83,12 @@ pub enum CompilationError {
 impl CompilationError {
     pub fn recoverable(&self) -> bool {
         match self {
-            CompilationError::DBError(_) => true,
-            CompilationError::S3Error(_) => true,
-            CompilationError::NoDBItemError(_) => false,
-            CompilationError::UnexpectedStatusError(_) => false,
-            CompilationError::IoError(_) => false,
-            _ => false,
+            CompilationError::DBError(_) | CompilationError::S3Error(_) => true,
+            CompilationError::NoDBItemError(_)
+            | CompilationError::UnexpectedStatusError(_)
+            | CompilationError::IoError(_)
+            | CompilationError::VersionNotSupported(_)
+            | CompilationError::CompilationFailureError(_) => false,
         }
     }
 }
diff --git a/crates/worker/src/purgatory.rs b/crates/worker/src/purgatory.rs
index 5f76c45d..0b2e7603 100644
--- a/crates/worker/src/purgatory.rs
+++ b/crates/worker/src/purgatory.rs
@@ -4,8 +4,8 @@ use std::ptr::NonNull;
 use std::sync::Arc;
 use tokio::{sync::Mutex, task::JoinHandle};
 use types::item::Status;
-use uuid::Uuid;
 use types::SqsMessage;
+use uuid::Uuid;
 
 pub type Timestamp = u64;
 
@@ -38,9 +38,7 @@ impl Purgatory {
     }
 
     // TODO: args: status, id
-    pub async fn update_task(&mut self) {
-
-    }
+    pub async fn update_task(&mut self) {}
 
     async fn daemon(self) {
         todo!()
@@ -67,7 +65,11 @@
 impl Inner {
     fn new(handle: NonNull<JoinHandle<()>>, state: State) -> Self {
-        Self { handle, state, _marker: PhantomData }
+        Self {
+            handle,
+            state,
+            _marker: PhantomData,
+        }
     }
 
     pub fn purge(&mut self) {
         todo!()
     }
diff --git
a/crates/worker/src/s3_client.rs b/crates/worker/src/s3_client.rs index daccd94a..43a3c76c 100644 --- a/crates/worker/src/s3_client.rs +++ b/crates/worker/src/s3_client.rs @@ -42,7 +42,7 @@ impl S3Client { .expect("Unreachable. list_all_keys bug."); files.push(CompilationFile { file_content: contents, - file_name: file_path + file_path: file_path .to_str() .expect("Unexpected encoding issue.") .to_string(), diff --git a/crates/worker/src/utils/lib.rs b/crates/worker/src/utils/lib.rs index 3155d807..ff03aaa0 100644 --- a/crates/worker/src/utils/lib.rs +++ b/crates/worker/src/utils/lib.rs @@ -191,7 +191,7 @@ pub async fn initialize_files( files: Vec, ) -> Result<(), std::io::Error> { for file in files { - let file_path = dst_dir.as_ref().join(file.file_name); + let file_path = dst_dir.as_ref().join(file.file_path); // create parent directories tokio::fs::create_dir_all(file_path.parent().unwrap()).await?; diff --git a/crates/worker/src/worker.rs b/crates/worker/src/worker.rs index 98e2e016..a9bf86cc 100644 --- a/crates/worker/src/worker.rs +++ b/crates/worker/src/worker.rs @@ -75,13 +75,8 @@ impl RunningEngine { let purgatory_copy = purgatory.clone(); worker_threads.push(tokio::spawn(async move { - RunningEngine::worker( - sqs_receiver, - db_client_copy, - s3_client_copy, - purgatory_copy, - ) - .await; + RunningEngine::worker(sqs_receiver, db_client_copy, s3_client_copy, purgatory_copy) + .await; })); } @@ -97,7 +92,7 @@ impl RunningEngine { sqs_receiver: SqsReceiver, db_client: DynamoDBClient, s3_client: S3Client, - mut purgatory: Purgatory + mut purgatory: Purgatory, ) { // TODO: process error while let Ok(message) = sqs_receiver.recv().await { @@ -111,7 +106,10 @@ impl RunningEngine { body } else { warn!("Has handle but not body"); - let _ = sqs_receiver.delete_message(receipt_handle).await; + if let Err(err) = sqs_receiver.delete_message(receipt_handle).await { + warn!("{}", err); + } + continue; }; @@ -119,7 +117,10 @@ impl RunningEngine { Ok(sqs_message) => sqs_message, Err(err) => { error!("Could not deserialize message: {}", err.to_string()); - let _ = sqs_receiver.delete_message(receipt_handle).await; + if let Err(err) = sqs_receiver.delete_message(receipt_handle).await { + warn!("{}", err); + } + continue; } }; @@ -145,7 +146,9 @@ impl RunningEngine { SqsMessage::Verify { request } => {} // TODO; } - let _ = sqs_receiver.delete_message(receipt_handle).await; + if let Err(err) = sqs_receiver.delete_message(receipt_handle).await { + warn!("{}", err); + } } } From ad35ef924b8e310d78dd1025c078f32c351130fe Mon Sep 17 00:00:00 2001 From: taco-paco Date: Mon, 9 Sep 2024 11:34:13 +0900 Subject: [PATCH 18/21] refactor: add status key name --- crates/types/src/item.rs | 18 ++++++++++++------ crates/worker/src/commands/compile.rs | 6 +++--- crates/worker/src/sqs_client/wrapper.rs | 1 - 3 files changed, 15 insertions(+), 10 deletions(-) diff --git a/crates/types/src/item.rs b/crates/types/src/item.rs index d709fff7..84618970 100644 --- a/crates/types/src/item.rs +++ b/crates/types/src/item.rs @@ -16,12 +16,18 @@ pub enum Status { Failed(String), } +impl Status { + pub const fn db_key_name() -> &'static str { + "Status" + } +} + impl fmt::Display for Status { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { match self { Status::Pending => write!(f, "Pending"), Status::Compiling => write!(f, "Compiling"), - Status::Ready { presigned_urls: _ } => write!(f, "Ready"), + Status::Ready { .. 
} => write!(f, "Ready"), Status::Failed(msg) => write!(f, "Failed: {}", msg), } } @@ -32,7 +38,7 @@ impl From<&Status> for u32 { match value { Status::Pending => 0, Status::Compiling => 1, - Status::Ready { presigned_urls: _ } => 2, + Status::Ready { .. } => 2, Status::Failed(_) => 3, } } @@ -48,19 +54,19 @@ impl From for HashMap { fn from(value: Status) -> Self { match value.clone() { Status::Pending | Status::Compiling => HashMap::from([( - "Status".into(), + Status::db_key_name().into(), AttributeValue::N(u32::from(&value).to_string()), )]), Status::Ready { presigned_urls } => HashMap::from([ ( - "Status".into(), + Status::db_key_name().into(), AttributeValue::N(u32::from(&value).to_string()), ), ("Data".into(), AttributeValue::Ss(presigned_urls)), ]), Status::Failed(val) => HashMap::from([ ( - "Status".into(), + Status::db_key_name().into(), AttributeValue::N(u32::from(&value).to_string()), ), ("Data".into(), AttributeValue::S(val)), @@ -96,7 +102,7 @@ impl From for HashMap { impl TryFrom<&HashMap> for Status { type Error = ItemError; fn try_from(value: &HashMap) -> Result { - let status = value.get("Status").ok_or(ItemError::FormatError)?; + let status = value.get(Status::db_key_name()).ok_or(ItemError::FormatError)?; let status: u32 = status .as_n() .map_err(|_| ItemError::FormatError)? diff --git a/crates/worker/src/commands/compile.rs b/crates/worker/src/commands/compile.rs index 5befeb12..136e31aa 100644 --- a/crates/worker/src/commands/compile.rs +++ b/crates/worker/src/commands/compile.rs @@ -69,7 +69,7 @@ pub async fn compile( .key("ID", AttributeValue::S(request.id.clone())) .update_expression("SET #status = :newStatus") .condition_expression("#status = :currentStatus") - .expression_attribute_names("#status", "Status") + .expression_attribute_names("#status", Status::db_key_name()) .expression_attribute_values( ":newStatus", AttributeValue::N(u32::from(Status::Compiling).to_string()), @@ -154,7 +154,7 @@ pub async fn on_compilation_success( .table_name(db_client.table_name.clone()) .key("ID", AttributeValue::S(id.to_string())) .update_expression("SET #status = :newStatus, #data = :data") - .expression_attribute_names("#status", "Status") + .expression_attribute_names("#status", Status::db_key_name()) .expression_attribute_names("#data", "Data") .expression_attribute_values( ":newStatus", @@ -179,7 +179,7 @@ pub async fn on_compilation_failed( .table_name(db_client.table_name.clone()) .key("ID", AttributeValue::S(id.to_string())) .update_expression("SET #status = :newStatus, #data = :data") - .expression_attribute_names("#status", "Status") + .expression_attribute_names("#status", Status::db_key_name()) .expression_attribute_names("#data", "Data") .expression_attribute_values( ":newStatus", diff --git a/crates/worker/src/sqs_client/wrapper.rs b/crates/worker/src/sqs_client/wrapper.rs index 3266a65a..5b2e000f 100644 --- a/crates/worker/src/sqs_client/wrapper.rs +++ b/crates/worker/src/sqs_client/wrapper.rs @@ -52,7 +52,6 @@ impl SqsClientWrapper { } } - // TODO: start async fn worker(client: SqsClient, state: Arc, mut receiver: mpsc::Receiver) { const SLEEP_DURATION: Duration = Duration::from_secs(3); let mut pending_actions = vec![]; From 06d71d370da1f6f317b4014915c3058935c70a76 Mon Sep 17 00:00:00 2001 From: taco-paco Date: Mon, 9 Sep 2024 11:48:23 +0900 Subject: [PATCH 19/21] refactor: removed hardcoded attribute names --- crates/types/src/item.rs | 40 +++++++++++++++++++-------- crates/worker/src/commands/compile.rs | 19 +++++++------ crates/worker/src/dynamodb_client.rs | 
4 +-- 3 files changed, 42 insertions(+), 21 deletions(-) diff --git a/crates/types/src/item.rs b/crates/types/src/item.rs index 84618970..66b630a4 100644 --- a/crates/types/src/item.rs +++ b/crates/types/src/item.rs @@ -17,7 +17,7 @@ pub enum Status { } impl Status { - pub const fn db_key_name() -> &'static str { + pub const fn attribute_name() -> &'static str { "Status" } } @@ -54,22 +54,22 @@ impl From for HashMap { fn from(value: Status) -> Self { match value.clone() { Status::Pending | Status::Compiling => HashMap::from([( - Status::db_key_name().into(), + Status::attribute_name().into(), AttributeValue::N(u32::from(&value).to_string()), )]), Status::Ready { presigned_urls } => HashMap::from([ ( - Status::db_key_name().into(), + Status::attribute_name().into(), AttributeValue::N(u32::from(&value).to_string()), ), - ("Data".into(), AttributeValue::Ss(presigned_urls)), + (Item::data_attribute_name().into(), AttributeValue::Ss(presigned_urls)), ]), Status::Failed(val) => HashMap::from([ ( - Status::db_key_name().into(), + Status::attribute_name().into(), AttributeValue::N(u32::from(&value).to_string()), ), - ("Data".into(), AttributeValue::S(val)), + (Item::data_attribute_name().into(), AttributeValue::S(val)), ]), } } @@ -90,9 +90,27 @@ pub struct Item { // TODO: type: Compiling/Verifying } +impl Item { + pub const fn status_attribute_name() -> &'static str { + Status::attribute_name() + } + + pub const fn data_attribute_name() -> &'static str { + "Data" + } + + pub const fn id_attribute_name() -> &'static str { + "ID" + } + + pub const fn primary_key_name() -> &'static str { + Self::id_attribute_name() + } +} + impl From for HashMap { fn from(value: Item) -> Self { - let mut item_map = HashMap::from([("ID".into(), AttributeValue::S(value.id))]); + let mut item_map = HashMap::from([(Item::id_attribute_name().into(), AttributeValue::S(value.id))]); item_map.extend(HashMap::from(value.status)); item_map @@ -102,7 +120,7 @@ impl From for HashMap { impl TryFrom<&HashMap> for Status { type Error = ItemError; fn try_from(value: &HashMap) -> Result { - let status = value.get(Status::db_key_name()).ok_or(ItemError::FormatError)?; + let status = value.get(Status::attribute_name()).ok_or(ItemError::FormatError)?; let status: u32 = status .as_n() .map_err(|_| ItemError::FormatError)? 
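Since these From/TryFrom impls are the only place the table's attribute layout is defined, a round-trip test is cheap insurance against the status encoding drifting from the parser. A sketch under the post-patch API; Item does not derive PartialEq here, so the fields are compared by hand.

// Sketch: round-trip an Item through its DynamoDB attribute representation.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn item_round_trips_through_attribute_map() {
        let item = Item {
            id: "test-id".to_string(),
            status: Status::Ready {
                presigned_urls: vec!["https://example.com/artifact".to_string()],
            },
        };

        // From<Item> produces the attribute map; TryFrom parses it back.
        let map: HashMap<String, AttributeValue> = item.into();
        let restored = Item::try_from(map).expect("conversion should succeed");

        assert_eq!(restored.id, "test-id");
        match restored.status {
            Status::Ready { presigned_urls } => {
                assert_eq!(
                    presigned_urls,
                    vec!["https://example.com/artifact".to_string()]
                )
            }
            other => panic!("unexpected status: {}", other),
        }
    }
}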
@@ -111,7 +129,7 @@ impl TryFrom<&HashMap> for Status { 0 => Status::Pending, 1 => Status::Compiling, 2 => { - let data = value.get("Data").ok_or(ItemError::FormatError)?; + let data = value.get(Item::data_attribute_name()).ok_or(ItemError::FormatError)?; let data = data.as_ss().map_err(|_| ItemError::FormatError)?; Status::Ready { @@ -119,7 +137,7 @@ impl TryFrom<&HashMap> for Status { } } 3 => { - let data = value.get("Data").ok_or(ItemError::FormatError)?; + let data = value.get(Item::data_attribute_name()).ok_or(ItemError::FormatError)?; let data = data.as_s().map_err(|_| ItemError::FormatError)?; Status::Failed(data.clone()) @@ -134,7 +152,7 @@ impl TryFrom<&HashMap> for Status { impl TryFrom> for Item { type Error = ItemError; fn try_from(value: HashMap) -> Result { - let id = value.get("ID").ok_or(ItemError::FormatError)?; + let id = value.get(Item::id_attribute_name()).ok_or(ItemError::FormatError)?; let id = id.as_s().map_err(|_| ItemError::FormatError)?; let status = (&value).try_into()?; diff --git a/crates/worker/src/commands/compile.rs b/crates/worker/src/commands/compile.rs index 136e31aa..ba24377e 100644 --- a/crates/worker/src/commands/compile.rs +++ b/crates/worker/src/commands/compile.rs @@ -66,10 +66,13 @@ pub async fn compile( .client .update_item() .table_name(db_client.table_name.clone()) - .key("ID", AttributeValue::S(request.id.clone())) + .key( + Item::primary_key_name(), + AttributeValue::S(request.id.clone()), + ) .update_expression("SET #status = :newStatus") .condition_expression("#status = :currentStatus") - .expression_attribute_names("#status", Status::db_key_name()) + .expression_attribute_names("#status", Status::attribute_name()) .expression_attribute_values( ":newStatus", AttributeValue::N(u32::from(Status::Compiling).to_string()), @@ -152,10 +155,10 @@ pub async fn on_compilation_success( .client .update_item() .table_name(db_client.table_name.clone()) - .key("ID", AttributeValue::S(id.to_string())) + .key(Item::primary_key_name(), AttributeValue::S(id.to_string())) .update_expression("SET #status = :newStatus, #data = :data") - .expression_attribute_names("#status", Status::db_key_name()) - .expression_attribute_names("#data", "Data") + .expression_attribute_names("#status", Status::attribute_name()) + .expression_attribute_names("#data", Item::data_attribute_name()) .expression_attribute_values( ":newStatus", AttributeValue::N(2.to_string()), // Ready @@ -177,10 +180,10 @@ pub async fn on_compilation_failed( .client .update_item() .table_name(db_client.table_name.clone()) - .key("ID", AttributeValue::S(id.to_string())) + .key(Item::primary_key_name(), AttributeValue::S(id.to_string())) .update_expression("SET #status = :newStatus, #data = :data") - .expression_attribute_names("#status", Status::db_key_name()) - .expression_attribute_names("#data", "Data") + .expression_attribute_names("#status", Status::attribute_name()) + .expression_attribute_names("#data", Item::data_attribute_name()) .expression_attribute_values( ":newStatus", AttributeValue::N(3.to_string()), // Failed diff --git a/crates/worker/src/dynamodb_client.rs b/crates/worker/src/dynamodb_client.rs index 0c4b59c8..c5df3892 100644 --- a/crates/worker/src/dynamodb_client.rs +++ b/crates/worker/src/dynamodb_client.rs @@ -21,7 +21,7 @@ impl DynamoDBClient { self.client .delete_item() .table_name(self.table_name.clone()) - .key("ID", AttributeValue::S(id)) + .key(Item::primary_key_name(), AttributeValue::S(id)) .send() .await?; @@ -33,7 +33,7 @@ impl DynamoDBClient { .client .get_item() 
.table_name(self.table_name.clone()) - .key("ID", AttributeValue::S(id)) + .key(Item::primary_key_name(), AttributeValue::S(id)) .send() .await?; From b980d4b89a739e4f4d9c196b371bcbb9c9cda2ed Mon Sep 17 00:00:00 2001 From: taco-paco Date: Mon, 9 Sep 2024 12:40:19 +0900 Subject: [PATCH 20/21] refactor: pr comment fixes --- crates/types/src/item.rs | 13 +-- crates/worker/src/commands/compile.rs | 105 ++++++++++++------------ crates/worker/src/sqs_client/wrapper.rs | 10 +-- crates/worker/src/worker.rs | 3 - 4 files changed, 63 insertions(+), 68 deletions(-) diff --git a/crates/types/src/item.rs b/crates/types/src/item.rs index 66b630a4..4d474536 100644 --- a/crates/types/src/item.rs +++ b/crates/types/src/item.rs @@ -4,13 +4,14 @@ use std::collections::HashMap; use std::fmt; use std::fmt::Formatter; +pub type AttributeMap = HashMap; + #[derive(Debug, Clone, Serialize)] pub enum Status { // TODO: add FilesUploaded(?) Pending, Compiling, Ready { - // TODO: Legacy support via enum here. presigned_urls: Vec, }, Failed(String), @@ -108,7 +109,7 @@ impl Item { } } -impl From for HashMap { +impl From for AttributeMap { fn from(value: Item) -> Self { let mut item_map = HashMap::from([(Item::id_attribute_name().into(), AttributeValue::S(value.id))]); item_map.extend(HashMap::from(value.status)); @@ -117,9 +118,9 @@ impl From for HashMap { } } -impl TryFrom<&HashMap> for Status { +impl TryFrom<&AttributeMap> for Status { type Error = ItemError; - fn try_from(value: &HashMap) -> Result { + fn try_from(value: &AttributeMap) -> Result { let status = value.get(Status::attribute_name()).ok_or(ItemError::FormatError)?; let status: u32 = status .as_n() @@ -149,9 +150,9 @@ impl TryFrom<&HashMap> for Status { } } -impl TryFrom> for Item { +impl TryFrom for Item { type Error = ItemError; - fn try_from(value: HashMap) -> Result { + fn try_from(value: AttributeMap) -> Result { let id = value.get(Item::id_attribute_name()).ok_or(ItemError::FormatError)?; let id = id.as_s().map_err(|_| ItemError::FormatError)?; let status = (&value).try_into()?; diff --git a/crates/worker/src/commands/compile.rs b/crates/worker/src/commands/compile.rs index ba24377e..9375c319 100644 --- a/crates/worker/src/commands/compile.rs +++ b/crates/worker/src/commands/compile.rs @@ -61,44 +61,9 @@ pub async fn compile( let dir = format!("{}/", request.id); let files = s3_client.extract_files(&dir).await?; - { - let db_update_result = db_client - .client - .update_item() - .table_name(db_client.table_name.clone()) - .key( - Item::primary_key_name(), - AttributeValue::S(request.id.clone()), - ) - .update_expression("SET #status = :newStatus") - .condition_expression("#status = :currentStatus") - .expression_attribute_names("#status", Status::attribute_name()) - .expression_attribute_values( - ":newStatus", - AttributeValue::N(u32::from(Status::Compiling).to_string()), - ) - .expression_attribute_values( - ":currentStatus", - AttributeValue::N(u32::from(Status::Pending).to_string()), - ) - .send() - .await; - match db_update_result { - Ok(_) => {} - Err(SdkError::ServiceError(err)) => { - return match err.err() { - UpdateItemError::ConditionalCheckFailedException(_) => { - error!("Conditional check not met"); - Err(CompilationError::UnexpectedStatusError( - "Concurrent status change from another instance".into(), - )) - } - _ => Err(DBError::from(SdkError::ServiceError(err)).into()), - } - } - Err(err) => return Err(DBError::from(err).into()), - } - } + + // Update status to Compiling + try_set_compiling_status(db_client, &request.id).await?; 
match do_compile( &request.id, @@ -110,18 +75,54 @@ pub async fn compile( .await { Ok(value) => Ok(on_compilation_success(&request.id, db_client, s3_client, value).await?), - Err(err) => match err { - CompilationError::CompilationFailureError(value) => { - Ok(on_compilation_failed(&request.id, db_client, value).await?) + Err(CompilationError::CompilationFailureError(value)) => { + Ok(on_compilation_failed(&request.id, db_client, value).await?) + } + Err(CompilationError::VersionNotSupported(value)) => Ok(on_compilation_failed( + &request.id, + &db_client, + format!("Unsupported compiler version: {}", value), + ) + .await?), + Err(err) => Err(err), + } +} + +async fn try_set_compiling_status( + db_client: &DynamoDBClient, + key: &str, +) -> Result<(), CompilationError> { + let db_update_result = db_client + .client + .update_item() + .table_name(db_client.table_name.clone()) + .key(Item::primary_key_name(), AttributeValue::S(key.to_string())) + .update_expression("SET #status = :newStatus") + .condition_expression("#status = :currentStatus") + .expression_attribute_names("#status", Status::attribute_name()) + .expression_attribute_values( + ":newStatus", + AttributeValue::N(u32::from(Status::Compiling).to_string()), + ) + .expression_attribute_values( + ":currentStatus", + AttributeValue::N(u32::from(Status::Pending).to_string()), + ) + .send() + .await; + + match db_update_result { + Ok(_) => Ok(()), + Err(SdkError::ServiceError(err)) => match err.err() { + UpdateItemError::ConditionalCheckFailedException(_) => { + error!("Conditional check not met"); + Err(CompilationError::UnexpectedStatusError( + "Concurrent status change from another instance".into(), + )) } - CompilationError::VersionNotSupported(value) => Ok(on_compilation_failed( - &request.id, - &db_client, - format!("Unsupported compiler version: {}", value), - ) - .await?), - _ => Err(err), + _ => Err(DBError::from(SdkError::ServiceError(err)).into()), }, + Err(err) => Err(DBError::from(err).into()), } } @@ -197,9 +198,9 @@ pub async fn on_compilation_failed( pub async fn do_compile( namespace: &str, - compilation_request: CompilationInput, + compilation_input: CompilationInput, ) -> Result, CompilationError> { - let zksolc_version = compilation_request.config.version; + let zksolc_version = compilation_input.config.version; // check if the version is supported if !ZKSOLC_VERSIONS.contains(&zksolc_version.as_str()) { @@ -228,7 +229,7 @@ pub async fn do_compile( hardhat_config_builder .zksolc_version(&zksolc_version) .solidity_version(DEFAULT_SOLIDITY_VERSION); - if let Some(target_path) = compilation_request.config.target_path { + if let Some(target_path) = compilation_input.config.target_path { hardhat_config_builder.paths_sources(&target_path); } @@ -239,7 +240,7 @@ pub async fn do_compile( tokio::fs::write(hardhat_config_path, hardhat_config_content).await?; // filter test files from compilation candidates - let contracts = compilation_request + let contracts = compilation_input .contracts .into_iter() .filter(|contract| !contract.file_path.ends_with("_test.sol")) diff --git a/crates/worker/src/sqs_client/wrapper.rs b/crates/worker/src/sqs_client/wrapper.rs index 5b2e000f..557d3335 100644 --- a/crates/worker/src/sqs_client/wrapper.rs +++ b/crates/worker/src/sqs_client/wrapper.rs @@ -10,8 +10,10 @@ use tokio::select; use tokio::sync::{mpsc, oneshot}; use tokio::time::{sleep, Instant}; +#[derive(Default)] pub enum Action { - Default, + #[default] + Default, // TODO: get rid of this. 
crutches Receive(oneshot::Sender>), Delete { receipt_handle: String, @@ -19,12 +21,6 @@ pub enum Action { }, } -impl Default for Action { - fn default() -> Self { - Action::Default - } -} - enum State { Connected = 0, Reconnecting = 1, diff --git a/crates/worker/src/worker.rs b/crates/worker/src/worker.rs index a9bf86cc..bc4952d2 100644 --- a/crates/worker/src/worker.rs +++ b/crates/worker/src/worker.rs @@ -1,6 +1,5 @@ use std::num::NonZeroUsize; use std::time::Duration; -use tokio::sync::Mutex; use tokio::task::JoinHandle; use tracing::{error, warn}; use types::SqsMessage; @@ -156,5 +155,3 @@ impl RunningEngine { futures::future::join_all(self.worker_threads).await; } } - -// what are we purging? From d96fcb12adc15bc01f73c8203eb660e3fb07c0bd Mon Sep 17 00:00:00 2001 From: taco-paco Date: Mon, 9 Sep 2024 12:54:13 +0900 Subject: [PATCH 21/21] refactor: offload mod.rs --- crates/worker/src/main.rs | 4 ++-- .../src/{sqs_client/mod.rs => sqs_clients/client.rs} | 9 +++------ crates/worker/src/sqs_clients/mod.rs | 2 ++ crates/worker/src/{sqs_client => sqs_clients}/wrapper.rs | 2 +- crates/worker/src/sqs_listener.rs | 2 +- crates/worker/src/worker.rs | 2 +- 6 files changed, 10 insertions(+), 11 deletions(-) rename crates/worker/src/{sqs_client/mod.rs => sqs_clients/client.rs} (88%) create mode 100644 crates/worker/src/sqs_clients/mod.rs rename crates/worker/src/{sqs_client => sqs_clients}/wrapper.rs (99%) diff --git a/crates/worker/src/main.rs b/crates/worker/src/main.rs index 39ee2bcf..aea51598 100644 --- a/crates/worker/src/main.rs +++ b/crates/worker/src/main.rs @@ -3,7 +3,7 @@ mod dynamodb_client; mod errors; mod purgatory; mod s3_client; -mod sqs_client; +mod sqs_clients; mod sqs_listener; mod utils; mod worker; @@ -15,7 +15,7 @@ use std::num::NonZeroUsize; use crate::dynamodb_client::DynamoDBClient; use crate::purgatory::State; use crate::s3_client::S3Client; -use crate::sqs_client::wrapper::SqsClientWrapper; +use crate::sqs_clients::wrapper::SqsClientWrapper; use crate::worker::EngineBuilder; const AWS_PROFILE_DEFAULT: &str = "dev"; diff --git a/crates/worker/src/sqs_client/mod.rs b/crates/worker/src/sqs_clients/client.rs similarity index 88% rename from crates/worker/src/sqs_client/mod.rs rename to crates/worker/src/sqs_clients/client.rs index 722d16c3..c979c312 100644 --- a/crates/worker/src/sqs_client/mod.rs +++ b/crates/worker/src/sqs_clients/client.rs @@ -1,12 +1,9 @@ -use aws_config::retry::ErrorKind; use aws_sdk_sqs::operation::delete_message::DeleteMessageOutput; use aws_sdk_sqs::operation::receive_message::ReceiveMessageOutput; use aws_sdk_sqs::Client; use crate::errors::{SqsDeleteError, SqsReceiveError}; -pub mod wrapper; - macro_rules! match_result { ($err_type:ident, $result:expr) => { match $result { @@ -26,7 +23,7 @@ macro_rules! 
match_result { } if let Some(other) = dispatch_err.as_other() { return match other { - ErrorKind::ClientError => Err($err_type::DispatchFailure(dispatch_err)), + aws_config::retry::ErrorKind::ClientError => Err($err_type::DispatchFailure(dispatch_err)), _ => Ok(None), }; } @@ -52,7 +49,7 @@ impl SqsClient { } } - async fn receive_attempt(&self) -> Result, SqsReceiveError> { + pub async fn receive_attempt(&self) -> Result, SqsReceiveError> { let result = self .client .receive_message() @@ -64,7 +61,7 @@ impl SqsClient { match_result!(SqsReceiveError, result) } - async fn delete_attempt( + pub async fn delete_attempt( &self, receipt_handle: impl Into, ) -> Result, SqsDeleteError> { diff --git a/crates/worker/src/sqs_clients/mod.rs b/crates/worker/src/sqs_clients/mod.rs new file mode 100644 index 00000000..8402e4fc --- /dev/null +++ b/crates/worker/src/sqs_clients/mod.rs @@ -0,0 +1,2 @@ +pub mod client; +pub mod wrapper; diff --git a/crates/worker/src/sqs_client/wrapper.rs b/crates/worker/src/sqs_clients/wrapper.rs similarity index 99% rename from crates/worker/src/sqs_client/wrapper.rs rename to crates/worker/src/sqs_clients/wrapper.rs index 557d3335..ec054711 100644 --- a/crates/worker/src/sqs_client/wrapper.rs +++ b/crates/worker/src/sqs_clients/wrapper.rs @@ -1,5 +1,5 @@ use crate::errors::{SqsDeleteError, SqsReceiveError}; -use crate::sqs_client::SqsClient; +use crate::sqs_clients::client::SqsClient; use aws_sdk_sqs::operation::delete_message::DeleteMessageOutput; use aws_sdk_sqs::operation::receive_message::ReceiveMessageOutput; use aws_sdk_sqs::Client; diff --git a/crates/worker/src/sqs_listener.rs b/crates/worker/src/sqs_listener.rs index c88e7308..c8f5b151 100644 --- a/crates/worker/src/sqs_listener.rs +++ b/crates/worker/src/sqs_listener.rs @@ -5,7 +5,7 @@ use std::time::Duration; use tokio::task::JoinHandle; use tokio::time::sleep; -use crate::sqs_client::wrapper::SqsClientWrapper; +use crate::sqs_clients::wrapper::SqsClientWrapper; pub struct SqsListener { handle: JoinHandle>, diff --git a/crates/worker/src/worker.rs b/crates/worker/src/worker.rs index bc4952d2..4146c983 100644 --- a/crates/worker/src/worker.rs +++ b/crates/worker/src/worker.rs @@ -8,7 +8,7 @@ use crate::commands::compile::compile; use crate::dynamodb_client::DynamoDBClient; use crate::purgatory::{Purgatory, State}; use crate::s3_client::S3Client; -use crate::sqs_client::wrapper::SqsClientWrapper; +use crate::sqs_clients::wrapper::SqsClientWrapper; use crate::sqs_listener::{SqsListener, SqsReceiver}; pub struct EngineBuilder {