diff --git a/Cargo.lock b/Cargo.lock index ebc7608d..38e33333 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1496,6 +1496,7 @@ dependencies = [ "sp-io 38.0.0", "subxt", "tokio", + "tracing", "url 2.5.2 (git+https://github.com/domenukk/rust-url?branch=no_std)", "uuid 1.10.0", ] diff --git a/blueprint-manager/src/executor/event_handler.rs b/blueprint-manager/src/executor/event_handler.rs index cd9d984b..5c2736ab 100644 --- a/blueprint-manager/src/executor/event_handler.rs +++ b/blueprint-manager/src/executor/event_handler.rs @@ -9,7 +9,7 @@ use gadget_io::GadgetConfig; use gadget_sdk::clients::tangle::runtime::{TangleConfig, TangleEvent}; use gadget_sdk::clients::tangle::services::{RpcServicesWithBlueprint, ServicesClient}; use gadget_sdk::config::Protocol; -use gadget_sdk::logger::Logger; +use gadget_sdk::{error, info, trace, warn}; use std::fmt::Debug; use std::sync::atomic::Ordering; use tangle_subxt::subxt::utils::AccountId32; @@ -40,7 +40,6 @@ pub async fn handle_services<'a>( gadget_config: &GadgetConfig, blueprint_manager_opts: &BlueprintManagerConfig, active_gadgets: &mut ActiveGadgets, - logger: &Logger, ) -> color_eyre::Result<()> { for blueprint in blueprints { if let Err(err) = crate::sources::handle( @@ -48,11 +47,10 @@ pub async fn handle_services<'a>( gadget_config, blueprint_manager_opts, active_gadgets, - logger, ) .await { - logger.error(err) + error!("{err}"); } } @@ -68,7 +66,6 @@ pub struct EventPollResult { pub(crate) async fn check_blueprint_events( event: &TangleEvent, - logger: &Logger, active_gadgets: &mut ActiveGadgets, account_id: &AccountId32, ) -> EventPollResult { @@ -86,11 +83,11 @@ pub(crate) async fn check_blueprint_events( Ok(evt) => { if &evt.operator == account_id { result.blueprint_registrations.push(evt.blueprint_id); - logger.info(format!("Pre-registered event: {evt:?}")); + info!("Pre-registered event: {evt:?}"); } } Err(err) => { - logger.warn(format!("Error handling pre-registered event: {err:?}")); + warn!("Error handling pre-registered event: {err:?}"); } } } @@ -99,11 +96,11 @@ pub(crate) async fn check_blueprint_events( for evt in registered_events { match evt { Ok(evt) => { - logger.info(format!("Registered event: {evt:?}")); + info!("Registered event: {evt:?}"); result.needs_update = true; } Err(err) => { - logger.warn(format!("Error handling registered event: {err:?}")); + warn!("Error handling registered event: {err:?}"); } } } @@ -112,19 +109,16 @@ pub(crate) async fn check_blueprint_events( for evt in unregistered_events { match evt { Ok(evt) => { - logger.info(format!("Unregistered event: {evt:?}")); + info!("Unregistered event: {evt:?}"); if &evt.operator == account_id && active_gadgets.remove(&evt.blueprint_id).is_some() { - logger.info(format!( - "Removed services for blueprint_id: {}", - evt.blueprint_id, - )); + info!("Removed services for blueprint_id: {}", evt.blueprint_id,); result.needs_update = true; } } Err(err) => { - logger.warn(format!("Error handling unregistered event: {err:?}")); + warn!("Error handling unregistered event: {err:?}"); } } } @@ -133,10 +127,10 @@ pub(crate) async fn check_blueprint_events( for evt in service_initiated_events { match evt { Ok(evt) => { - logger.info(format!("Service initiated event: {evt:?}")); + info!("Service initiated event: {evt:?}"); } Err(err) => { - logger.warn(format!("Error handling service initiated event: {err:?}")); + warn!("Error handling service initiated event: {err:?}"); } } } @@ -145,10 +139,10 @@ pub(crate) async fn check_blueprint_events( for evt in job_called_events { match 
evt { Ok(evt) => { - logger.info(format!("Job called event: {evt:?}")); + info!("Job called event: {evt:?}"); } Err(err) => { - logger.warn(format!("Error handling job called event: {err:?}")); + warn!("Error handling job called event: {err:?}"); } } } @@ -157,12 +151,10 @@ pub(crate) async fn check_blueprint_events( for evt in job_result_submitted_events { match evt { Ok(evt) => { - logger.info(format!("Job result submitted event: {evt:?}")); + info!("Job result submitted event: {evt:?}"); } Err(err) => { - logger.warn(format!( - "Error handling job result submitted event: {err:?}" - )); + warn!("Error handling job result submitted event: {err:?}"); } } } @@ -174,16 +166,15 @@ pub(crate) async fn check_blueprint_events( pub(crate) async fn handle_tangle_event( event: &TangleEvent, blueprints: &[RpcServicesWithBlueprint], - logger: &Logger, gadget_config: &GadgetConfig, gadget_manager_opts: &BlueprintManagerConfig, active_gadgets: &mut ActiveGadgets, poll_result: EventPollResult, client: &ServicesClient, ) -> color_eyre::Result<()> { - logger.trace(format!("Received notification {}", event.number)); + info!("Received notification {}", event.number); const DEFAULT_PROTOCOL: Protocol = Protocol::Tangle; - logger.warn("Using Tangle protocol as default over Eigen. This is a temporary development workaround. You can alter this behavior here"); + warn!("Using Tangle protocol as default over Eigen. This is a temporary development workaround. You can alter this behavior here"); let mut registration_blueprints = vec![]; // First, check to see if we need to register any new services invoked by the PreRegistration event @@ -232,7 +223,6 @@ pub(crate) async fn handle_tangle_event( let fetcher = GithubBinaryFetcher { fetcher: gh.clone(), blueprint_id: blueprint.blueprint_id, - logger, gadget_name: blueprint.name.clone(), }; @@ -242,14 +232,13 @@ pub(crate) async fn handle_tangle_event( GadgetSourceFetcher::Testing(test) => { // TODO: demote to TRACE once proven to work if !gadget_manager_opts.test_mode { - logger.warn("Ignoring testing fetcher as we are not in test mode"); + warn!("Ignoring testing fetcher as we are not in test mode"); continue; } let fetcher = crate::sources::testing::TestSourceFetcher { fetcher: test.clone(), blueprint_id: blueprint.blueprint_id, - logger, gadget_name: blueprint.name.clone(), }; @@ -258,7 +247,7 @@ pub(crate) async fn handle_tangle_event( } _ => { - logger.warn("Blueprint does not contain a supported fetcher"); + warn!("Blueprint does not contain a supported fetcher"); continue; } } @@ -268,19 +257,16 @@ pub(crate) async fn handle_tangle_event( // Ensure that we have at least one fetcher if fetcher_candidates.is_empty() { - logger.warn(format!( - "No fetchers found for blueprint: {}", - blueprint.name, - )); + warn!("No fetchers found for blueprint: {}", blueprint.name,); continue; } // Ensure that we have a test fetcher if we are in test mode if gadget_manager_opts.test_mode && test_fetcher_idx.is_none() { - logger.warn(format!( + warn!( "No testing fetcher found for blueprint `{}` despite operating in TEST MODE", blueprint.name, - )); + ); continue; } @@ -292,10 +278,10 @@ pub(crate) async fn handle_tangle_event( // Ensure there is only a single candidate fetcher if fetcher_candidates.len() != 1 { - logger.warn(format!( + warn!( "Multiple fetchers found for blueprint: {}. 
Invalidating blueprint", blueprint.name, - )); + ); continue; } @@ -306,18 +292,17 @@ pub(crate) async fn handle_tangle_event( verified_blueprints.push(verified_blueprint); } else { - logger - .warn("Blueprint does not contain a native gadget and thus currently unsupported"); + warn!("Blueprint does not contain a native gadget and thus currently unsupported"); } } - logger.trace(format!( + trace!( "OnChain Verified Blueprints: {:?}", verified_blueprints .iter() .map(|r| format!("{r:?}")) .collect::>() - )); + ); // Step 3: Check to see if we need to start any new services handle_services( @@ -325,7 +310,6 @@ pub(crate) async fn handle_tangle_event( gadget_config, gadget_manager_opts, active_gadgets, - logger, ) .await?; @@ -335,9 +319,9 @@ pub(crate) async fn handle_tangle_event( // Loop through every (blueprint_id, service_id) running. See if the service is still on-chain. If not, kill it and add it to to_remove for (blueprint_id, process_handles) in &mut *active_gadgets { for service_id in process_handles.keys() { - logger.info(format!( + info!( "Checking service for on-chain termination: bid={blueprint_id}//sid={service_id}" - )); + ); // Since the below "verified blueprints" were freshly obtained from an on-chain source, // we compare all these fresh values to see if we're running a service locally that is no longer on-chain @@ -346,9 +330,7 @@ pub(crate) async fn handle_tangle_event( // Safe assertion since we know there is at least one fetcher. All fetchers should have the same blueprint id let fetcher = &verified_blueprints.fetcher; if fetcher.blueprint_id() == *blueprint_id && !services.contains(service_id) { - logger.warn(format!( - "Killing service that is no longer on-chain: bid={blueprint_id}//sid={service_id}", - )); + warn!("Killing service that is no longer on-chain: bid={blueprint_id}//sid={service_id}"); to_remove.push((*blueprint_id, *service_id)); } } @@ -362,28 +344,22 @@ pub(crate) async fn handle_tangle_event( && !process_handle.0.load(Ordering::Relaxed) { // By removing any killed processes, we will auto-restart them on the next finality notification if required - logger.warn("Killing service that has died to allow for auto-restart"); + warn!("Killing service that has died to allow for auto-restart"); to_remove.push((*blueprint_id, *service_id)); } } } for (blueprint_id, service_id) in to_remove { - logger.warn(format!( - "Removing service that is no longer active on-chain or killed: bid={blueprint_id}//sid={service_id}", - )); + warn!("Removing service that is no longer active on-chain or killed: bid={blueprint_id}//sid={service_id}"); let mut should_delete_blueprint = false; if let Some(gadgets) = active_gadgets.get_mut(&blueprint_id) { if let Some((_, mut process_handle)) = gadgets.remove(&service_id) { if let Some(abort_handle) = process_handle.take() { if abort_handle.send(()).is_err() { - logger.error(format!( - "Failed to send abort signal to service: bid={blueprint_id}//sid={service_id}", - )); + error!("Failed to send abort signal to service: bid={blueprint_id}//sid={service_id}"); } else { - logger.warn(format!( - "Sent abort signal to service: bid={blueprint_id}//sid={service_id}", - )); + warn!("Sent abort signal to service: bid={blueprint_id}//sid={service_id}"); } } } diff --git a/blueprint-manager/src/executor/mod.rs b/blueprint-manager/src/executor/mod.rs index 831b9dbe..e9ba009a 100644 --- a/blueprint-manager/src/executor/mod.rs +++ b/blueprint-manager/src/executor/mod.rs @@ -9,10 +9,10 @@ use gadget_io::GadgetConfig; use 
gadget_sdk::clients::tangle::runtime::{TangleConfig, TangleRuntimeClient}; use gadget_sdk::clients::tangle::services::{RpcServicesWithBlueprint, ServicesClient}; use gadget_sdk::clients::Client; +use gadget_sdk::info; use gadget_sdk::keystore::backend::fs::FilesystemKeystore; use gadget_sdk::keystore::backend::GenericKeyStore; use gadget_sdk::keystore::{sp_core_subxt, BackendExt, TanglePairSigner}; -use gadget_sdk::logger::Logger; use sp_core::H256; use std::collections::HashMap; use std::future::Future; @@ -44,7 +44,7 @@ pub struct BlueprintManagerHandle { shutdown_call: Option>, start_tx: Option>, running_task: JoinHandle>, - logger: Logger, + span: tracing::Span, sr25519_id: TanglePairSigner, ecdsa_id: gadget_sdk::keystore::TanglePairSigner, keystore_uri: String, @@ -53,10 +53,11 @@ pub struct BlueprintManagerHandle { impl BlueprintManagerHandle { /// Send a start signal to the blueprint manager pub fn start(&mut self) -> color_eyre::Result<()> { + let _span = self.span.enter(); match self.start_tx.take() { Some(tx) => match tx.send(()) { Ok(_) => { - self.logger.info("Start signal sent to Blueprint Manager"); + info!("Start signal sent to Blueprint Manager"); Ok(()) } Err(_) => Err(Report::msg( @@ -86,15 +87,14 @@ impl BlueprintManagerHandle { .map_err(|_| Report::msg("Failed to send shutdown signal to Blueprint Manager")) } - /// Returns the logger for this blueprint manager - pub fn logger(&self) -> &Logger { - &self.logger - } - /// Returns the keystore URI for this blueprint manager pub fn keystore_uri(&self) -> &str { &self.keystore_uri } + + pub fn span(&self) -> &tracing::Span { + &self.span + } } /// Add default behavior for unintentional dropping of the BlueprintManagerHandle @@ -142,11 +142,10 @@ pub async fn run_blueprint_manager>( "Local" }; - let logger = Logger::from(format!("Blueprint-Manager-{}", logger_id)); + let span = tracing::info_span!("Blueprint-Manager-{}", logger_id); - logger.info("Starting blueprint manager ... waiting for start signal ..."); - - let logger_clone = logger.clone(); + let _span = span.enter(); + info!("Starting blueprint manager ... 
waiting for start signal ..."); let (tangle_key, ecdsa_key) = { let keystore = GenericKeyStore::::Fs(FilesystemKeystore::open( @@ -161,12 +160,11 @@ pub async fn run_blueprint_manager>( let tangle_client = TangleRuntimeClient::from_url(gadget_config.url.as_str(), sub_account_id.clone()).await?; - let services_client = ServicesClient::new(logger.clone(), tangle_client.client()); + let services_client = ServicesClient::new(tangle_client.client()); let mut active_gadgets = HashMap::new(); let keystore_uri = gadget_config.keystore_uri.clone(); - let logger_manager = logger.clone(); let manager_task = async move { // With the basics setup, we must now implement the main logic of the Blueprint Manager // Handle initialization logic @@ -175,7 +173,6 @@ pub async fn run_blueprint_manager>( let mut operator_subscribed_blueprints = handle_init( &tangle_client, &services_client, - &logger_manager, &sub_account_id, &mut active_gadgets, &gadget_config, @@ -188,7 +185,6 @@ pub async fn run_blueprint_manager>( while let Some(event) = tangle_client.next_event().await { let result = event_handler::check_blueprint_events( &event, - &logger_manager, &mut active_gadgets, &sub_account_id.clone(), ) @@ -204,7 +200,6 @@ pub async fn run_blueprint_manager>( event_handler::handle_tangle_event( &event, &operator_subscribed_blueprints, - &logger_manager, &gadget_config, &blueprint_manager_config, &mut active_gadgets, @@ -219,15 +214,14 @@ pub async fn run_blueprint_manager>( let (tx_stop, rx_stop) = tokio::sync::oneshot::channel::<()>(); - let logger = logger.clone(); let shutdown_task = async move { tokio::select! { _res0 = shutdown_cmd => { - logger.info("Shutdown-1 command received, closing application"); + info!("Shutdown-1 command received, closing application"); }, _res1 = rx_stop => { - logger.info("Manual shutdown signal received, closing application"); + info!("Manual shutdown signal received, closing application"); } } }; @@ -250,13 +244,14 @@ pub async fn run_blueprint_manager>( } }; + drop(_span); let handle = tokio::spawn(combined_task); let handle = BlueprintManagerHandle { start_tx: Some(start_tx), shutdown_call: Some(tx_stop), running_task: handle, - logger: logger_clone, + span, sr25519_id: tangle_key, ecdsa_id: ecdsa_key, keystore_uri, @@ -272,13 +267,13 @@ pub async fn run_blueprint_manager>( async fn handle_init( tangle_runtime: &TangleRuntimeClient, services_client: &ServicesClient, - logger: &Logger, sub_account_id: &AccountId32, active_gadgets: &mut ActiveGadgets, gadget_config: &GadgetConfig, blueprint_manager_config: &BlueprintManagerConfig, ) -> color_eyre::Result> { - logger.info("Beginning initialization of Blueprint Manager"); + info!("Beginning initialization of Blueprint Manager"); + let (operator_subscribed_blueprints, init_event) = if let Some(event) = tangle_runtime.next_event().await { ( @@ -291,20 +286,18 @@ async fn handle_init( return Err(Report::msg("Failed to get initial block hash")); }; - logger.info(format!( + info!( "Received {} initial blueprints this operator is registered to", operator_subscribed_blueprints.len() - )); + ); // Immediately poll, handling the initial state let poll_result = - event_handler::check_blueprint_events(&init_event, logger, active_gadgets, sub_account_id) - .await; + event_handler::check_blueprint_events(&init_event, active_gadgets, sub_account_id).await; event_handler::handle_tangle_event( &init_event, &operator_subscribed_blueprints, - logger, gadget_config, blueprint_manager_config, active_gadgets, diff --git 
a/blueprint-manager/src/sdk/setup.rs b/blueprint-manager/src/sdk/setup.rs index abcb3a16..263a28ff 100644 --- a/blueprint-manager/src/sdk/setup.rs +++ b/blueprint-manager/src/sdk/setup.rs @@ -3,7 +3,6 @@ use std::time::Duration; use gadget_io::tokio::task::JoinHandle; use gadget_sdk::clients::tangle::runtime::TangleRuntimeClient; -use gadget_sdk::logger::Logger; use gadget_sdk::network::Network; use gadget_sdk::prometheus::PrometheusConfig; use gadget_sdk::store::{ECDSAKeyStore, KeyValueStoreBackend}; @@ -12,6 +11,7 @@ use sp_core::{keccak_256, sr25519, Pair}; use crate::sdk::config::SingleGadgetConfig; pub use gadget_io::KeystoreContainer; use gadget_io::SubstrateKeystore; +use gadget_sdk::debug; use gadget_sdk::network::gossip::GossipHandle; use gadget_sdk::network::setup::NetworkConfig; use itertools::Itertools; @@ -42,7 +42,6 @@ pub async fn generate_node_input( let keystore_config = KeystoreContainer::new(&config.keystore)?; let (ecdsa_key, acco_key) = (keystore_config.ecdsa_key()?, keystore_config.sr25519_key()?); //let network_key = ed25519::Pair::from_seed(&config.node_key).to_raw_vec(); - let logger = Logger::default(); let keystore = ECDSAKeyStore::new(config.keystore_backend.clone(), ecdsa_key.clone()); // Use the first 32 bytes of the sr25519 account key as the network key. We discard the 32 remaining nonce seed bytes @@ -69,15 +68,14 @@ pub async fn generate_node_input( config.bind_ip, config.bind_port, network_ids.clone(), - logger.clone(), ); let (networks, network_task) = gadget_sdk::network::setup::multiplexed_libp2p_network(libp2p_config) .map_err(|e| color_eyre::eyre::eyre!("Failed to setup network: {e}"))?; - logger.debug("Successfully initialized network, now waiting for bootnodes to connect ..."); - wait_for_connection_to_bootnodes(&config.bootnodes, &networks, &logger).await?; + debug!("Successfully initialized network, now waiting for bootnodes to connect ..."); + wait_for_connection_to_bootnodes(&config.bootnodes, &networks).await?; let client = TangleRuntimeClient::from_url(&config.bind_ip.to_string(), acco_key.public().0.into()) @@ -107,34 +105,31 @@ pub async fn generate_node_input( pub async fn wait_for_connection_to_bootnodes( bootnodes: &[Multiaddr], handles: &HashMap, - logger: &Logger, ) -> color_eyre::Result<()> { let n_required = bootnodes.len(); let n_networks = handles.len(); - logger.debug(format!( - "Waiting for {n_required} peers to show up across {n_networks} networks" - )); + debug!("Waiting for {n_required} peers to show up across {n_networks} networks"); let mut tasks = gadget_io::tokio::task::JoinSet::new(); // For each network, we start a task that checks if we have enough peers connected // and then we wait for all of them to finish. 
- let wait_for_peers = |handle: GossipHandle, n_required, logger: Logger| async move { + let wait_for_peers = |handle: GossipHandle, n_required| async move { 'inner: loop { let n_connected = handle.connected_peers(); if n_connected >= n_required { break 'inner; } let topic = handle.topic(); - logger.debug(format!("`{topic}`: We currently have {n_connected}/{n_required} peers connected to network")); + debug!("`{topic}`: We currently have {n_connected}/{n_required} peers connected to network"); gadget_io::tokio::time::sleep(Duration::from_millis(1000)).await; } }; for handle in handles.values() { - tasks.spawn(wait_for_peers(handle.clone(), n_required, logger.clone())); + tasks.spawn(wait_for_peers(handle.clone(), n_required)); } // Wait for all tasks to finish while tasks.join_next().await.is_some() {} diff --git a/blueprint-manager/src/sdk/utils.rs b/blueprint-manager/src/sdk/utils.rs index ee2da44b..4df5e6de 100644 --- a/blueprint-manager/src/sdk/utils.rs +++ b/blueprint-manager/src/sdk/utils.rs @@ -2,7 +2,7 @@ use crate::config::BlueprintManagerConfig; use crate::protocols::resolver::NativeGithubMetadata; use gadget_io::GadgetConfig; use gadget_sdk::config::Protocol; -use gadget_sdk::logger::Logger; +use gadget_sdk::{info, warn}; use sha2::Digest; use std::path::Path; use std::string::FromUtf8Error; @@ -161,19 +161,17 @@ pub fn is_windows() -> bool { pub fn generate_running_process_status_handle( process: gadget_io::tokio::process::Child, - logger: &Logger, service_name: &str, ) -> (Arc, gadget_io::tokio::sync::oneshot::Sender<()>) { let (stop_tx, stop_rx) = gadget_io::tokio::sync::oneshot::channel::<()>(); let status = Arc::new(AtomicBool::new(true)); let status_clone = status.clone(); - let logger = logger.clone(); let service_name = service_name.to_string(); let task = async move { - logger.info(format!("Starting process execution for {service_name}")); + info!("Starting process execution for {service_name}"); let output = process.wait_with_output().await; - logger.warn(format!("Process for {service_name} exited: {output:?}")); + warn!("Process for {service_name} exited: {output:?}"); status_clone.store(false, Ordering::Relaxed); }; diff --git a/blueprint-manager/src/sources/github.rs b/blueprint-manager/src/sources/github.rs index 8d63708b..0e3fa7b9 100644 --- a/blueprint-manager/src/sources/github.rs +++ b/blueprint-manager/src/sources/github.rs @@ -6,20 +6,19 @@ use crate::sdk::utils::{ use crate::sources::BinarySourceFetcher; use async_trait::async_trait; use color_eyre::eyre::OptionExt; -use gadget_sdk::logger::Logger; +use gadget_sdk::{error, info}; use std::path::PathBuf; use tangle_subxt::tangle_testnet_runtime::api::runtime_types::tangle_primitives::services::GithubFetcher; use tokio::io::AsyncWriteExt; -pub struct GithubBinaryFetcher<'a> { +pub struct GithubBinaryFetcher { pub fetcher: GithubFetcher, pub blueprint_id: u64, - pub logger: &'a Logger, pub gadget_name: String, } #[async_trait] -impl BinarySourceFetcher for GithubBinaryFetcher<'_> { +impl BinarySourceFetcher for GithubBinaryFetcher { async fn get_binary(&self) -> color_eyre::Result { let relevant_binary = get_gadget_binary(&self.fetcher.binaries.0) .ok_or_eyre("Unable to find matching binary")?; @@ -32,8 +31,7 @@ impl BinarySourceFetcher for GithubBinaryFetcher<'_> { binary_download_path += ".exe" } - self.logger - .info(format!("Downloading to {binary_download_path}")); + info!("Downloading to {binary_download_path}"); // Check if the binary exists, if not download it let retrieved_hash = if 
!valid_file_exists(&binary_download_path, &expected_hash).await { @@ -58,10 +56,10 @@ impl BinarySourceFetcher for GithubBinaryFetcher<'_> { if let Some(retrieved_hash) = retrieved_hash { if retrieved_hash.trim() != expected_hash.trim() { - self.logger.error(format!( + error!( "Binary hash {} mismatched expected hash of {} for protocol: {}", retrieved_hash, expected_hash, self.gadget_name - )); + ); return Ok(PathBuf::from(binary_download_path)); } } diff --git a/blueprint-manager/src/sources/mod.rs b/blueprint-manager/src/sources/mod.rs index 9ad8d952..6dd2ae7f 100644 --- a/blueprint-manager/src/sources/mod.rs +++ b/blueprint-manager/src/sources/mod.rs @@ -6,7 +6,7 @@ use crate::sdk::utils::{ }; use async_trait::async_trait; use gadget_io::GadgetConfig; -use gadget_sdk::logger::Logger; +use gadget_sdk::{error, info, warn}; use std::path::PathBuf; pub mod github; @@ -25,7 +25,6 @@ pub async fn handle<'a>( gadget_config: &GadgetConfig, blueprint_manager_opts: &BlueprintManagerConfig, active_gadgets: &mut ActiveGadgets, - logger: &Logger, ) -> color_eyre::Result<()> { let blueprint_source = &blueprint.fetcher; let blueprint = &blueprint.blueprint; @@ -42,7 +41,7 @@ pub async fn handle<'a>( binary_download_path.set_extension("exe"); } } else if let Err(err) = chmod_x_file(&binary_download_path).await { - logger.warn(format!("Failed to chmod +x the binary: {err}")); + warn!("Failed to chmod +x the binary: {err}"); } for service_id in &blueprint.services { @@ -74,9 +73,7 @@ pub async fn handle<'a>( env_vars.push(("REGISTRATION_MODE_ON".to_string(), "true".to_string())); } - logger.info(format!( - "Starting protocol: {sub_service_str} with args: {arguments:?}" - )); + info!("Starting protocol: {sub_service_str} with args: {arguments:?}"); // Now that the file is loaded, spawn the process let process_handle = tokio::process::Command::new(&binary_download_path) @@ -93,22 +90,19 @@ pub async fn handle<'a>( // We must wait for the process to exit successfully let status = process_handle.wait_with_output().await?; if !status.status.success() { - logger.error(format!( + error!( "Protocol (registration mode) {sub_service_str} failed to execute: {status:?}" - )); + ); } else { - logger.info(format!( + info!( "***Protocol (registration mode) {sub_service_str} executed successfully***" - )); + ); } } else { // A normal running gadget binary. 
Store the process handle and let the event loop handle the rest - let (status_handle, abort) = generate_running_process_status_handle( - process_handle, - logger, - &sub_service_str, - ); + let (status_handle, abort) = + generate_running_process_status_handle(process_handle, &sub_service_str); active_gadgets .entry(blueprint_id) diff --git a/blueprint-manager/src/sources/testing.rs b/blueprint-manager/src/sources/testing.rs index 81326409..170ea484 100644 --- a/blueprint-manager/src/sources/testing.rs +++ b/blueprint-manager/src/sources/testing.rs @@ -1,19 +1,18 @@ use crate::sources::BinarySourceFetcher; use async_trait::async_trait; use color_eyre::Report; -use gadget_sdk::logger::Logger; +use gadget_sdk::{info, trace}; use std::path::PathBuf; use tangle_subxt::tangle_testnet_runtime::api::runtime_types::tangle_primitives::services::TestFetcher; -pub struct TestSourceFetcher<'a> { +pub struct TestSourceFetcher { pub fetcher: TestFetcher, pub blueprint_id: u64, - pub logger: &'a Logger, pub gadget_name: String, } #[async_trait] -impl BinarySourceFetcher for TestSourceFetcher<'_> { +impl BinarySourceFetcher for TestSourceFetcher { async fn get_binary(&self) -> color_eyre::Result { // Step 1: Build the binary. It will be stored in the root directory/bin/ let TestFetcher { @@ -31,11 +30,9 @@ impl BinarySourceFetcher for TestSourceFetcher<'_> { let binary_path = git_repo_root.join(&base_path).join("bin").join(&cargo_bin); let binary_path = std::path::absolute(&binary_path)?; - self.logger - .trace(format!("Base Path: {}", base_path.display())); - self.logger - .trace(format!("Binary Path: {}", binary_path.display())); - self.logger.info("Building binary..."); + trace!("Base Path: {}", base_path.display()); + trace!("Binary Path: {}", binary_path.display()); + info!("Building binary..."); let env = std::env::vars().collect::>(); @@ -73,10 +70,7 @@ impl BinarySourceFetcher for TestSourceFetcher<'_> { ))); } - self.logger.info(format!( - "Successfully built binary to {}", - binary_path.display() - )); + info!("Successfully built binary to {}", binary_path.display()); Ok(binary_path) } diff --git a/blueprint-test-utils/Cargo.toml b/blueprint-test-utils/Cargo.toml index fad16f3a..dc8b8c6f 100644 --- a/blueprint-test-utils/Cargo.toml +++ b/blueprint-test-utils/Cargo.toml @@ -26,6 +26,7 @@ gadget-sdk = { workspace = true } uuid = { workspace = true, features = ["v4"] } subxt = { workspace = true, features = ["substrate-compat"] } color-eyre = { workspace = true } +tracing = { workspace = true } url = { workspace = true } cargo-tangle = { workspace = true, default-features = true } diff --git a/blueprint-test-utils/src/lib.rs b/blueprint-test-utils/src/lib.rs index 5b15af03..4e37d41c 100644 --- a/blueprint-test-utils/src/lib.rs +++ b/blueprint-test-utils/src/lib.rs @@ -4,7 +4,6 @@ use blueprint_manager::config::BlueprintManagerConfig; use blueprint_manager::executor::BlueprintManagerHandle; use gadget_io::{GadgetConfig, SupportedChains}; use gadget_sdk::clients::tangle::runtime::{TangleClient}; -use gadget_sdk::logger::Logger; use gadget_sdk::tangle_subxt::tangle_testnet_runtime::api; use gadget_sdk::tangle_subxt::tangle_testnet_runtime::api::runtime_types; use gadget_sdk::tangle_subxt::tangle_testnet_runtime::api::services::calls::types::call::{Args, Job}; @@ -26,8 +25,9 @@ use subxt::tx::Signer; use subxt::utils::AccountId32; use url::Url; use uuid::Uuid; +use gadget_sdk::{info, error}; -pub use gadget_sdk::logger::setup_log; +pub use gadget_sdk::logging::setup_log; #[allow(unused_imports)] use 
cargo_tangle::deploy::Opts; @@ -208,9 +208,8 @@ pub async fn create_blueprint( pub async fn join_delegators( client: &TestClient, account_id: &TanglePairSigner, - logger: &Logger, ) -> Result<(), Box> { - logger.info("Joining delegators ..."); + info!("Joining delegators ..."); let call_pre = api::tx() .multi_asset_delegation() .join_operators(1_000_000_000_000_000); @@ -229,11 +228,8 @@ pub async fn register_blueprint( blueprint_id: u64, preferences: Preferences, registration_args: RegistrationArgs, - logger: &Logger, ) -> Result<(), Box> { - logger.info(format!( - "Registering to blueprint {blueprint_id} to become an operator ..." - )); + info!("Registering to blueprint {blueprint_id} to become an operator ..."); let call = api::tx() .services() .register(blueprint_id, preferences, registration_args); @@ -288,17 +284,16 @@ pub async fn wait_for_completion_of_tangle_job( client: &TestClient, service_id: u64, call_id: u64, - logger: &Logger, required_count: usize, ) -> Result> { let mut count = 0; loop { let events = client.events().at_latest().await?; let results = events.find::().collect::>(); - logger.info(format!( + info!( "Waiting for job completion. Found {} results ...", results.len() - )); + ); for result in results { match result { Ok(result) => { @@ -310,7 +305,7 @@ pub async fn wait_for_completion_of_tangle_job( } } Err(err) => { - logger.error(format!("Failed to get job result: {err}")); + error!("Failed to get job result: {err}"); } } } @@ -397,7 +392,7 @@ macro_rules! test_blueprint { run_test_blueprint_manager, ) .await - .execute_with_async(move |client, handles, logger| async move { + .execute_with_async(move |client, handles| async move { let keypair = handles[0].sr25519_id().clone(); let service_id = get_next_service_id(client) .await @@ -406,9 +401,9 @@ macro_rules! test_blueprint { .await .expect("Failed to get next job id"); - logger.info(format!( + info!( "Submitting job with params service ID: {service_id}, call ID: {call_id}" - )); + ); let mut job_args = Args::new(); for input in [$($input),+] { @@ -425,7 +420,7 @@ macro_rules! 
test_blueprint { .await .expect("Failed to submit job"); - let job_results = wait_for_completion_of_tangle_job(client, service_id, call_id, logger, $N) + let job_results = wait_for_completion_of_tangle_job(client, service_id, call_id, $N) .await .expect("Failed to wait for job completion"); @@ -463,6 +458,7 @@ mod tests_standard { use crate::test_ext::new_test_ext_blueprint_manager; use cargo_tangle::deploy::Opts; use gadget_sdk::logger::setup_log; + use gadget_sdk::{error, info}; /// This test requires that `yarn install` has been executed inside the /// `./blueprints/incredible-squaring/` directory @@ -493,7 +489,7 @@ mod tests_standard { new_test_ext_blueprint_manager::<5, 1, (), _, _>((), opts, run_test_blueprint_manager) .await - .execute_with_async(move |client, handles, logger| async move { + .execute_with_async(move |client, handles| async move { // At this point, blueprint has been deployed, every node has registered // as an operator for the relevant services, and, all gadgets are running @@ -510,9 +506,7 @@ mod tests_standard { .expect("Failed to get next job id") .saturating_sub(1); - logger.info(format!( - "Submitting job with params service ID: {service_id}, call ID: {call_id}" - )); + info!("Submitting job with params service ID: {service_id}, call ID: {call_id}"); // Pass the arguments let mut job_args = Args::new(); @@ -530,20 +524,15 @@ mod tests_standard { ) .await { - logger.error(format!("Failed to submit job: {err}")); + error!("Failed to submit job: {err}"); panic!("Failed to submit job: {err}"); } // Step 2: wait for the job to complete - let job_results = wait_for_completion_of_tangle_job( - client, - service_id, - call_id, - handles[0].logger(), - handles.len(), - ) - .await - .expect("Failed to wait for job completion"); + let job_results = + wait_for_completion_of_tangle_job(client, service_id, call_id, handles.len()) + .await + .expect("Failed to wait for job completion"); // Step 3: Get the job results, compare to expected value(s) let expected_result = diff --git a/blueprint-test-utils/src/test_ext.rs b/blueprint-test-utils/src/test_ext.rs index 2af6de7c..d66202ba 100644 --- a/blueprint-test-utils/src/test_ext.rs +++ b/blueprint-test-utils/src/test_ext.rs @@ -20,7 +20,6 @@ use blueprint_manager::executor::BlueprintManagerHandle; use blueprint_manager::sdk::entry::SendFuture; use cargo_tangle::deploy::Opts; use gadget_sdk::clients::tangle::runtime::TangleClient; -use gadget_sdk::logger::Logger; use gadget_sdk::tangle_subxt::subxt::OnlineClient; use gadget_sdk::tangle_subxt::tangle_testnet_runtime::api::runtime_types; use gadget_sdk::tangle_subxt::tangle_testnet_runtime::api::runtime_types::tangle_primitives::services::{ApprovalPrefrence, PriceTargets}; @@ -37,6 +36,8 @@ use std::path::PathBuf; use subxt::tx::Signer; use gadget_sdk::keystore::KeystoreUriSanitizer; use gadget_sdk::keystore::sp_core_subxt::Pair; +use tracing::Instrument; +use gadget_sdk::{error, info, warn}; const LOCAL_BIND_ADDR: &str = "127.0.0.1"; const LOCAL_TANGLE_NODE: &str = "ws://127.0.0.1:9944"; @@ -62,7 +63,8 @@ pub async fn new_test_ext_blueprint_manager< assert!(N > 0, "At least one node is required"); assert!(N <= NAME_IDS.len(), "Only up to 5 nodes are supported"); - let int_logger = Logger::from("Integration-Test"); + let span = tracing::info_span!("Integration-Test"); + let _span = span.enter(); let bind_addrs = (0..N) .map(|_| find_open_tcp_bind_port()) @@ -113,10 +115,9 @@ pub async fn new_test_ext_blueprint_manager< let tg_addr = handle.sr25519_id().account_id(); let evm_addr = 
handle.ecdsa_id().account_id(); - - int_logger.info(format!("Signer TG address: {tg_addr}")); - int_logger.info(format!("Signer EVM address: {evm_addr}")); - int_logger.info(format!("Signer EVM(alloy) address: {}", priv_key.address())); + info!("Signer TG address: {tg_addr}"); + info!("Signer EVM address: {evm_addr}"); + info!("Signer EVM(alloy) address: {}", priv_key.address()); if node_index == 0 { // Replace the None signer and signer_evm values inside opts with Alice's keys @@ -131,7 +132,7 @@ pub async fn new_test_ext_blueprint_manager< let blueprint_id = match cargo_tangle::deploy::deploy_to_tangle(opts).await { Ok(id) => id, Err(err) => { - int_logger.error(format!("Failed to deploy blueprint: {err}")); + error!("Failed to deploy blueprint: {err}"); panic!("Failed to deploy blueprint: {err}"); } }; @@ -153,7 +154,6 @@ pub async fn new_test_ext_blueprint_manager< let task = async move { let keypair = handle.sr25519_id().clone(); - let logger = handle.logger(); let key = runtime_types::sp_core::ecdsa::Public(handle.ecdsa_id().signer().public().0); let preferences = Preferences { @@ -168,16 +168,14 @@ pub async fn new_test_ext_blueprint_manager< }, }; - if let Err(err) = super::join_delegators(&client, &keypair, logger).await { + if let Err(err) = super::join_delegators(&client, &keypair).await { + let _span = handle.span().enter(); + let err_str = format!("{err}"); if err_str.contains("MultiAssetDelegation::AlreadyOperator") { - handle - .logger() - .warn(format!("{} is already an operator", keypair.account_id())); + warn!("{} is already an operator", keypair.account_id()); } else { - handle - .logger() - .error(format!("Failed to join delegators: {err}")); + error!("Failed to join delegators: {err}"); panic!("Failed to join delegators: {err}"); } } @@ -188,13 +186,10 @@ pub async fn new_test_ext_blueprint_manager< blueprint_id, preferences, registration_args.clone(), - handle.logger(), ) .await { - handle - .logger() - .error(format!("Failed to register as operator: {err}")); + error!("Failed to register as operator: {err}"); panic!("Failed to register as operator: {err}"); } @@ -215,13 +210,11 @@ pub async fn new_test_ext_blueprint_manager< .collect(); // Use Alice's account to register the service - int_logger.info(format!( - "Registering service for blueprint ID {blueprint_id} using Alice's keys ..." 
- )); + info!("Registering service for blueprint ID {blueprint_id} using Alice's keys ..."); if let Err(err) = super::register_service(&client, handles[0].sr25519_id(), blueprint_id, all_nodes).await { - int_logger.error(format!("Failed to register service: {err}")); + error!("Failed to register service: {err}"); panic!("Failed to register service: {err}"); } @@ -232,19 +225,21 @@ pub async fn new_test_ext_blueprint_manager< handle.start().expect("Failed to start blueprint manager"); } - int_logger.info("Waiting for all nodes to be online ..."); + info!("Waiting for all nodes to be online ..."); let all_paths = handles .iter() .map(|r| r.keystore_uri().to_string()) .map(PathBuf::from) .collect::>(); - wait_for_test_ready(all_paths, &int_logger).await; - int_logger.info("All nodes are online"); + wait_for_test_ready(all_paths).await; + info!("All nodes are online"); + + drop(_span); LocalhostTestExt { client, handles, - logger: int_logger, + span, } } @@ -260,45 +255,48 @@ fn find_open_tcp_bind_port() -> u16 { pub struct LocalhostTestExt { client: TangleClient, handles: Vec, - logger: Logger, + span: tracing::Span, } impl LocalhostTestExt { /// An identity function (For future reverse-compatible changes) pub fn execute_with< - T: FnOnce(&TangleClient, &Vec, &Logger) -> R + Send + 'static, + T: FnOnce(&TangleClient, &Vec) -> R + Send + 'static, R: Send + 'static, >( &self, function: T, ) -> R { - function(&self.client, &self.handles, &self.logger) + let _span = self.span.enter(); + function(&self.client, &self.handles) } /// An identity function (For future reverse-compatible changes) pub async fn execute_with_async< 'a, 'b: 'a, - T: FnOnce(&'a TangleClient, &'a Vec, &'a Logger) -> R + Send + 'a, + T: FnOnce(&'a TangleClient, &'a Vec) -> R + Send + 'a, R: Future + Send + 'a, Out: Send + 'b, >( &'a self, function: T, ) -> Out { - function(&self.client, &self.handles, &self.logger).await + function(&self.client, &self.handles) + .instrument(self.span.clone()) + .await } } /// `base_paths`: All the paths pointing to the keystore for each node /// This function returns when every test_started.tmp file exists -async fn wait_for_test_ready(base_paths: Vec, logger: &Logger) { +async fn wait_for_test_ready(base_paths: Vec) { let paths = base_paths .into_iter() .map(|r| r.join("test_started.tmp")) .map(|r| r.sanitize_file_path()) .collect::>(); - logger.info(format!("Waiting for these paths to exist: {paths:?}")); + info!("Waiting for these paths to exist: {paths:?}"); loop { let mut ready_count = 0; for path in &paths { @@ -311,10 +309,10 @@ async fn wait_for_test_ready(base_paths: Vec, logger: &Logger) { break; } - logger.info(format!( + info!( "Not all operators are ready yet ({ready_count}/{}). 
Waiting ...", paths.len() - )); + ); tokio::time::sleep(Duration::from_secs(3)).await; } } diff --git a/blueprints/ecdsa-threshold-mpc/src/jobs/keygen.rs b/blueprints/ecdsa-threshold-mpc/src/jobs/keygen.rs index 86e02cd9..1426a52c 100644 --- a/blueprints/ecdsa-threshold-mpc/src/jobs/keygen.rs +++ b/blueprints/ecdsa-threshold-mpc/src/jobs/keygen.rs @@ -4,7 +4,6 @@ use cggmp21::{ }; use gadget_sdk::{ self as sdk, - logger::Logger, network::{channels::UserID, IdentifierInfo, ProtocolMessage}, }; use generic_ec::Curve; @@ -102,7 +101,6 @@ pub async fn keygen( t, hd_wallet, rng, - &logger, &job_id_bytes[..], ) .await diff --git a/blueprints/ecdsa-threshold-mpc/src/mpc/keygen.rs b/blueprints/ecdsa-threshold-mpc/src/mpc/keygen.rs index 4c5b7304..0a39dfbe 100644 --- a/blueprints/ecdsa-threshold-mpc/src/mpc/keygen.rs +++ b/blueprints/ecdsa-threshold-mpc/src/mpc/keygen.rs @@ -12,7 +12,6 @@ use cggmp21::KeyShare; use cggmp21::PregeneratedPrimes; use digest::typenum::U32; use digest::Digest; -use gadget_sdk::logger::Logger; use gadget_sdk::network::channels::create_job_manager_to_async_protocol_channel_split_io; use gadget_sdk::network::channels::UserID; use gadget_sdk::network::deserialize; @@ -21,6 +20,7 @@ use gadget_sdk::network::IdentifierInfo; use gadget_sdk::network::Network; use gadget_sdk::network::ProtocolMessage; use gadget_sdk::store::KeyValueStoreBackend; +use gadget_sdk::{debug, trace}; use itertools::Itertools; use rand::rngs::{OsRng, StdRng}; use rand::{CryptoRng, RngCore, SeedableRng}; @@ -45,7 +45,6 @@ pub async fn run_and_serialize_keygen< D, R, >( - logger: &Logger, tracer: &mut PerfProfiler, eid: cggmp21::ExecutionId<'r>, i: u16, @@ -66,7 +65,7 @@ where .hd_wallet(hd_wallet) .start(&mut rng, party) .await?; - logger.debug("Finished AsyncProtocol - Keygen"); + debug!("Finished AsyncProtocol - Keygen"); serialize(&incomplete_key_share).map_err(Into::into) } @@ -78,7 +77,6 @@ pub async fn run_and_serialize_keyrefresh< H: Digest + Clone + Send + 'static, D, >( - logger: &Logger, incomplete_key_share: Vec, pregenerated_primes: PregeneratedPrimes, tracer: &mut PerfProfiler, @@ -103,8 +101,8 @@ where .start(&mut rng, party) .await?; let perf_report = tracer.get_report()?; - logger.trace(format!("Aux info protocol report: {perf_report}")); - logger.debug("Finished AsyncProtocol - Aux Info"); + trace!("Aux info protocol report: {perf_report}"); + debug!("Finished AsyncProtocol - Aux Info"); let key_share: KeyShare = DirtyKeyShare { core: incomplete_key_share.into_inner(), @@ -120,12 +118,11 @@ where async fn pregenerate_primes( tracer: &PerfProfiler, - logger: &Logger, job_id_bytes: &[u8], ) -> Result<(PerfProfiler, PregeneratedPrimes), Error> { let perf_report = tracer.get_report()?; - logger.trace(format!("Incomplete Keygen protocol report: {perf_report}")); - logger.debug("Finished AsyncProtocol - Incomplete Keygen"); + trace!("Incomplete Keygen protocol report: {perf_report}"); + debug!("Finished AsyncProtocol - Incomplete Keygen"); let tracer = PerfProfiler::new(); let pregenerated_primes_key = @@ -138,7 +135,7 @@ async fn pregenerate_primes( .await?; let elapsed = now.elapsed(); - logger.debug(format!("Pregenerated primes took {elapsed:?}")); + debug!("Pregenerated primes took {elapsed:?}"); Ok((tracer, pregenerated_primes)) } @@ -165,7 +162,6 @@ pub async fn run_full_keygen_protocol< t: u16, hd_wallet: bool, rng: StdRng, - logger: &Logger, job_id_bytes: &[u8], ) -> Result<(Vec, Vec), Error> { let (tx0, rx0, tx1, rx1) = create_job_manager_to_async_protocol_channel_split_io( @@ 
-174,13 +170,11 @@ pub async fn run_full_keygen_protocol< mapping, my_role_id, network, - logger.clone(), i, ); let delivery = (rx0, tx0); let party = MpcParty::, _, _>::connected(delivery); let incomplete_key_share = run_and_serialize_keygen::( - logger, &mut tracer, eid, i, @@ -192,12 +186,11 @@ pub async fn run_full_keygen_protocol< ) .await?; let (mut tracer, pregenerated_primes) = - pregenerate_primes::(&tracer, logger, job_id_bytes).await?; + pregenerate_primes::(&tracer, job_id_bytes).await?; let delivery = (rx1, tx1); let party = MpcParty::, _, _>::connected(delivery); let (key_share, serialized_public_key) = run_and_serialize_keyrefresh::( - logger, incomplete_key_share, pregenerated_primes, &mut tracer, diff --git a/blueprints/incredible-squaring/src/eigenlayer.rs b/blueprints/incredible-squaring/src/eigenlayer.rs index 739429df..d62fba84 100644 --- a/blueprints/incredible-squaring/src/eigenlayer.rs +++ b/blueprints/incredible-squaring/src/eigenlayer.rs @@ -19,6 +19,7 @@ use gadget_sdk::{ keystore::Backend, network::setup::{start_p2p_network, NetworkConfig}, run::GadgetRunner, + info }; use std::convert::Infallible; use std::ops::Deref; @@ -173,7 +174,7 @@ impl GadgetRunner for EigenlayerGadgetRunner { async fn register(&mut self) -> Result<()> { if self.env.test_mode { - self.env.logger.info("Skipping registration in test mode"); + info!("Skipping registration in test mode"); return Ok(()); } @@ -198,10 +199,8 @@ impl GadgetRunner for EigenlayerGadgetRunner { Default::default(), ); // send the tx to the tangle and exit. - let result = tx::tangle::send(&client, &signer, &xt, &self.env.logger).await?; - self.env - .logger - .info(format!("Registered operator with hash: {:?}", result)); + let result = tx::tangle::send(&client, &signer, &xt).await?; + info!("Registered operator with hash: {:?}", result); // Now we handle the EigenLayer portion of the Registration let keystore = self.env.keystore().map_err(|e| eyre!(e))?; @@ -301,7 +300,7 @@ impl GadgetRunner for EigenlayerGadgetRunner { // self.set_operator(operator); - tracing::info!("Registered operator for Eigenlayer"); + info!("Registered operator for Eigenlayer"); Ok(()) } @@ -313,11 +312,7 @@ impl GadgetRunner for EigenlayerGadgetRunner { // Tangle Portion of Run let _client = self.env.client().await.map_err(|e| eyre!(e))?; let signer = self.env.first_sr25519_signer().map_err(|e| eyre!(e))?; - let _logger = self.env.logger.clone(); - self.env.logger.info(format!( - "Starting the event watcher for {} ...", - signer.account_id() - )); + info!("Starting the event watcher for {} ...", signer.account_id()); // Then we handle the EigenLayer portion let keystore = self.env.keystore().map_err(|e| eyre!(e))?; @@ -372,7 +367,6 @@ impl GadgetRunner for EigenlayerGadgetRunner { bind_ip: self.env.bind_addr, bind_port: self.env.bind_port, topics: vec!["__TESTING_INCREDIBLE_SQUARING".to_string()], - logger: self.env.logger.clone(), }; let _network: GossipHandle = diff --git a/blueprints/incredible-squaring/src/main.rs b/blueprints/incredible-squaring/src/main.rs index a05351ea..f1b76f53 100644 --- a/blueprints/incredible-squaring/src/main.rs +++ b/blueprints/incredible-squaring/src/main.rs @@ -8,6 +8,7 @@ use gadget_sdk::{ runtime_types::{sp_core::ecdsa, tangle_primitives::services}, }, tx, + info }; use std::io::Write; use incredible_squaring_blueprint as blueprint; @@ -32,7 +33,7 @@ impl GadgetRunner for TangleGadgetRunner { async fn register(&mut self) -> Result<()> { // TODO: Use the function in blueprint-test-utils if self.env.test_mode 
{ - self.env.logger.info("Skipping registration in test mode"); + info!("Skipping registration in test mode"); return Ok(()); } @@ -62,10 +63,8 @@ impl GadgetRunner for TangleGadgetRunner { ); // send the tx to the tangle and exit. - let result = tx::tangle::send(&client, &signer, &xt, &self.env.logger).await?; - self.env - .logger - .info(format!("Registered operator with hash: {:?}", result)); + let result = tx::tangle::send(&client, &signer, &xt).await?; + info!("Registered operator with hash: {:?}", result); Ok(()) } @@ -76,22 +75,17 @@ impl GadgetRunner for TangleGadgetRunner { async fn run(&self) -> Result<()> { let client = self.env.client().await.map_err(|e| eyre!(e))?; let signer = self.env.first_sr25519_signer().map_err(|e| eyre!(e))?; - let logger = self.env.logger.clone(); - self.env.logger.info(format!( - "Starting the event watcher for {} ...", - signer.account_id() - )); + info!("Starting the event watcher for {} ...", signer.account_id()); let x_square = blueprint::XsquareEventHandler { service_id: self.env.service_id.unwrap(), signer, - logger, }; SubstrateEventWatcher::run( &TangleEventsWatcher { - logger: self.env.logger.clone(), + span: self.env.span.clone(), }, client, // Add more handler here if we have more functions. @@ -123,14 +117,13 @@ async fn create_gadget_runner( #[tokio::main] async fn main() -> Result<()> { - gadget_sdk::logger::setup_log(); + gadget_sdk::logging::setup_log(); // Load the environment and create the gadget runner let config = ContextConfig::from_args(); let (env, mut runner) = create_gadget_runner(config.clone()).await; - env.logger - .info("~~~ Executing the incredible squaring blueprint ~~~"); + info!("~~~ Executing the incredible squaring blueprint ~~~"); check_for_test(&env, &config)?; @@ -147,7 +140,7 @@ async fn main() -> Result<()> { #[allow(irrefutable_let_patterns)] fn check_for_test( - env: &GadgetConfiguration, + _env: &GadgetConfiguration, config: &ContextConfig, ) -> Result<()> { // create a file to denote we have started @@ -163,10 +156,7 @@ fn check_for_test( let path = base_path.sanitize_file_path().join("test_started.tmp"); let mut file = std::fs::File::create(&path)?; file.write_all(b"test_started")?; - env.logger.info(format!( - "Successfully wrote test file to {}", - path.display() - )) + info!("Successfully wrote test file to {}", path.display()) } Ok(()) diff --git a/blueprints/tangle-avs-blueprint/src/main.rs b/blueprints/tangle-avs-blueprint/src/main.rs index a12727e5..b2a52a76 100644 --- a/blueprints/tangle-avs-blueprint/src/main.rs +++ b/blueprints/tangle-avs-blueprint/src/main.rs @@ -1,7 +1,9 @@ use color_eyre::{eyre::eyre, Result}; +use gadget_sdk::env::{ContextConfig, GadgetConfiguration}; use gadget_sdk::{ env::Protocol, events_watcher::{substrate::SubstrateEventWatcher, tangle::TangleEventsWatcher}, + info, tangle_subxt::tangle_testnet_runtime::api::{ self, runtime_types::{sp_core::ecdsa, tangle_primitives::services}, @@ -9,7 +11,6 @@ use gadget_sdk::{ tx, }; use incredible_squaring_blueprint as blueprint; -use gadget_sdk::env::{ContextConfig, GadgetConfiguration}; use std::sync::Arc; use structopt::StructOpt; @@ -30,7 +31,7 @@ impl GadgetRunner for TangleGadgetRunner { async fn register(&self) -> Result<()> { // TODO: Use the function in blueprint-test-utils if self.env.test_mode { - self.env.logger.info("Skipping registration in test mode"); + info!("Skipping registration in test mode"); return Ok(()); } @@ -49,7 +50,7 @@ impl GadgetRunner for TangleGadgetRunner { // send the tx to the tangle and exit. 
let result = tx::tangle::send(&client, &signer, &xt).await?; - tracing::info!("Registered operator with hash: {:?}", result); + info!("Registered operator with hash: {:?}", result); Ok(()) } @@ -58,7 +59,6 @@ impl GadgetRunner for TangleGadgetRunner { bind_ip: self.env.bind_addr, bind_port: self.env.bind_port, test_mode: self.env.test_mode, - logger: self.env.logger.clone(), }; let env = gadget_sdk::env::load(None, config).map_err(|e| eyre!(e))?; @@ -70,7 +70,7 @@ impl GadgetRunner for TangleGadgetRunner { signer, }; - tracing::info!("Starting the event watcher ..."); + info!("Starting the event watcher ..."); SubstrateEventWatcher::run( &TangleEventsWatcher, diff --git a/macros/blueprint-proc-macro/src/job.rs b/macros/blueprint-proc-macro/src/job.rs index b9feaefa..1005678f 100644 --- a/macros/blueprint-proc-macro/src/job.rs +++ b/macros/blueprint-proc-macro/src/job.rs @@ -225,7 +225,7 @@ pub fn generate_event_handler_for( ).await { Ok(r) => r, Err(e) => { - tracing::error!("Error in job: {e}"); + ::gadget_sdk::error!("Error in job: {e}"); let error = gadget_sdk::events_watcher::Error::Handler(Box::new(e)); return Err(error); } @@ -239,7 +239,7 @@ pub fn generate_event_handler_for( ) { Ok(r) => r, Err(e) => { - tracing::error!("Error in job: {e}"); + ::gadget_sdk::error!("Error in job: {e}"); let error = gadget_sdk::events_watcher::Error::Handler(Box::new(e)); return Err(error); } diff --git a/macros/blueprint-proc-macro/src/tangle/event_listener.rs b/macros/blueprint-proc-macro/src/tangle/event_listener.rs index 5fffcb56..a83381e1 100644 --- a/macros/blueprint-proc-macro/src/tangle/event_listener.rs +++ b/macros/blueprint-proc-macro/src/tangle/event_listener.rs @@ -19,16 +19,9 @@ pub(crate) fn generate_tangle_event_handler( pub struct #struct_name { pub service_id: u64, pub signer: gadget_sdk::keystore::TanglePairSigner, - pub logger: gadget_sdk::logger::Logger, #(#additional_params)* } - // impl gadget_sdk::events_watcher::substrate::LoggerEnv for #struct_name { - // fn logger(&self) -> &gadget_sdk::logger::Logger { - // &self.logger - // } - // } - #[automatically_derived] #[async_trait::async_trait] impl gadget_sdk::events_watcher::substrate::EventHandler for #struct_name { @@ -41,7 +34,7 @@ pub(crate) fn generate_tangle_event_handler( for evt in events.iter() { if let Ok(evt) = evt { - self.logger.info(format!("Event found || required: sid={}, jid={}", self.service_id, #job_id)); + ::gadget_sdk::info!("Event found || required: sid={}, jid={}", self.service_id, #job_id); } } @@ -72,7 +65,7 @@ pub(crate) fn generate_tangle_event_handler( }, }; - self.logger.info("Handling actionable events ..."); + ::gadget_sdk::info!("Handling actionable events ..."); let job_events: Vec<_> = events .find::() @@ -82,7 +75,7 @@ pub(crate) fn generate_tangle_event_handler( }) .collect(); for call in job_events { - self.logger.info(format!("Handling JobCalled Events: #{block_number}",)); + ::gadget_sdk::info!("Handling JobCalled Events: #{block_number}"); let mut args_iter = call.args.into_iter(); #(#params_tokens)* @@ -95,7 +88,7 @@ pub(crate) fn generate_tangle_event_handler( TangleApi::tx() .services() .submit_result(self.service_id, call.call_id, result); - gadget_sdk::tx::tangle::send(&client, &self.signer, &response, &self.logger).await?; + gadget_sdk::tx::tangle::send(&client, &self.signer, &response).await?; } Ok(()) } diff --git a/macros/blueprint-proc-macro/src/tangle/mod.rs b/macros/blueprint-proc-macro/src/tangle/mod.rs index ef10f9ea..4d989bb6 100644 --- 
a/macros/blueprint-proc-macro/src/tangle/mod.rs +++ b/macros/blueprint-proc-macro/src/tangle/mod.rs @@ -55,7 +55,7 @@ pub fn field_type_to_param_token(ident: &Ident, t: &FieldType) -> proc_macro2::T let #ident = match String::from_utf8(#inner_ident) { Ok(s) => s, Err(e) => { - tracing::warn!("failed to convert bytes to a valid utf8 string: {e}"); + ::gadget_sdk::warn!("failed to convert bytes to a valid utf8 string: {e}"); use gadget_sdk::events_watcher::Error; return Err(Error::Handler(Box::new(e))); } diff --git a/sdk/src/clients/tangle/services.rs b/sdk/src/clients/tangle/services.rs index 6d3957c4..8a591284 100644 --- a/sdk/src/clients/tangle/services.rs +++ b/sdk/src/clients/tangle/services.rs @@ -1,5 +1,4 @@ use crate::error::Error; -use crate::logger::Logger; use sp_core::Encode; use subxt::utils::AccountId32; use tangle_subxt::subxt::backend::BlockRef; @@ -14,14 +13,12 @@ use tangle_subxt::tangle_testnet_runtime::api::runtime_types::tangle_primitives: #[derive(Debug)] pub struct ServicesClient { rpc_client: OnlineClient, - #[allow(dead_code)] - logger: Logger, } impl ServicesClient { /// Create a new services client - pub fn new(logger: Logger, rpc_client: OnlineClient) -> Self { - Self { logger, rpc_client } + pub fn new(rpc_client: OnlineClient) -> Self { + Self { rpc_client } } /// Get the associated RPC client diff --git a/sdk/src/config.rs b/sdk/src/config.rs index c35bd5f9..e67cdb50 100644 --- a/sdk/src/config.rs +++ b/sdk/src/config.rs @@ -3,7 +3,6 @@ use crate::keystore::backend::GenericKeyStore; use crate::keystore::BackendExt; #[cfg(any(feature = "std", feature = "wasm"))] use crate::keystore::{sp_core_subxt, TanglePairSigner}; -use crate::logger::Logger; use alloc::string::{String, ToString}; use core::fmt::Debug; use core::net::IpAddr; @@ -102,8 +101,7 @@ pub struct GadgetConfiguration { pub bind_port: u16, /// The Address of the Network that will be interacted with pub bind_addr: IpAddr, - /// The Logger that will be used in this Gadget Configuration - pub logger: Logger, + pub span: tracing::Span, /// Whether the gadget is in test mode pub test_mode: bool, _lock: core::marker::PhantomData, @@ -120,7 +118,6 @@ impl Debug for GadgetConfiguration { .field("protocol", &self.protocol) .field("bind_port", &self.bind_port) .field("bind_addr", &self.bind_addr) - .field("logger", &self.logger) .field("test_mode", &self.test_mode) .finish() } @@ -137,7 +134,7 @@ impl Clone for GadgetConfiguration { protocol: self.protocol, bind_port: self.bind_port, bind_addr: self.bind_addr, - logger: self.logger.clone(), + span: self.span.clone(), test_mode: self.test_mode, _lock: core::marker::PhantomData, } @@ -216,8 +213,8 @@ pub enum GadgetCLICoreSettings { bind_port: u16, #[structopt(long, short = "t")] test_mode: bool, - #[structopt(long, short = "l", parse(from_str))] - logger: Option, + #[structopt(long, short = "l")] + log_id: Option, #[structopt(long, short = "u", long = "url", parse(try_from_str = url::Url::parse))] #[serde(default = "gadget_io::defaults::rpc_url")] url: Url, @@ -287,7 +284,7 @@ fn load_inner( bind_addr, bind_port, test_mode, - logger, + log_id, url, keystore_uri, blueprint_id, @@ -298,13 +295,16 @@ fn load_inner( .. 
} = config; - let logger = logger.unwrap_or_default(); + let span = match log_id { + Some(id) => tracing::info_span!("gadget", id = id), + None => tracing::info_span!("gadget"), + }; Ok(GadgetConfiguration { bind_addr, bind_port, test_mode, - logger, + span, rpc_endpoint: url.to_string(), keystore_uri, blueprint_id, diff --git a/sdk/src/events_watcher/evm.rs b/sdk/src/events_watcher/evm.rs index 9aea1c92..da7b02b5 100644 --- a/sdk/src/events_watcher/evm.rs +++ b/sdk/src/events_watcher/evm.rs @@ -3,6 +3,7 @@ use crate::events_watcher::error::Error; use crate::events_watcher::retry::UnboundedConstantBuilder; use crate::store::LocalDatabase; +use crate::{error, trace, warn}; use alloy_network::ReceiptResponse; use alloy_network::{Ethereum, Network}; use alloy_primitives::FixedBytes; @@ -177,7 +178,7 @@ pub trait EventWatcher>: Send + Sync { let events = events_filter.query().await.map_err(Into::::into)?; let number_of_events = events.len(); - tracing::trace!("Found #{number_of_events} events"); + trace!("Found #{number_of_events} events"); for (event, log) in events { // Wraps each handler future in a retry logic, that will retry the handler // if it fails, up to `MAX_RETRY_COUNT`, after this it will ignore that event for @@ -203,7 +204,7 @@ pub trait EventWatcher>: Send + Sync { // wrong. for r in &result { if let Err(e) = r { - tracing::error!(?e, %chain_id, "Error while handling the event"); + error!(?e, %chain_id, "Error while handling the event"); } } if mark_as_handled { @@ -212,8 +213,8 @@ pub trait EventWatcher>: Send + Sync { log.block_number.unwrap_or_default(), ); } else { - tracing::error!(%chain_id, "Error while handling event, all handlers failed."); - tracing::warn!(%chain_id, "Restarting event watcher ..."); + error!(%chain_id, "Error while handling event, all handlers failed."); + warn!(%chain_id, "Restarting event watcher ..."); // this a transient error, so we will retry again. return Err(Error::ForceRestart); } @@ -228,7 +229,7 @@ pub trait EventWatcher>: Send + Sync { let should_cooldown = dest_block == target_block_number; if should_cooldown { let duration = Duration::from_secs(10); - tracing::trace!("Cooldown a bit for {}ms", duration.as_millis()); + trace!("Cooldown a bit for {}ms", duration.as_millis()); tokio::time::sleep(duration).await; // update the latest block number target_block_number = contract diff --git a/sdk/src/events_watcher/substrate.rs b/sdk/src/events_watcher/substrate.rs index 2d6caeb9..4200834a 100644 --- a/sdk/src/events_watcher/substrate.rs +++ b/sdk/src/events_watcher/substrate.rs @@ -8,7 +8,7 @@ //! action to take when the specified event is found in a block at the `handle_event` api. use crate::events_watcher::error::Error; -use crate::logger::Logger; +use crate::{error, info, warn}; use backon::{ConstantBuilder, ExponentialBuilder, Retryable}; use core::time::Duration; use futures::TryFutureExt; @@ -98,9 +98,6 @@ where /// The name of the pallet that this event watcher is watching. 
const PALLET_NAME: &'static str; - /// Returns a reference to the Event Watcher's [`Logger`] - fn logger(&self) -> &Logger; - /// Returns a task that should be running in the background /// that will watch events #[tracing::instrument( @@ -135,8 +132,7 @@ where } } let events = latest_block.events().map_err(Into::::into).await?; - self.logger() - .info(format!("Found #{} events: {:?}", events.len(), events)); + info!("Found #{} events: {:?}", events.len(), events); // wraps each handler future in a retry logic, that will retry the handler // if it fails, up to `MAX_RETRY_COUNT`, after this it will ignore that event for // that specific handler. @@ -160,19 +156,16 @@ where // wrong. for r in &result { if let Err(e) = r { - self.logger().error(format!("Error from result: {e:?}")); + error!("Error from result: {e:?}"); } } if mark_as_handled { - self.logger().info(format!( - "event handled successfully at block #{latest_block_number}", - )); + info!("event handled successfully at block #{latest_block_number}",); best_block = Some(latest_block_number); } else { - self.logger() - .error("Error while handling event, all handlers failed."); - self.logger().warn("Restarting event watcher ..."); + error!("Error while handling event, all handlers failed."); + warn!("Restarting event watcher ..."); // this a transient error, so we will retry again. return Err(Error::ForceRestart); } @@ -182,7 +175,3 @@ where Ok(()) } } - -pub trait LoggerEnv { - fn logger(&self) -> &Logger; -} diff --git a/sdk/src/events_watcher/tangle.rs b/sdk/src/events_watcher/tangle.rs index cc7239c4..4050ea77 100644 --- a/sdk/src/events_watcher/tangle.rs +++ b/sdk/src/events_watcher/tangle.rs @@ -3,27 +3,15 @@ #![allow(clippy::module_name_repetitions)] use crate::clients::tangle::runtime::TangleConfig; -use crate::events_watcher::substrate::LoggerEnv; -use crate::logger::Logger; /// An event watcher for the Tangle network. 
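With `SubstrateEventWatcher::logger()` and the `LoggerEnv` trait removed above, implementors log through the target-scoped macros directly. A hedged sketch of what that looks like on the implementor side; `MyWatcher` and `poll_once` are illustrative names only, not part of the SDK.

```rust
use gadget_sdk::{error, info};

// Stand-in for a concrete event watcher that previously held a `Logger`
// and called `self.logger().info(...)` / `.error(...)`.
struct MyWatcher;

impl MyWatcher {
    fn poll_once(&self, events_len: usize) {
        // After this PR the calls go straight to the `gadget`-targeted macros.
        info!("Found #{events_len} events");
        if events_len == 0 {
            error!("Error while handling event, all handlers failed.");
        }
    }
}
```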
#[derive(Debug, Clone)] pub struct TangleEventsWatcher { - pub logger: Logger, + pub span: tracing::Span, } #[async_trait::async_trait] impl super::substrate::SubstrateEventWatcher for TangleEventsWatcher { const TAG: &'static str = "tangle"; const PALLET_NAME: &'static str = "Services"; - - fn logger(&self) -> &Logger { - &self.logger - } -} - -impl LoggerEnv for TangleEventsWatcher { - fn logger(&self) -> &Logger { - &self.logger - } } diff --git a/sdk/src/keystore/backend/fs.rs b/sdk/src/keystore/backend/fs.rs index 73b22085..8274275d 100644 --- a/sdk/src/keystore/backend/fs.rs +++ b/sdk/src/keystore/backend/fs.rs @@ -3,6 +3,7 @@ use crate::keystore::{ bls381, bn254, ecdsa, ed25519, sr25519, Backend, Error, KeystoreUriSanitizer, }; +use crate::{debug, warn}; use alloc::string::ToString; use ark_serialize::CanonicalSerialize; use std::{fs, io::Write, path::PathBuf}; @@ -76,10 +77,10 @@ impl FilesystemKeystore { // if so, we need to decode them if content.iter().all(|&b| b.is_ascii_hexdigit()) { if let Ok(decoded) = hex::decode(&content) { - tracing::debug!("Decoded hex-encoded key from file {:?}", path); + debug!("Decoded hex-encoded key from file {:?}", path); Ok(Some(decoded)) } else { - tracing::warn!("Invalid hex encoding in file {:?}", path); + warn!("Invalid hex encoding in file {:?}", path); Ok(None) } } else { diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index 4216410a..34f683c2 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -29,8 +29,7 @@ pub mod events_watcher; pub mod executor; /// Keystore Module pub mod keystore; -/// Debug logger -pub mod logger; +pub mod logging; /// Metrics Module #[cfg(feature = "std")] pub mod metrics; diff --git a/sdk/src/logger.rs b/sdk/src/logger.rs deleted file mode 100644 index 1279f7a1..00000000 --- a/sdk/src/logger.rs +++ /dev/null @@ -1,84 +0,0 @@ -use crate::alloc::string::ToString; -#[cfg(not(feature = "std"))] -use alloc::string::String; -use core::fmt::Display; -use serde::{Deserialize, Serialize}; -use tracing::{debug, error, info, trace, warn}; -use tracing_subscriber::fmt::format::FmtSpan; -use tracing_subscriber::fmt::SubscriberBuilder; -use tracing_subscriber::util::SubscriberInitExt; -use tracing_subscriber::EnvFilter; - -#[derive(Clone, Debug, Default, Serialize, Deserialize)] -pub struct Logger { - pub id: String, -} - -impl From<&str> for Logger { - fn from(id: &str) -> Self { - Logger::from(id.to_string()) - } -} - -impl From for Logger { - fn from(id: String) -> Self { - Logger { id } - } -} - -impl Logger { - /// Calls the [`log::trace!`] macro, using the logger's configurations. - pub fn trace(&self, msg: T) { - if self.id.is_empty() { - trace!(target: "gadget", "{msg}"); - return; - } - trace!(target: "gadget", "[{}] {msg}", &self.id); - } - - /// Calls the [`log::debug!`] macro, using the logger's configurations. - pub fn debug(&self, msg: T) { - if self.id.is_empty() { - debug!(target: "gadget", "{msg}"); - return; - } - debug!(target: "gadget", "[{}] {msg}", &self.id); - } - - /// Calls the [`log::info!`] macro, using the logger's configurations. - pub fn info(&self, msg: T) { - if self.id.is_empty() { - info!(target: "gadget", "{msg}"); - return; - } - info!(target: "gadget", "[{}] {msg}", &self.id); - } - - /// Calls the [`log::warn!`] macro, using the logger's configurations. - pub fn warn(&self, msg: T) { - if self.id.is_empty() { - warn!(target: "gadget", "{msg}"); - return; - } - warn!(target: "gadget", "[{}] {msg}", &self.id); - } - - /// Calls the [`log::error!`] macro, using the logger's configurations. 
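The `logger` module is replaced by the new `sdk/src/logging.rs` shown below, which exposes `trace!`/`debug!`/`info!`/`warn!`/`error!` wrappers around `tracing` with `target: "gadget"`. A minimal usage sketch, assuming the consuming crate also depends on `tracing` (the exported macros expand to plain `tracing::...` paths, so they resolve in the caller's crate) and that filtering is driven by the environment, e.g. `RUST_LOG=gadget=debug`:

```rust
use gadget_sdk::{info, logging, warn};

fn main() {
    // Install the subscriber provided by `sdk/src/logging.rs`; the filter comes
    // from the environment via `EnvFilter::from_default_env()`.
    logging::setup_log();

    info!("starting example gadget");           // target = "gadget", level = INFO
    warn!(attempts = 3, "retrying connection"); // structured fields pass straight through to tracing
}
```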
- pub fn error(&self, msg: T) { - if self.id.is_empty() { - error!(target: "gadget", "{msg}"); - return; - } - error!(target: "gadget", "[{}] {msg}", &self.id); - } -} - -/// Sets up the logging for any crate -pub fn setup_log() { - let _ = SubscriberBuilder::default() - .without_time() - .with_span_events(FmtSpan::NONE) - .with_env_filter(EnvFilter::from_default_env()) - .finish() - .try_init(); -} diff --git a/sdk/src/logging.rs b/sdk/src/logging.rs new file mode 100644 index 00000000..c8bdc64f --- /dev/null +++ b/sdk/src/logging.rs @@ -0,0 +1,61 @@ +/// A [`trace`] log with the target `"gadget"` +/// +/// [`trace`]: tracing::trace +#[macro_export] +macro_rules! trace { + ($($tt:tt)*) => { + tracing::trace!(target: "gadget", $($tt)*) + } +} + +/// A [`debug`] log with the target `"gadget"` +/// +/// [`debug`]: tracing::debug +#[macro_export] +macro_rules! debug { + ($($tt:tt)*) => { + tracing::debug!(target: "gadget", $($tt)*) + } +} + +/// An [`error`] log with the target `"gadget"` +/// +/// [`error`]: tracing::error +#[macro_export] +macro_rules! error { + ($($tt:tt)*) => { + tracing::error!(target: "gadget", $($tt)*) + } +} + +/// A [`warn`] log with the target `"gadget"` +/// +/// [`warn`]: tracing::warn +#[macro_export] +macro_rules! warn { + ($($tt:tt)*) => { + tracing::warn!(target: "gadget", $($tt)*) + } +} + +/// An [`info`] log with the target `"gadget"` +/// +/// [`info`]: tracing::info +#[macro_export] +macro_rules! info { + ($($tt:tt)*) => { + tracing::info!(target: "gadget", $($tt)*) + } +} + +/// Sets up the logging for any crate +pub fn setup_log() { + use tracing_subscriber::util::SubscriberInitExt; + + let _ = tracing_subscriber::fmt::SubscriberBuilder::default() + .without_time() + .with_span_events(tracing_subscriber::fmt::format::FmtSpan::NONE) + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .finish() + .try_init(); +} diff --git a/sdk/src/network/channels.rs b/sdk/src/network/channels.rs index ad7a8fa0..d77554c9 100644 --- a/sdk/src/network/channels.rs +++ b/sdk/src/network/channels.rs @@ -10,7 +10,7 @@ use std::collections::HashMap; use std::sync::Arc; use crate::error::Error; -use crate::logger::Logger; +use crate::{error, trace, warn}; /// Unique identifier for an involved party /// @@ -59,7 +59,6 @@ pub fn create_job_manager_to_async_protocol_channel_split< user_id_mapping: Arc>, my_account_id: ecdsa::Public, network: N, - logger: Logger, ) -> ( futures::channel::mpsc::UnboundedSender, futures::channel::mpsc::UnboundedReceiver>, @@ -69,7 +68,6 @@ pub fn create_job_manager_to_async_protocol_channel_split< let (tx_to_async_proto_1, rx_for_async_proto_1) = futures::channel::mpsc::unbounded(); let (tx_to_async_proto_2, rx_for_async_proto_2) = gadget_io::tokio::sync::mpsc::unbounded_channel(); - let logger_outgoing = logger.clone(); // Take the messages from the gadget and send them to the async protocol let _ = gadget_io::tokio::task::spawn(async move { while let Some(msg) = rx_gadget.recv().await { @@ -77,12 +75,12 @@ pub fn create_job_manager_to_async_protocol_channel_split< Ok(msg) => match msg { MultiplexedChannelMessage::Channel1(msg) => { if tx_to_async_proto_1.unbounded_send(Ok(msg)).is_err() { - logger.error("Failed to send message to C1 protocol"); + error!("Failed to send message to C1 protocol"); } } MultiplexedChannelMessage::Channel2(msg) => { if tx_to_async_proto_2.send(msg).is_err() { - logger.error("Failed to send message to C2 protocol"); + error!("Failed to send message to C2 protocol"); } } @@ -91,7 +89,7 @@ pub fn 
create_job_manager_to_async_protocol_channel_split< } }, Err(err) => { - logger.error(format!("Failed to deserialize message: {err:?}")); + error!("Failed to deserialize message: {err:?}"); } } } @@ -115,7 +113,6 @@ pub fn create_job_manager_to_async_protocol_channel_split< // Take the messages the async protocol sends to the outbound channel and send them to the gadget let _ = gadget_io::tokio::task::spawn(async move { - let logger = &logger_outgoing; let channel_1_task = async move { while let Some(msg) = rx_to_outbound_1.next().await { if let Err(err) = wrap_message_and_forward_to_network::<_, C1, C2, (), _>( @@ -125,11 +122,10 @@ pub fn create_job_manager_to_async_protocol_channel_split< my_user_id, identifier_info, MultiplexedChannelMessage::Channel1, - logger, ) .await { - logger.error(format!("Failed to send message to outbound: {err:?}")); + error!("Failed to send message to outbound: {err:?}"); } } }; @@ -143,11 +139,10 @@ pub fn create_job_manager_to_async_protocol_channel_split< my_user_id, identifier_info, MultiplexedChannelMessage::Channel2, - logger, ) .await { - logger.error(format!("Failed to send message to outbound: {err:?}")); + error!("Failed to send message to outbound: {err:?}"); } } }; @@ -426,12 +421,10 @@ pub fn create_job_manager_to_async_protocol_channel_split_io< user_id_mapping: Arc>, my_account_id: ecdsa::Public, network: N, - logger: Logger, i: UserID, ) -> DuplexedChannel { let (tx_to_async_proto_1, rx_for_async_proto_1) = futures::channel::mpsc::unbounded(); let (tx_to_async_proto_2, rx_for_async_proto_2) = futures::channel::mpsc::unbounded(); - let logger_outgoing = logger.clone(); let mapping_clone = user_id_mapping.clone(); let my_user_id = user_id_mapping @@ -446,10 +439,7 @@ pub fn create_job_manager_to_async_protocol_channel_split_io< .expect("Failed to find my user id"); if my_user_id != i { - logger.error(format!( - "My user id is not equal to i: {} != {}", - my_user_id, i - )); + error!("My user id is not equal to i: {} != {}", my_user_id, i); } // Take the messages from the gadget and send them to the async protocol @@ -457,30 +447,30 @@ pub fn create_job_manager_to_async_protocol_channel_split_io< let mut id = 0; while let Some(msg_orig) = rx_gadget.recv().await { if msg_orig.payload.is_empty() { - logger.warn(format!( + warn!( "Received empty message from Peer {:?}", msg_orig.sender.user_id - )); + ); continue; } match deserialize::>(&msg_orig.payload[..]) { Ok(msg) => match msg { MultiplexedChannelMessage::Channel1(msg) => { - logger.trace(format!("Received message count: {id}", id = id + 1)); - logger.trace(format!( + trace!("Received message count: {id}", id = id + 1); + trace!( "Received message from {:?} as {:?}", - msg_orig.sender.user_id, msg_orig.recipient - )); + msg_orig.sender.user_id, + msg_orig.recipient + ); let msg_type = if let Some(to) = msg_orig.recipient { if let Some(to_account_id) = mapping_clone.get(&to.user_id) { if *to_account_id != my_account_id { - logger.error("Invalid message received"); + error!("Invalid message received"); continue; } } else { - logger - .error("Invalid message received (`to` not found in mapping)"); + error!("Invalid message received (`to` not found in mapping)"); continue; } @@ -492,14 +482,14 @@ pub fn create_job_manager_to_async_protocol_channel_split_io< let incoming = I::from_inbound(id, msg_orig.sender.user_id, msg_type, msg); if tx_to_async_proto_1.unbounded_send(Ok(incoming)).is_err() { - logger.error("Failed to send Incoming message to protocol"); + error!("Failed to send Incoming message 
to protocol"); } id += 1; } MultiplexedChannelMessage::Channel2(msg) => { if tx_to_async_proto_2.unbounded_send(msg).is_err() { - logger.error("Failed to send C2 message to protocol"); + error!("Failed to send C2 message to protocol"); } } _ => { @@ -507,7 +497,7 @@ pub fn create_job_manager_to_async_protocol_channel_split_io< } }, Err(err) => { - logger.error(format!("Failed to deserialize message: {err:?}")); + error!("Failed to deserialize message: {err:?}"); } } } @@ -520,7 +510,6 @@ pub fn create_job_manager_to_async_protocol_channel_split_io< // Take the messages from the async protocol and send them to the gadget let _ = gadget_io::tokio::task::spawn(async move { - let logger = &logger_outgoing; let channel_1_task = async move { while let Some(msg) = rx_to_outbound_1.next().await { if let Err(err) = wrap_message_and_forward_to_network::<_, O::Inner, C2, (), _>( @@ -530,15 +519,14 @@ pub fn create_job_manager_to_async_protocol_channel_split_io< my_user_id, identifier_info, |m| MultiplexedChannelMessage::Channel1(m.inner_message()), - logger, ) .await { - logger.error(format!("Failed to send message to outbound: {err:?}")); + error!("Failed to send message to outbound: {err:?}"); } } - logger.trace("Channel 1 outgoing task closing") + trace!("Channel 1 outgoing task closing") }; let channel_2_task = async move { @@ -550,15 +538,14 @@ pub fn create_job_manager_to_async_protocol_channel_split_io< my_user_id, identifier_info, |m| MultiplexedChannelMessage::Channel2(m), - logger, ) .await { - logger.error(format!("Failed to send message to outbound: {err:?}")); + error!("Failed to send message to outbound: {err:?}"); } } - logger.trace("Channel 2 outgoing task closing") + trace!("Channel 2 outgoing task closing") }; gadget_io::tokio::join!(channel_1_task, channel_2_task); @@ -596,22 +583,20 @@ pub fn create_job_manager_to_async_protocol_channel_split_io_triplex< user_id_mapping: Arc>, my_account_id: ecdsa::Public, network: N, - logger: Logger, ) -> TriplexedChannel { let (tx_to_async_proto_1, rx_for_async_proto_1) = futures::channel::mpsc::unbounded(); let (tx_to_async_proto_2, rx_for_async_proto_2) = futures::channel::mpsc::unbounded(); let (tx_to_async_proto_3, rx_for_async_proto_3) = futures::channel::mpsc::unbounded(); - let logger_outgoing = logger.clone(); // Take the messages from the gadget and send them to the async protocol let _ = gadget_io::tokio::task::spawn(async move { let mut id = 0; while let Some(msg_orig) = rx_gadget.recv().await { if msg_orig.payload.is_empty() { - logger.warn(format!( + warn!( "Received empty message from Peer {:?}", msg_orig.sender.user_id - )); + ); continue; } @@ -629,7 +614,7 @@ pub fn create_job_manager_to_async_protocol_channel_split_io_triplex< let incoming = I1::from_inbound(id, msg_orig.sender.user_id, msg_type, msg); if tx_to_async_proto_1.unbounded_send(Ok(incoming)).is_err() { - logger.error("Failed to send Incoming message to protocol"); + error!("Failed to send Incoming message to protocol"); } id += 1; @@ -644,20 +629,20 @@ pub fn create_job_manager_to_async_protocol_channel_split_io_triplex< let incoming = I2::from_inbound(id, msg_orig.sender.user_id, msg_type, msg); if tx_to_async_proto_2.unbounded_send(Ok(incoming)).is_err() { - logger.error("Failed to send Incoming message to protocol"); + error!("Failed to send Incoming message to protocol"); } id += 1; } MultiplexedChannelMessage::Channel3(msg) => { if tx_to_async_proto_3.unbounded_send(msg).is_err() { - logger.error("Failed to send C2 message to protocol"); + error!("Failed to 
send C2 message to protocol"); } } }, Err(err) => { - logger.error(format!("Failed to deserialize message: {err:?}")); + error!("Failed to deserialize message: {err:?}"); } } } @@ -681,7 +666,6 @@ pub fn create_job_manager_to_async_protocol_channel_split_io_triplex< let _ = gadget_io::tokio::task::spawn(async move { let user_id_mapping = &user_id_mapping; let network = &network; - let logger = &logger_outgoing; let task0 = async move { while let Some(msg) = rx_to_outbound_1.next().await { if let Err(err) = @@ -692,11 +676,10 @@ pub fn create_job_manager_to_async_protocol_channel_split_io_triplex< my_user_id, identifier_info, |m| MultiplexedChannelMessage::Channel1(m.inner_message()), - logger, ) .await { - logger.error(format!("Failed to send message to outbound: {err:?}")); + error!("Failed to send message to outbound: {err:?}"); } } }; @@ -711,11 +694,10 @@ pub fn create_job_manager_to_async_protocol_channel_split_io_triplex< my_user_id, identifier_info, |m| MultiplexedChannelMessage::Channel2(m.inner_message()), - logger, ) .await { - logger.error(format!("Failed to send message to outbound: {err:?}")); + error!("Failed to send message to outbound: {err:?}"); } } }; @@ -730,11 +712,10 @@ pub fn create_job_manager_to_async_protocol_channel_split_io_triplex< my_user_id, identifier_info, |m| MultiplexedChannelMessage::Channel3(m), - logger, ) .await { - logger.error(format!("Failed to send message to outbound: {err:?}")); + error!("Failed to send message to outbound: {err:?}"); } } }; @@ -767,14 +748,13 @@ async fn wrap_message_and_forward_to_network< my_user_id: UserID, identifier_info: IdentifierInfo, splitter: impl FnOnce(M) -> MultiplexedChannelMessage, - logger: &Logger, ) -> Result<(), Error> where M: MaybeSenderReceiver + Send + 'static, { let from = msg.maybe_sender(); let to = msg.maybe_receiver(); - logger.trace(format!("Sending message from {:?} to {:?}", from, to)); + trace!("Sending message from {:?} to {:?}", from, to); let from_account_id = user_id_mapping .get(&from.as_user_id().unwrap_or(my_user_id)) diff --git a/sdk/src/network/gossip.rs b/sdk/src/network/gossip.rs index a5edec39..341d26ed 100644 --- a/sdk/src/network/gossip.rs +++ b/sdk/src/network/gossip.rs @@ -20,7 +20,7 @@ use std::sync::atomic::AtomicU32; use std::sync::Arc; use crate::error::Error; -use crate::logger::Logger; +use crate::{debug, error, trace, warn}; use super::{Network, ParticipantInfo, ProtocolMessage}; @@ -44,7 +44,6 @@ pub struct MyBehaviour { pub type InboundMapping = (IdentTopic, UnboundedSender>, Arc); pub struct NetworkServiceWithoutSwarm<'a> { - pub logger: &'a Logger, pub inbound_mapping: &'a [InboundMapping], pub ecdsa_peer_id_to_libp2p_id: Arc>>, pub ecdsa_key: &'a ecdsa::Pair, @@ -58,7 +57,6 @@ impl<'a> NetworkServiceWithoutSwarm<'a> { ) -> NetworkService<'a> { NetworkService { swarm, - logger: self.logger, inbound_mapping: self.inbound_mapping, ecdsa_peer_id_to_libp2p_id: &self.ecdsa_peer_id_to_libp2p_id, ecdsa_key: self.ecdsa_key, @@ -69,7 +67,6 @@ impl<'a> NetworkServiceWithoutSwarm<'a> { pub struct NetworkService<'a> { pub swarm: &'a mut libp2p::Swarm, - pub logger: &'a Logger, pub inbound_mapping: &'a [InboundMapping], pub ecdsa_peer_id_to_libp2p_id: &'a Arc>>, pub ecdsa_key: &'a ecdsa::Pair, @@ -89,7 +86,7 @@ impl<'a> NetworkService<'a> { .gossipsub .publish(msg.topic, gossip_message) { - self.logger.error(format!("Publish error: {e:?}")); + error!("Publish error: {e:?}"); } } @@ -100,15 +97,13 @@ impl<'a> NetworkService<'a> { self.swarm.behaviour_mut().p2p.send_request(&peer_id, 
req); } (MessageType::Broadcast, GossipOrRequestResponse::Request(_)) => { - self.logger.error("Broadcasting a request is not supported"); + error!("Broadcasting a request is not supported"); } (MessageType::Broadcast, GossipOrRequestResponse::Response(_)) => { - self.logger - .error("Broadcasting a response is not supported"); + error!("Broadcasting a response is not supported"); } (MessageType::P2P(_), GossipOrRequestResponse::Gossip(_)) => { - self.logger - .error("P2P message should be a request or response"); + error!("P2P message should be a request or response"); } (MessageType::P2P(_), GossipOrRequestResponse::Response(_)) => { // TODO: Send the response to the peer. @@ -143,7 +138,7 @@ impl<'a> NetworkService<'a> { self.handle_identify_event(event).await; } Behaviour(Kadmelia(event)) => { - self.logger.trace(format!("Kadmelia event: {event:?}")); + trace!("Kadmelia event: {event:?}"); } Behaviour(Dcutr(event)) => { self.handle_dcutr_event(event).await; @@ -162,8 +157,7 @@ impl<'a> NetworkService<'a> { address, listener_id, } => { - self.logger - .debug(format!("{listener_id} has a new address: {address}")); + debug!("{listener_id} has a new address: {address}"); } ConnectionEstablished { peer_id, @@ -216,50 +210,38 @@ impl<'a> NetworkService<'a> { listener_id, address, } => { - self.logger - .trace(format!("{listener_id} has an expired address: {address}")); + trace!("{listener_id} has an expired address: {address}"); } ListenerClosed { listener_id, addresses, reason, } => { - self.logger.trace(format!( - "{listener_id} on {addresses:?} has been closed: {reason:?}" - )); + trace!("{listener_id} on {addresses:?} has been closed: {reason:?}"); } ListenerError { listener_id, error } => { - self.logger - .error(format!("{listener_id} has an error: {error}")); + error!("{listener_id} has an error: {error}"); } Dialing { peer_id, connection_id, } => { - self.logger.debug(format!( - "Dialing peer: {peer_id:?} with connection_id: {connection_id}" - )); + debug!("Dialing peer: {peer_id:?} with connection_id: {connection_id}"); } NewExternalAddrCandidate { address } => { - self.logger - .trace(format!("New external address candidate: {address}")); + trace!("New external address candidate: {address}"); } ExternalAddrConfirmed { address } => { - self.logger - .trace(format!("External address confirmed: {address}")); + trace!("External address confirmed: {address}"); } ExternalAddrExpired { address } => { - self.logger - .trace(format!("External address expired: {address}")); + trace!("External address expired: {address}"); } NewExternalAddrOfPeer { peer_id, address } => { - self.logger.trace(format!( - "New external address of peer: {peer_id} with address: {address}" - )); + trace!("New external address of peer: {peer_id} with address: {address}"); } unknown => { - self.logger - .warn(format!("Unknown swarm event: {unknown:?}")); + warn!("Unknown swarm event: {unknown:?}"); } } } @@ -270,7 +252,6 @@ pub struct GossipHandle { pub topic: IdentTopic, pub tx_to_outbound: UnboundedSender, pub rx_from_inbound: Arc>>>, - pub logger: Logger, pub connected_peers: Arc, pub ecdsa_peer_id_to_libp2p_id: Arc>>, } @@ -356,8 +337,7 @@ impl Network for GossipHandle { match bincode::deserialize(&message) { Ok(message) => Some(message), Err(e) => { - self.logger - .error(format!("Failed to deserialize message: {e}")); + error!("Failed to deserialize message: {e}"); drop(lock); Network::next_message(self).await } diff --git a/sdk/src/network/handlers/connections.rs b/sdk/src/network/handlers/connections.rs index 
c0f55a2a..10d645a2 100644 --- a/sdk/src/network/handlers/connections.rs +++ b/sdk/src/network/handlers/connections.rs @@ -1,6 +1,7 @@ #![allow(unused_results, clippy::used_underscore_binding)] use crate::network::gossip::{MyBehaviourRequest, NetworkService}; +use crate::{debug, error}; use libp2p::PeerId; use sp_core::{keccak_256, Pair}; @@ -11,7 +12,7 @@ impl NetworkService<'_> { peer_id: PeerId, num_established: u32, ) { - self.logger.debug("Connection established"); + debug!("Connection established"); if num_established == 1 { let my_peer_id = self.swarm.local_peer_id(); let msg = my_peer_id.to_bytes(); @@ -39,7 +40,7 @@ impl NetworkService<'_> { num_established: u32, _cause: Option, ) { - self.logger.debug("Connection closed"); + debug!("Connection closed"); if num_established == 0 { self.swarm .behaviour_mut() @@ -55,7 +56,7 @@ impl NetworkService<'_> { _local_addr: libp2p::Multiaddr, _send_back_addr: libp2p::Multiaddr, ) { - self.logger.debug("Incoming connection"); + debug!("Incoming connection"); } #[tracing::instrument(skip(self))] @@ -64,8 +65,7 @@ impl NetworkService<'_> { peer_id: PeerId, _connection_id: libp2p::swarm::ConnectionId, ) { - self.logger - .debug(format!("Outgoing connection to peer: {peer_id}",)); + debug!("Outgoing connection to peer: {peer_id}"); } #[tracing::instrument(skip(self, error))] @@ -76,8 +76,7 @@ impl NetworkService<'_> { _send_back_addr: libp2p::Multiaddr, error: libp2p::swarm::ListenError, ) { - self.logger - .error(format!("Incoming connection error: {error}",)); + error!("Incoming connection error: {error}"); } #[tracing::instrument(skip(self, error))] @@ -87,7 +86,6 @@ impl NetworkService<'_> { _peer_id: Option, error: libp2p::swarm::DialError, ) { - self.logger - .error(format!("Outgoing connection error: {error}",)); + error!("Outgoing connection error: {error}"); } } diff --git a/sdk/src/network/handlers/dcutr.rs b/sdk/src/network/handlers/dcutr.rs index 08b53801..4d83ab73 100644 --- a/sdk/src/network/handlers/dcutr.rs +++ b/sdk/src/network/handlers/dcutr.rs @@ -1,8 +1,9 @@ +use crate::debug; use crate::network::gossip::NetworkService; impl NetworkService<'_> { #[tracing::instrument(skip(self, event))] pub async fn handle_dcutr_event(&mut self, event: libp2p::dcutr::Event) { - self.logger.debug(format!("DCUTR event: {event:?}")); + debug!("DCUTR event: {event:?}"); } } diff --git a/sdk/src/network/handlers/gossip.rs b/sdk/src/network/handlers/gossip.rs index b34ccd29..dd427c86 100644 --- a/sdk/src/network/handlers/gossip.rs +++ b/sdk/src/network/handlers/gossip.rs @@ -2,6 +2,7 @@ use crate::network::gossip::{GossipMessage, NetworkService}; +use crate::{debug, error, trace}; use libp2p::gossipsub::TopicHash; use libp2p::{gossipsub, PeerId}; use std::sync::atomic::AtomicU32; @@ -38,11 +39,9 @@ impl NetworkService<'_> { connected_peers.fetch_add(1, std::sync::atomic::Ordering::Relaxed); }); if added { - self.logger - .trace(format!("{peer_id} subscribed to {topic}",)); + trace!("{peer_id} subscribed to {topic}"); } else { - self.logger - .error(format!("{peer_id} subscribed to unknown topic: {topic}")); + error!("{peer_id} subscribed to unknown topic: {topic}"); } } Unsubscribed { peer_id, topic } => { @@ -50,17 +49,13 @@ impl NetworkService<'_> { connected_peers.fetch_sub(1, std::sync::atomic::Ordering::Relaxed); }); if removed { - self.logger - .trace(format!("{peer_id} unsubscribed from {topic}",)); + trace!("{peer_id} unsubscribed from {topic}"); } else { - self.logger.error(format!( - "{peer_id} unsubscribed from unknown topic: {topic}" - 
)); + error!("{peer_id} unsubscribed from unknown topic: {topic}"); } } GossipsubNotSupported { peer_id } => { - self.logger - .trace(format!("{peer_id} does not support gossipsub!")); + trace!("{peer_id} does not support gossipsub!"); } } } @@ -80,12 +75,10 @@ impl NetworkService<'_> { message: gossipsub::Message, ) { let Some(origin) = message.source else { - self.logger - .error("Got message from unknown peer".to_string()); + error!("Got message from unknown peer"); return; }; - self.logger - .debug(format!("Got message from peer: {origin}",)); + debug!("Got message from peer: {origin}"); match bincode::deserialize::(&message.data) { Ok(GossipMessage { topic, raw_payload }) => { if let Some((_, tx, _)) = self @@ -94,17 +87,14 @@ impl NetworkService<'_> { .find(|r| r.0.to_string() == topic) { if let Err(e) = tx.send(raw_payload) { - self.logger - .error(format!("Failed to send message to worker: {e}")); + error!("Failed to send message to worker: {e}"); } } else { - self.logger - .error(format!("No registered worker for topic: {topic}!")); + error!("No registered worker for topic: {topic}!"); } } Err(e) => { - self.logger - .error(format!("Failed to deserialize message: {e}")); + error!("Failed to deserialize message: {e}"); } } } diff --git a/sdk/src/network/handlers/identify.rs b/sdk/src/network/handlers/identify.rs index 3d1c0e88..4660b619 100644 --- a/sdk/src/network/handlers/identify.rs +++ b/sdk/src/network/handlers/identify.rs @@ -1,4 +1,5 @@ use crate::network::gossip::NetworkService; +use crate::{debug, error, trace}; impl NetworkService<'_> { #[tracing::instrument(skip(self, event))] @@ -13,14 +14,11 @@ impl NetworkService<'_> { format!("Supported Protocols: {:?}", info.protocols), ]; let info_lines = info_lines.join(", "); - self.logger.debug(format!( - "Received identify event from peer: {peer_id} with info: {info_lines}" - )); + debug!("Received identify event from peer: {peer_id} with info: {info_lines}"); self.swarm.add_external_address(info.observed_addr); } Sent { peer_id, .. } => { - self.logger - .trace(format!("Sent identify event to peer: {peer_id}")); + trace!("Sent identify event to peer: {peer_id}"); } Pushed { peer_id, info, .. } => { let info_lines = [ @@ -29,14 +27,10 @@ impl NetworkService<'_> { format!("Supported Protocols: {:?}", info.protocols), ]; let info_lines = info_lines.join(", "); - self.logger.debug(format!( - "Pushed identify event to peer: {peer_id} with info: {info_lines}" - )); + debug!("Pushed identify event to peer: {peer_id} with info: {info_lines}"); } Error { peer_id, error, .. 
} => { - self.logger.error(format!( - "Identify error from peer: {peer_id} with error: {error}" - )); + error!("Identify error from peer: {peer_id} with error: {error}"); } } } diff --git a/sdk/src/network/handlers/kadmelia.rs b/sdk/src/network/handlers/kadmelia.rs index cd05765c..88825039 100644 --- a/sdk/src/network/handlers/kadmelia.rs +++ b/sdk/src/network/handlers/kadmelia.rs @@ -1,9 +1,10 @@ use crate::network::gossip::NetworkService; +use crate::trace; impl NetworkService<'_> { #[tracing::instrument(skip(self, event))] async fn handle_kadmelia_event(&mut self, event: libp2p::kad::Event) { // TODO: Handle kadmelia events - self.logger.trace(format!("Kadmelia event: {event:?}")); + trace!("Kadmelia event: {event:?}"); } } diff --git a/sdk/src/network/handlers/mdns.rs b/sdk/src/network/handlers/mdns.rs index 022d60fd..7d2455fd 100644 --- a/sdk/src/network/handlers/mdns.rs +++ b/sdk/src/network/handlers/mdns.rs @@ -1,4 +1,5 @@ use crate::network::gossip::NetworkService; +use crate::{debug, error}; use libp2p::mdns; impl NetworkService<'_> { @@ -8,22 +9,19 @@ impl NetworkService<'_> { match event { Discovered(list) => { for (peer_id, multiaddr) in list { - self.logger - .debug(format!("discovered a new peer: {peer_id} on {multiaddr}")); + debug!("discovered a new peer: {peer_id} on {multiaddr}"); self.swarm .behaviour_mut() .gossipsub .add_explicit_peer(&peer_id); if let Err(err) = self.swarm.dial(multiaddr) { - self.logger.error(format!("Failed to dial peer: {err}")); + error!("Failed to dial peer: {err}"); } } } Expired(list) => { for (peer_id, multiaddr) in list { - self.logger.debug(format!( - "discover peer has expired: {peer_id} with {multiaddr}" - )); + debug!("discover peer has expired: {peer_id} with {multiaddr}"); self.swarm .behaviour_mut() .gossipsub diff --git a/sdk/src/network/handlers/p2p.rs b/sdk/src/network/handlers/p2p.rs index 7388aed0..10dbf4b4 100644 --- a/sdk/src/network/handlers/p2p.rs +++ b/sdk/src/network/handlers/p2p.rs @@ -1,8 +1,9 @@ #![allow(unused_results)] use crate::network::gossip::{MyBehaviourRequest, MyBehaviourResponse, NetworkService}; -use libp2p::gossipsub::IdentTopic; +use crate::{debug, error, warn}; +use libp2p::gossipsub::IdentTopic; use libp2p::{request_response, PeerId}; use sp_core::{keccak_256, Pair}; use sp_io::crypto::ecdsa_verify_prehashed; @@ -16,8 +17,7 @@ impl NetworkService<'_> { use request_response::Event::{InboundFailure, Message, OutboundFailure, ResponseSent}; match event { Message { peer, message } => { - self.logger - .debug(format!("Received P2P message from: {peer}")); + debug!("Received P2P message from: {peer}"); self.handle_p2p_message(peer, message).await; } OutboundFailure { @@ -25,19 +25,17 @@ impl NetworkService<'_> { request_id, error, } => { - self.logger.error(format!("Failed to send message to peer: {peer} with request_id: {request_id} and error: {error}")); + error!("Failed to send message to peer: {peer} with request_id: {request_id} and error: {error}"); } InboundFailure { peer, request_id, error, } => { - self.logger.error(format!("Failed to receive message from peer: {peer} with request_id: {request_id} and error: {error}")); + error!("Failed to receive message from peer: {peer} with request_id: {request_id} and error: {error}"); } ResponseSent { peer, request_id } => { - self.logger.debug(format!( - "Sent response to peer: {peer} with request_id: {request_id}" - )); + debug!("Sent response to peer: {peer} with request_id: {request_id}"); } } } @@ -55,9 +53,7 @@ impl NetworkService<'_> { channel, request_id, } 
=> { - self.logger.debug(format!( - "Received request with request_id: {request_id} from peer: {peer}" - )); + debug!("Received request with request_id: {request_id} from peer: {peer}"); self.handle_p2p_request(peer, request_id, request, channel) .await; } @@ -65,9 +61,7 @@ impl NetworkService<'_> { response, request_id, } => { - self.logger.debug(format!( - "Received response from peer: {peer} with request_id: {request_id}" - )); + debug!("Received response from peer: {peer} with request_id: {request_id}"); self.handle_p2p_response(peer, request_id, response).await; } } @@ -90,8 +84,7 @@ impl NetworkService<'_> { let hash = keccak_256(&msg); let valid = ecdsa_verify_prehashed(&signature, &hash, &ecdsa_public_key); if !valid { - self.logger - .warn(format!("Invalid signature from peer: {peer}")); + warn!("Invalid signature from peer: {peer}"); // TODO: report this peer. self.ecdsa_peer_id_to_libp2p_id .write() @@ -123,15 +116,13 @@ impl NetworkService<'_> { ecdsa_public_key, signature, } => { - self.logger - .debug(format!("Received handshake from peer: {peer}")); + debug!("Received handshake from peer: {peer}"); // Verify the signature let msg = peer.to_bytes(); let hash = keccak_256(&msg); let valid = ecdsa_verify_prehashed(&signature, &hash, &ecdsa_public_key); if !valid { - self.logger - .warn(format!("Invalid signature from peer: {peer}")); + warn!("Invalid signature from peer: {peer}"); let _ = self.swarm.disconnect_peer_id(peer); return; } @@ -160,12 +151,10 @@ impl NetworkService<'_> { .find(|r| r.0.to_string() == topic.to_string()) { if let Err(e) = tx.send(raw_payload) { - self.logger - .error(format!("Failed to send message to worker: {e}")); + error!("Failed to send message to worker: {e}"); } } else { - self.logger - .error(format!("No registered worker for topic: {topic}!")); + error!("No registered worker for topic: {topic}!"); } self.swarm .behaviour_mut() @@ -174,8 +163,7 @@ impl NetworkService<'_> { } }; if result.is_err() { - self.logger - .error(format!("Failed to send response for {request_id}")); + error!("Failed to send response for {request_id}"); } } } diff --git a/sdk/src/network/handlers/ping.rs b/sdk/src/network/handlers/ping.rs index 52df1446..f303171f 100644 --- a/sdk/src/network/handlers/ping.rs +++ b/sdk/src/network/handlers/ping.rs @@ -1,8 +1,9 @@ +use crate::debug; use crate::network::gossip::NetworkService; impl NetworkService<'_> { #[tracing::instrument(skip(self, event))] pub async fn handle_ping_event(&mut self, event: libp2p::ping::Event) { - self.logger.debug(format!("Ping event: {event:?}")); + debug!("Ping event: {event:?}") } } diff --git a/sdk/src/network/handlers/relay.rs b/sdk/src/network/handlers/relay.rs index e53b0f86..9e13ef24 100644 --- a/sdk/src/network/handlers/relay.rs +++ b/sdk/src/network/handlers/relay.rs @@ -1,13 +1,14 @@ +use crate::debug; use crate::network::gossip::NetworkService; impl NetworkService<'_> { #[tracing::instrument(skip(self, event))] pub async fn handle_relay_event(&mut self, event: libp2p::relay::Event) { - self.logger.debug(format!("Relay event: {event:?}")); + debug!("Relay event: {event:?}"); } #[tracing::instrument(skip(self, event))] pub async fn handle_relay_client_event(&mut self, event: libp2p::relay::client::Event) { - self.logger.debug(format!("Relay client event: {event:?}")); + debug!("Relay client event: {event:?}"); } } diff --git a/sdk/src/network/matchbox.rs b/sdk/src/network/matchbox.rs index e6a6453c..b1c322a5 100644 --- a/sdk/src/network/matchbox.rs +++ b/sdk/src/network/matchbox.rs @@ -1,6 +1,6 
@@ -use crate::logger::Logger; use crate::network::matchbox::MatchboxEvent::P2p; use crate::network::network::NetworkHandle; +use crate::warn; use gadget_io::tokio::sync::{Mutex, RwLock}; use matchbox_socket::PeerId; use serde::{Deserialize, Serialize}; @@ -13,7 +13,6 @@ use std::{ pub type InboundMapping = (String, UnboundedSender>, Arc); pub struct MatchboxNetworkService<'a> { - pub logger: &'a Logger, pub inbound_mapping: &'a [InboundMapping], pub ecdsa_peer_id_to_matchbox_id: &'a Arc>>, @@ -33,8 +32,7 @@ impl<'a> MatchboxNetworkService<'a> { match event { P2p { peer_id: _ } => {} unknown => { - self.logger - .warn(format!("Unknown swarm event: {unknown:?}")); + warn!("Unknown swarm event: {unknown:?}"); } } } diff --git a/sdk/src/network/setup.rs b/sdk/src/network/setup.rs index c6f61a4d..2f3be3f5 100644 --- a/sdk/src/network/setup.rs +++ b/sdk/src/network/setup.rs @@ -1,5 +1,4 @@ #![allow(unused_results, missing_docs)] -use crate::logger::Logger; #[cfg(not(target_family = "wasm"))] use crate::network::gossip::{ GossipHandle, IntraNodePayload, MyBehaviour, NetworkServiceWithoutSwarm, MAX_MESSAGE_SIZE, @@ -43,7 +42,6 @@ pub struct NetworkConfig { pub bind_ip: IpAddr, pub bind_port: u16, pub topics: Vec, - pub logger: Logger, } impl std::fmt::Debug for NetworkConfig { @@ -69,7 +67,6 @@ impl NetworkConfig { bind_ip: IpAddr, bind_port: u16, topics: Vec, - logger: Logger, ) -> Self { Self { identity, @@ -78,7 +75,6 @@ impl NetworkConfig { bind_ip, bind_port, topics, - logger, } } @@ -91,7 +87,6 @@ impl NetworkConfig { bind_ip: IpAddr, bind_port: u16, service_name: T, - logger: Logger, ) -> Self { Self::new( identity, @@ -100,7 +95,6 @@ impl NetworkConfig { bind_ip, bind_port, vec![service_name.into()], - logger, ) } } @@ -153,7 +147,6 @@ pub fn multiplexed_libp2p_network(config: NetworkConfig) -> NetworkResult { bind_ip, bind_port, topics, - logger, ecdsa_key, } = config; @@ -277,7 +270,6 @@ pub fn multiplexed_libp2p_network(config: NetworkConfig) -> NetworkResult { topic, tx_to_outbound: tx_to_outbound.clone(), rx_from_inbound: Arc::new(Mutex::new(inbound_rx)), - logger: logger.clone(), ecdsa_peer_id_to_libp2p_id: ecdsa_peer_id_to_libp2p_id.clone(), }, ); @@ -318,7 +310,6 @@ pub fn multiplexed_libp2p_network(config: NetworkConfig) -> NetworkResult { let span = tracing::debug_span!("network_worker"); let _enter = span.enter(); let service = NetworkServiceWithoutSwarm { - logger: &logger, inbound_mapping: &inbound_mapping, ecdsa_peer_id_to_libp2p_id, ecdsa_key: &ecdsa_key, diff --git a/sdk/src/tx/tangle.rs b/sdk/src/tx/tangle.rs index bb91156c..a211407b 100644 --- a/sdk/src/tx/tangle.rs +++ b/sdk/src/tx/tangle.rs @@ -1,4 +1,4 @@ -use crate::logger::Logger; +use crate::debug; /// Send a transaction to the Tangle network.
/// @@ -10,7 +10,6 @@ pub async fn send( client: &subxt::OnlineClient, signer: &S, xt: &X, - logger: &Logger, ) -> Result, subxt::Error> where T: subxt::Config, @@ -19,23 +18,20 @@ where >::Params: Default, { if let Some(details) = xt.validation_details() { - logger.debug(format!( - "Calling {}.{}", - details.pallet_name, details.call_name - )); + debug!("Calling {}.{}", details.pallet_name, details.call_name); } - logger.debug("Waiting for the transaction to be included in a finalized block"); + debug!("Waiting for the transaction to be included in a finalized block"); let progress = client .tx() .sign_and_submit_then_watch_default(xt, signer) .await?; - logger.debug("Waiting for finalized success ..."); + debug!("Waiting for finalized success ..."); let result = progress.wait_for_finalized_success().await?; - logger.debug(format!( + debug!( "Transaction with hash: {:?} has been finalized", result.extrinsic_hash() - )); + ); Ok(result) }