From c1037b3752571de420fe161673629b4b61049169 Mon Sep 17 00:00:00 2001 From: Thomas Braun <38082993+tbraun96@users.noreply.github.com> Date: Wed, 20 Nov 2024 09:34:35 -0500 Subject: [PATCH 1/7] fix: race conditions in multiplexer (#486) * fix: network wrapper to use [0u8; 32] task hash * refactor: cleanup round_based_compat fix: don't poll newly created futures * fix: race conditions, out of order delivery * chore: cleanup --- Cargo.lock | 11 - Cargo.toml | 1 - sdk/Cargo.toml | 1 - sdk/src/event_listener/tangle/mod.rs | 2 +- sdk/src/network/mod.rs | 425 +++++++++++++++++++++----- sdk/src/network/round_based_compat.rs | 230 +++++++------- 6 files changed, 459 insertions(+), 211 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 881348d8..f118c458 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1754,16 +1754,6 @@ dependencies = [ "serde", ] -[[package]] -name = "bincode2" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f49f6183038e081170ebbbadee6678966c7d54728938a3e7de7f4e780770318f" -dependencies = [ - "byteorder", - "serde", -] - [[package]] name = "bindgen" version = "0.69.5" @@ -4834,7 +4824,6 @@ dependencies = [ "auto_impl", "backon", "bincode", - "bincode2", "bollard", "clap", "color-eyre", diff --git a/Cargo.toml b/Cargo.toml index ac7a0c67..ad3d0e2b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -196,7 +196,6 @@ testcontainers = { version = "0.20.1" } # Symbiotic symbiotic-rs = { version = "0.1.0" } dashmap = "6.1.0" -bincode2 = "2.0.1" lru-mem = "0.3.0" [profile.dev.package.backtrace] diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index c2267046..9c111096 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -99,7 +99,6 @@ lru-mem = { workspace = true } sysinfo = { workspace = true } dashmap = { workspace = true } lazy_static = "1.5.0" -bincode2 = { workspace = true } color-eyre = { workspace = true } diff --git a/sdk/src/event_listener/tangle/mod.rs b/sdk/src/event_listener/tangle/mod.rs index d8ad0990..496bd897 100644 --- a/sdk/src/event_listener/tangle/mod.rs +++ b/sdk/src/event_listener/tangle/mod.rs @@ -176,7 +176,7 @@ impl .filter_map(|r| r.ok().and_then(E::try_decode)) .collect::>(); - crate::info!("Found {} possible events ...", events.len()); + crate::debug!("Found {} possible events ...", events.len()); self.enqueued_events = events; } } diff --git a/sdk/src/network/mod.rs b/sdk/src/network/mod.rs index 55d9a671..7066af2e 100644 --- a/sdk/src/network/mod.rs +++ b/sdk/src/network/mod.rs @@ -5,7 +5,9 @@ use dashmap::DashMap; use futures::{Stream, StreamExt}; use serde::{Deserialize, Serialize}; use sp_core::{ecdsa, sha2_256}; -use std::ops::Deref; +use std::cmp::Reverse; +use std::collections::{BinaryHeap, HashMap}; +use std::ops::{Deref, DerefMut}; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; @@ -24,31 +26,15 @@ pub mod setup; #[derive(Debug, Serialize, Deserialize, Clone, Copy, Default)] pub struct IdentifierInfo { - pub block_id: Option, - pub session_id: Option, - pub retry_id: Option, - pub task_id: Option, + pub message_id: u64, + pub round_id: u16, } impl Display for IdentifierInfo { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let block_id = self - .block_id - .map(|id| format!("block_id: {}", id)) - .unwrap_or_default(); - let session_id = self - .session_id - .map(|id| format!("session_id: {}", id)) - .unwrap_or_default(); - let retry_id = self - .retry_id - .map(|id| format!("retry_id: {}", id)) - .unwrap_or_default(); - let task_id = self - .task_id - .map(|id| 
format!("task_id: {}", id)) - .unwrap_or_default(); - write!(f, "{} {} {} {}", block_id, session_id, retry_id, task_id) + let message_id = format!("message_id: {}", self.message_id); + let round_id = format!("round_id: {}", self.round_id); + write!(f, "{} {}", message_id, round_id) } } @@ -117,10 +103,49 @@ pub trait Network: Send + Sync + 'static { } } +#[derive(Debug, Serialize, Deserialize)] +struct SequencedMessage { + seq: u64, + payload: Vec, +} + +#[derive(Debug)] +struct PendingMessage { + seq: u64, + message: ProtocolMessage, +} + +impl PartialEq for PendingMessage { + fn eq(&self, other: &Self) -> bool { + self.seq == other.seq + } +} + +impl Eq for PendingMessage {} + +impl PartialOrd for PendingMessage { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Ord for PendingMessage { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.seq.cmp(&other.seq) + } +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct MultiplexedMessage { + stream_id: StreamKey, + payload: SequencedMessage, +} + pub struct NetworkMultiplexer { to_receiving_streams: ActiveStreams, unclaimed_receiving_streams: Arc>, tx_to_networking_layer: MultiplexedSender, + sequence_numbers: Arc>, } type ActiveStreams = Arc>>; @@ -179,16 +204,25 @@ impl Deref for MultiplexedReceiver { } } +impl DerefMut for MultiplexedReceiver { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } +} + impl Drop for MultiplexedReceiver { fn drop(&mut self) { let _ = self.active_streams.remove(&self.stream_id); } } -#[derive(Debug, Serialize, Deserialize)] -struct MultiplexedMessage { - payload: Vec, +// Since a single stream can be used for multiple users, and, multiple users assign seq's independently, +// we need to make a key that is unique for each (send->dest) pair and stream. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +struct CompoundStreamKey { stream_id: StreamKey, + send_user_id: UserID, + recv_user_id: Option, } impl NetworkMultiplexer { @@ -200,27 +234,50 @@ impl NetworkMultiplexer { unclaimed_receiving_streams: Arc::new(DashMap::new()), tx_to_networking_layer: MultiplexedSender { inner: tx_to_networking_layer, - stream_id: Default::default(), // Start with an arbitrary stream ID, this won't get used + stream_id: Default::default(), }, + sequence_numbers: Arc::new(DashMap::new()), }; let active_streams = this.to_receiving_streams.clone(); let unclaimed_streams = this.unclaimed_receiving_streams.clone(); let tx_to_networking_layer = this.tx_to_networking_layer.clone(); + let sequence_numbers = this.sequence_numbers.clone(); + drop(tokio::spawn(async move { let network_clone = &network; let task1 = async move { - while let Some((stream_id, proto_message)) = rx_from_substreams.recv().await { + while let Some((stream_id, msg)) = rx_from_substreams.recv().await { + let compound_key = CompoundStreamKey { + stream_id, + send_user_id: msg.sender.user_id, + recv_user_id: msg.recipient.as_ref().map(|p| p.user_id), + }; + + let mut seq = sequence_numbers.entry(compound_key).or_insert(0); + let current_seq = *seq; + *seq += 1; + + crate::trace!( + "SEND SEQ {current_seq} FROM {} | StreamKey: {:?}", + msg.sender.user_id, + hex::encode(bincode::serialize(&compound_key).unwrap()) + ); + let multiplexed_message = MultiplexedMessage { - payload: proto_message.payload, stream_id, + payload: SequencedMessage { + seq: current_seq, + payload: msg.payload, + }, }; + let message = ProtocolMessage { - identifier_info: proto_message.identifier_info, - sender: proto_message.sender, - recipient: proto_message.recipient, - payload: bincode2::serialize(&multiplexed_message) + identifier_info: msg.identifier_info, + sender: msg.sender, + recipient: msg.recipient, + payload: bincode::serialize(&multiplexed_message) .expect("Failed to serialize message"), }; @@ -232,32 +289,101 @@ impl NetworkMultiplexer { }; let task2 = async move { + let mut pending_messages: HashMap< + CompoundStreamKey, + BinaryHeap>, + > = Default::default(); + let mut expected_seqs: HashMap = Default::default(); + while let Some(mut msg) = network_clone.next_message().await { if let Ok(multiplexed_message) = - bincode2::deserialize::(&msg.payload) + bincode::deserialize::(&msg.payload) { let stream_id = multiplexed_message.stream_id; - msg.payload = multiplexed_message.payload; - // Two possibilities: the entry already exists, or, it doesn't and we need to enqueue + let compound_key = CompoundStreamKey { + stream_id, + send_user_id: msg.sender.user_id, + recv_user_id: msg.recipient.as_ref().map(|p| p.user_id), + }; + let seq = multiplexed_message.payload.seq; + msg.payload = multiplexed_message.payload.payload; + + // Get or create the pending heap for this stream + let pending = pending_messages.entry(compound_key).or_default(); + let expected_seq = expected_seqs.entry(compound_key).or_default(); + + let send_user = msg.sender.user_id; + let recv_user = msg + .recipient + .as_ref() + .map(|p| p.user_id as i32) + .unwrap_or(-1); + + let compound_key_hex = + hex::encode(bincode::serialize(&compound_key).unwrap()); + crate::trace!( + "RECV SEQ {seq} FROM {} as user {:?} | Expecting: {} | StreamKey: {:?}", + send_user, + recv_user, + *expected_seq, + compound_key_hex, + ); + + // Add the message to pending + pending.push(Reverse(PendingMessage { seq, message: msg })); + + // Try to 
deliver messages in order if let Some(active_receiver) = active_streams.get(&stream_id) { - if let Err(err) = active_receiver.send(msg) { - crate::error!(%err, "Failed to send message to receiver"); - // Delete entry since the receiver is dead - let _ = active_streams.remove(&stream_id); + while let Some(Reverse(PendingMessage { seq, message: _ })) = + pending.peek() + { + if *seq != *expected_seq { + break; + } + + crate::trace!("DELIVERING SEQ {seq} FROM {} as user {:?} | Expecting: {} | StreamKey: {:?}", send_user, recv_user, *expected_seq, compound_key_hex); + + *expected_seq += 1; + + let message = pending.pop().unwrap().0.message; + + if let Err(err) = active_receiver.send(message) { + crate::error!(%err, "Failed to send message to receiver"); + let _ = active_streams.remove(&stream_id); + break; + } } } else { - // Second possibility: the entry does not exist, and another substream is received for this task. - // In this case, reserve an entry locally and store the message in the unclaimed streams. Later, - // when the user attempts to open the substream with the same ID, the message will be sent to the user. let (tx, rx) = Self::create_multiplexed_stream_inner( tx_to_networking_layer.clone(), &active_streams, stream_id, ); - let _ = tx.send(msg); - //let _ = active_streams.insert(stream_id, tx); TX already passed into active_streams above + + // Deliver any pending messages in order + while let Some(Reverse(PendingMessage { seq, message: _ })) = + pending.peek() + { + if *seq != *expected_seq { + break; + } + + crate::warn!("EARLY DELIVERY SEQ {seq} FROM {} as user {:?} | Expecting: {} | StreamKey: {:?}", send_user, recv_user, *expected_seq, compound_key_hex); + + *expected_seq += 1; + + let message = pending.pop().unwrap().0.message; + + if let Err(err) = tx.send(message) { + crate::error!(%err, "Failed to send message to receiver"); + break; + } + } + let _ = unclaimed_streams.insert(stream_id, rx); } + } else { + crate::error!("Failed to deserialize message"); } } }; @@ -282,7 +408,7 @@ impl NetworkMultiplexer { tx_to_networking_layer.stream_id = id; return SubNetwork { tx: tx_to_networking_layer, - rx: unclaimed.1.into(), + rx: Some(unclaimed.1.into()), }; } @@ -292,7 +418,42 @@ impl NetworkMultiplexer { id, ); - SubNetwork { tx, rx: rx.into() } + SubNetwork { + tx, + rx: Some(rx.into()), + } + } + + /// Creates a subnetwork, and also forwards all messages to the given channel. The network cannot be used to + /// receive messages since the messages will be forwarded to the provided channel. 
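+    ///
+    /// Illustrative usage, with names taken from this patch rather than a stable API guarantee:
+    ///
+    /// ```ignore
+    /// let (forward_tx, mut forward_rx) = tokio::sync::mpsc::unbounded_channel();
+    /// let sub = multiplexer.multiplex_with_forwarding(stream_key, forward_tx);
+    /// sub.send(protocol_message)?;            // outbound messages still go through the subnetwork
+    /// let incoming = forward_rx.recv().await; // inbound messages arrive on the forwarding channel
+    /// ```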
+ pub fn multiplex_with_forwarding( + &self, + id: impl Into, + forward_tx: tokio::sync::mpsc::UnboundedSender, + ) -> SubNetwork { + let mut network = self.multiplex(id); + let rx = network.rx.take().expect("Rx from network should be Some"); + let forwarding_task = async move { + let mut rx = rx.into_inner(); + while let Some(msg) = rx.recv().await { + crate::info!( + "Round {}: Received message from {} to {:?} (id: {})", + msg.identifier_info.round_id, + msg.sender.user_id, + msg.recipient.as_ref().map(|p| p.user_id), + msg.identifier_info.message_id, + ); + if let Err(err) = forward_tx.send(msg) { + crate::error!(%err, "Failed to forward message to network"); + // TODO: Add AtomicBool to make sending stop + break; + } + } + }; + + drop(tokio::spawn(forwarding_task)); + + network } fn create_multiplexed_stream_inner( @@ -327,7 +488,7 @@ impl From for NetworkMultiplexer { pub struct SubNetwork { tx: MultiplexedSender, - rx: Mutex, + rx: Option>, } impl SubNetwork { @@ -336,7 +497,7 @@ impl SubNetwork { } pub async fn recv(&self) -> Option { - self.rx.lock().await.next().await + self.rx.as_ref()?.lock().await.next().await } } @@ -351,14 +512,6 @@ impl Network for SubNetwork { } } -impl Stream for SubNetwork { - type Item = ProtocolMessage; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - Pin::new(self.rx.get_mut()).poll_next(cx) - } -} - pub fn deserialize<'a, T>(data: &'a [u8]) -> Result where T: Deserialize<'a>, @@ -409,7 +562,6 @@ mod tests { velocity: (u16, u16, u16), } - // NOTE: if you lower the number of nodes to 2, this test passes without issues. const NODE_COUNT: u16 = 10; pub fn setup_log() { @@ -492,17 +644,17 @@ mod tests { // used throughout the program must also use the multiplexer to prevent mixed messages. 
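// Each `StreamKey` below pairs the shared `task_hash` with a distinct `round_id`, so every round
// gets its own independently ordered substream over the same underlying gossip network.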
let multiplexer = NetworkMultiplexer::new(node); - let mut round1_network = multiplexer.multiplex(StreamKey { + let round1_network = multiplexer.multiplex(StreamKey { task_hash, // To differentiate between different instances of a running program (i.e., a task) round_id: 0, // To differentiate between different subsets of a running task }); - let mut round2_network = multiplexer.multiplex(StreamKey { + let round2_network = multiplexer.multiplex(StreamKey { task_hash, // To differentiate between different instances of a running program (i.e., a task) round_id: 1, // To differentiate between different subsets of a running task }); - let mut round3_network = multiplexer.multiplex(StreamKey { + let round3_network = multiplexer.multiplex(StreamKey { task_hash, // To differentiate between different instances of a running program (i.e., a task) round_id: 2, // To differentiate between different subsets of a running task }); @@ -519,10 +671,8 @@ mod tests { GossipHandle::build_protocol_message( IdentifierInfo { - block_id: None, - session_id: None, - retry_id: None, - task_id: None, + message_id: 0, + round_id: 0, }, i, None, @@ -539,7 +689,7 @@ mod tests { // Wait for all other nodes to send their messages let mut msgs = BTreeMap::new(); - while let Some(msg) = round1_network.next().await { + while let Some(msg) = round1_network.recv().await { let m = deserialize::(&msg.payload).unwrap(); crate::debug!(from = %msg.sender.user_id, ?m, "Received message"); // Expecting Round1 message @@ -553,7 +703,7 @@ mod tests { assert!( old.is_none(), "Duplicate message from node {}", - msg.sender.user_id + msg.sender.user_id, ); // Break if all messages are received if msgs.len() == usize::from(NODE_COUNT) - 1 { @@ -573,10 +723,8 @@ mod tests { .map(|j| { GossipHandle::build_protocol_message( IdentifierInfo { - block_id: None, - session_id: None, - retry_id: None, - task_id: None, + message_id: 0, + round_id: 0, }, i, Some(j), @@ -596,7 +744,7 @@ mod tests { // Wait for all other nodes to send their messages let mut msgs = BTreeMap::new(); - while let Some(msg) = round2_network.next().await { + while let Some(msg) = round2_network.recv().await { let m = deserialize::(&msg.payload).unwrap(); crate::debug!(from = %msg.sender.user_id, ?m, "Received message"); // Expecting Round2 message @@ -610,7 +758,7 @@ mod tests { assert!( old.is_none(), "Duplicate message from node {}", - msg.sender.user_id + msg.sender.user_id, ); // Break if all messages are received if msgs.len() == usize::from(NODE_COUNT) - 1 { @@ -628,10 +776,8 @@ mod tests { }; GossipHandle::build_protocol_message( IdentifierInfo { - block_id: None, - session_id: None, - retry_id: None, - task_id: None, + message_id: 0, + round_id: 0, }, i, None, @@ -646,7 +792,7 @@ mod tests { // Wait for all other nodes to send their messages let mut msgs = BTreeMap::new(); - while let Some(msg) = round3_network.next().await { + while let Some(msg) = round3_network.recv().await { let m = deserialize::(&msg.payload).unwrap(); crate::debug!(from = %msg.sender.user_id, ?m, "Received message"); // Expecting Round3 message @@ -660,7 +806,7 @@ mod tests { assert!( old.is_none(), "Duplicate message from node {}", - msg.sender.user_id + msg.sender.user_id, ); // Break if all messages are received if msgs.len() == usize::from(NODE_COUNT) - 1 { @@ -674,18 +820,133 @@ mod tests { Ok(()) } - fn node() -> gossip::GossipHandle { + fn node_with_id() -> (gossip::GossipHandle, ecdsa::Pair) { let identity = libp2p::identity::Keypair::generate_ed25519(); let ecdsa_key = 
sp_core::ecdsa::Pair::generate().0; let bind_port = 0; - setup::start_p2p_network(setup::NetworkConfig::new_service_network( + let handle = setup::start_p2p_network(setup::NetworkConfig::new_service_network( identity, - ecdsa_key, + ecdsa_key.clone(), Default::default(), bind_port, TOPIC, )) - .unwrap() + .unwrap(); + + (handle, ecdsa_key) + } + + fn node() -> gossip::GossipHandle { + node_with_id().0 + } + + #[tokio::test(flavor = "multi_thread")] + async fn test_stress_test_multiplexer() { + setup_log(); + crate::info!("Starting test_stress_test_multiplexer"); + + let (network0, id0) = node_with_id(); + let (network1, id1) = node_with_id(); + let mut networks = vec![network0, network1]; + + wait_for_nodes_connected(&networks).await; + + let (network0, network1) = (networks.remove(0), networks.remove(0)); + + let public0 = id0.public(); + let public1 = id1.public(); + + let multiplexer0 = NetworkMultiplexer::new(network0); + let multiplexer1 = NetworkMultiplexer::new(network1); + + let stream_key = StreamKey { + task_hash: sha2_256(&[255u8]), + round_id: 100, + }; + + let sub0 = multiplexer0.multiplex(stream_key); + let sub1 = multiplexer1.multiplex(stream_key); + + const MESSAGE_COUNT: u64 = 100; + + #[derive(Serialize, Deserialize)] + struct StressTestPayload { + value: u64, + } + + let handle0 = tokio::spawn(async move { + let sub0 = &sub0; + + let recv_task = async move { + let mut count = 0; + while let Some(msg) = sub0.next_message().await { + assert_eq!(msg.sender.user_id, 1, "Bad sender"); + assert_eq!(msg.recipient.unwrap().user_id, 0, "Bad recipient"); + + let number: StressTestPayload = deserialize(&msg.payload).unwrap(); + assert_eq!(number.value, count, "Bad message order"); + count += 1; + + if count == MESSAGE_COUNT { + break; + } + } + }; + + let send_task = async move { + for i in 0..MESSAGE_COUNT { + let msg = GossipHandle::build_protocol_message( + IdentifierInfo::default(), + 0, + Some(1), + &StressTestPayload { value: i }, + Some(public0), + Some(public1), + ); + sub0.send(msg).unwrap(); + } + }; + + tokio::join!(recv_task, send_task) + }); + + let handle1 = tokio::spawn(async move { + let sub1 = &sub1; + + let recv_task = async move { + let mut count = 0; + while let Some(msg) = sub1.next_message().await { + assert_eq!(msg.sender.user_id, 0, "Bad sender"); + assert_eq!(msg.recipient.unwrap().user_id, 1, "Bad recipient"); + let number: StressTestPayload = deserialize(&msg.payload).unwrap(); + assert_eq!(number.value, count, "Bad message order"); + count += 1; + + if count == MESSAGE_COUNT { + break; + } + } + }; + + let send_task = async move { + for i in 0..MESSAGE_COUNT { + let msg = GossipHandle::build_protocol_message( + IdentifierInfo::default(), + 1, + Some(0), + &StressTestPayload { value: i }, + Some(public1), + Some(public0), + ); + sub1.send(msg).unwrap(); + } + }; + + tokio::join!(recv_task, send_task) + }); + + // Wait for all tasks to complete + tokio::try_join!(handle0, handle1).unwrap(); } #[tokio::test(flavor = "multi_thread")] diff --git a/sdk/src/network/round_based_compat.rs b/sdk/src/network/round_based_compat.rs index d2872f19..012e6931 100644 --- a/sdk/src/network/round_based_compat.rs +++ b/sdk/src/network/round_based_compat.rs @@ -5,89 +5,88 @@ use std::collections::{BTreeMap, HashMap, VecDeque}; use std::sync::Arc; use crate::futures::prelude::*; -use crate::network::{self, IdentifierInfo, Network, NetworkMultiplexer, StreamKey, SubNetwork}; +use crate::network::{IdentifierInfo, NetworkMultiplexer, ProtocolMessage, StreamKey, SubNetwork}; use 
crate::subxt_core::ext::sp_core::ecdsa; -use round_based::{Delivery, Incoming, Outgoing}; -use round_based::{MessageDestination, MessageType, MsgId, PartyIndex}; +use round_based::{Delivery, Incoming, MessageType, Outgoing}; +use round_based::{MessageDestination, MsgId, PartyIndex}; use stream::{SplitSink, SplitStream}; -pub struct NetworkDeliveryWrapper { +use super::ParticipantInfo; + +pub struct NetworkDeliveryWrapper { /// The wrapped network implementation. - network: NetworkWrapper, + network: NetworkWrapper, } -impl NetworkDeliveryWrapper +impl NetworkDeliveryWrapper where - N: Network + Unpin, M: Clone + Send + Unpin + 'static, - M: serde::Serialize, - M: serde::de::DeserializeOwned, + M: serde::Serialize + serde::de::DeserializeOwned, { /// Create a new NetworkDeliveryWrapper over a network implementation with the given party index. pub fn new( - network: N, + mux: Arc, i: PartyIndex, task_hash: [u8; 32], parties: BTreeMap, ) -> Self { - let mux = NetworkMultiplexer::new(network); - // By default, we create 4 substreams for each party. - let sub_streams = (1..5) - .map(|i| { - let key = StreamKey { - // This is a dummy task hash, it should be replaced with the actual task hash - task_hash: [0u8; 32], - round_id: i, - }; - let substream = mux.multiplex(key); - (key, substream) - }) - .collect(); + let (tx_forward, rx) = tokio::sync::mpsc::unbounded_channel(); + // By default, we create 10 substreams for each party. + let mut sub_streams = HashMap::new(); + for x in 0..10 { + let key = StreamKey { + task_hash, + round_id: x, + }; + // Creates a multiplexed subnetwork, and also forwards all messages to the given channel + let _ = sub_streams.insert(key, mux.multiplex_with_forwarding(key, tx_forward.clone())); + } + let network = NetworkWrapper { me: i, mux, incoming_queue: VecDeque::new(), - outgoing_queue: VecDeque::new(), sub_streams, participants: parties, task_hash, + tx_forward, + rx, next_msg_id: Arc::new(NextMessageId::default()), - _network: core::marker::PhantomData, }; + NetworkDeliveryWrapper { network } } } /// A NetworkWrapper wraps a network implementation and implements [`Stream`] and [`Sink`] for /// it. -pub struct NetworkWrapper { +pub struct NetworkWrapper { /// The current party index. me: PartyIndex, /// Our network Multiplexer. - mux: NetworkMultiplexer, + mux: Arc, /// A Map of substreams for each round. - sub_streams: HashMap, + sub_streams: HashMap, //HashMap, /// A queue of incoming messages. + #[allow(dead_code)] incoming_queue: VecDeque>, - /// A queue of outgoing messages. - outgoing_queue: VecDeque>, /// Participants in the network with their corresponding ECDSA public keys. // Note: This is a BTreeMap to ensure that the participants are sorted by their party index. 
participants: BTreeMap, next_msg_id: Arc, + tx_forward: tokio::sync::mpsc::UnboundedSender, + rx: tokio::sync::mpsc::UnboundedReceiver, task_hash: [u8; 32], - _network: core::marker::PhantomData, } -impl Delivery for NetworkDeliveryWrapper +impl Delivery for NetworkDeliveryWrapper where - N: Network + Unpin, M: Clone + Send + Unpin + 'static, M: serde::Serialize + serde::de::DeserializeOwned, M: round_based::ProtocolMessage, { - type Send = SplitSink, Outgoing>; - type Receive = SplitStream>; + type Send = SplitSink, Outgoing>; + type Receive = SplitStream>; type SendError = crate::Error; type ReceiveError = crate::Error; @@ -97,61 +96,46 @@ where } } -impl Stream for NetworkWrapper +impl Stream for NetworkWrapper where - N: Network + Unpin, M: serde::de::DeserializeOwned + Unpin, M: round_based::ProtocolMessage, { type Item = Result, crate::Error>; fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let sub_streams = self.sub_streams.values(); - // pull all substreams - let mut messages = Vec::new(); - for sub_stream in sub_streams { - let p = sub_stream.next_message().poll_unpin(cx); - let m = match p { - Poll::Ready(Some(msg)) => msg, - _ => continue, + let res = ready!(self.get_mut().rx.poll_recv(cx)); + if let Some(res) = res { + let msg_type = if res.recipient.is_some() { + MessageType::P2P + } else { + MessageType::Broadcast }; - let msg = network::deserialize::(&m.payload)?; - messages.push((m.sender.user_id, m.recipient, msg)); - } - // Sort the incoming messages by round. - messages.sort_by_key(|(_, _, msg)| msg.round()); + let id = res.identifier_info.message_id; - let this = self.get_mut(); - // Push all messages to the incoming queue - messages - .into_iter() - .map(|(sender, recipient, msg)| Incoming { - id: this.next_msg_id.next(), - sender, - msg_type: match recipient { - Some(_) => MessageType::P2P, - None => MessageType::Broadcast, - }, + let msg = match bincode::deserialize(&res.payload) { + Ok(msg) => msg, + Err(err) => { + crate::error!(%err, "Failed to deserialize message"); + return Poll::Ready(Some(Err(crate::Error::Other(err.to_string())))); + } + }; + + Poll::Ready(Some(Ok(Incoming { msg, - }) - .for_each(|m| this.incoming_queue.push_back(m)); - // Reorder the incoming queue by round message. - let maybe_msg = this.incoming_queue.pop_front(); - if let Some(msg) = maybe_msg { - Poll::Ready(Some(Ok(msg))) + sender: res.sender.user_id, + id, + msg_type, + }))) } else { - // No message in the queue, and no message in the substreams. - // Tell the network to wake us up when a new message arrives. - cx.waker().wake_by_ref(); - Poll::Pending + Poll::Ready(None) } } } -impl Sink> for NetworkWrapper +impl Sink> for NetworkWrapper where - N: Network + Unpin, M: Unpin + serde::Serialize, M: round_based::ProtocolMessage, { @@ -161,48 +145,64 @@ where Poll::Ready(Ok(())) } - fn start_send(self: Pin<&mut Self>, msg: Outgoing) -> Result<(), Self::Error> { - self.get_mut().outgoing_queue.push_back(msg); - Ok(()) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - // Dequeue all messages and send them one by one to the network + fn start_send(self: Pin<&mut Self>, out: Outgoing) -> Result<(), Self::Error> { let this = self.get_mut(); - while let Some(out) = this.outgoing_queue.pop_front() { - // Get the substream to send the message to. 
- let key = StreamKey { - task_hash: this.task_hash, - round_id: i32::from(out.msg.round()), - }; - let substream = this - .sub_streams - .entry(key) - .or_insert_with(|| this.mux.multiplex(key)); - let identifier_info = IdentifierInfo { - block_id: None, - session_id: None, - retry_id: None, - task_id: None, - }; - let (to, to_network_id) = match out.recipient { - MessageDestination::AllParties => (None, None), - MessageDestination::OneParty(p) => (Some(p), this.participants.get(&p).cloned()), - }; - let protocol_message = N::build_protocol_message( - identifier_info, - this.me, - to, - &out.msg, - this.participants.get(&this.me).cloned(), - to_network_id, - ); - let p = substream.send_message(protocol_message).poll_unpin(cx); - match ready!(p) { - Ok(()) => continue, - Err(e) => return Poll::Ready(Err(e)), - } + let id = this.next_msg_id.next(); + + let round_id = out.msg.round(); + + crate::info!( + "Round {}: Sending message from {} to {:?} (id: {})", + round_id, + this.me, + out.recipient, + id, + ); + + // Get the substream to send the message to. + let key = StreamKey { + task_hash: this.task_hash, + round_id: i32::from(round_id), + }; + let substream = this.sub_streams.entry(key).or_insert_with(|| { + this.mux + .multiplex_with_forwarding(key, this.tx_forward.clone()) + }); + + let identifier_info = IdentifierInfo { + message_id: id, + round_id, + }; + let (to, to_network_id) = match out.recipient { + MessageDestination::AllParties => (None, None), + MessageDestination::OneParty(p) => (Some(p), this.participants.get(&p).cloned()), + }; + + if matches!(out.recipient, MessageDestination::OneParty(_)) && to_network_id.is_none() { + crate::warn!("Recipient not found when required for {:?}", out.recipient); + return Err(crate::Error::Other("Recipient not found".to_string())); } + + let protocol_message = ProtocolMessage { + identifier_info, + sender: ParticipantInfo { + user_id: this.me, + ecdsa_key: this.participants.get(&this.me).cloned(), + }, + recipient: to.map(|user_id| ParticipantInfo { + user_id, + ecdsa_key: to_network_id, + }), + payload: bincode::serialize(&out.msg).expect("Should be able to serialize message"), + }; + + match substream.send(protocol_message) { + Ok(()) => Ok(()), + Err(e) => Err(e), + } + } + + fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context) -> Poll> { Poll::Ready(Ok(())) } @@ -215,7 +215,7 @@ where struct NextMessageId(AtomicU64); impl NextMessageId { - pub fn next(&self) -> MsgId { - self.0.fetch_add(1, core::sync::atomic::Ordering::Relaxed) + fn next(&self) -> MsgId { + self.0.fetch_add(1, std::sync::atomic::Ordering::Relaxed) } } From 9cc7c804eb394ee1afa52704e22671e6944f0102 Mon Sep 17 00:00:00 2001 From: Donovan Tjemmes <37707055+Tjemmmic@users.noreply.github.com> Date: Wed, 20 Nov 2024 11:05:55 -0600 Subject: [PATCH 2/7] fix: solve aggregator initialization race condition (#491) --- .../src/contexts/aggregator.rs | 10 ++++++++++ blueprints/incredible-squaring-eigenlayer/src/tests.rs | 6 +++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/blueprints/incredible-squaring-eigenlayer/src/contexts/aggregator.rs b/blueprints/incredible-squaring-eigenlayer/src/contexts/aggregator.rs index 7b039081..d79a67b1 100644 --- a/blueprints/incredible-squaring-eigenlayer/src/contexts/aggregator.rs +++ b/blueprints/incredible-squaring-eigenlayer/src/contexts/aggregator.rs @@ -291,6 +291,16 @@ impl AggregatorContext { let task_index = task_response.referenceTaskIndex; let task_response_digest = keccak256(TaskResponse::abi_encode(&task_response)); + 
// Check if we have the task initialized first + if !self.tasks.lock().await.contains_key(&task_index) { + info!( + "Task {} not yet initialized, caching response for later processing", + task_index + ); + self.response_cache.lock().await.push_back(resp); + return Ok(()); + } + if self .tasks_responses .lock() diff --git a/blueprints/incredible-squaring-eigenlayer/src/tests.rs b/blueprints/incredible-squaring-eigenlayer/src/tests.rs index c3f3d05d..a6d9a5fd 100644 --- a/blueprints/incredible-squaring-eigenlayer/src/tests.rs +++ b/blueprints/incredible-squaring-eigenlayer/src/tests.rs @@ -245,7 +245,8 @@ pub async fn setup_task_spawner( let quorums = Bytes::from(vec![0]); async move { loop { - tokio::time::sleep(std::time::Duration::from_millis(5000)).await; + // Increased delay to allow for proper task initialization + tokio::time::sleep(std::time::Duration::from_secs(10)).await; info!("Creating a new task..."); if get_receipt( @@ -270,6 +271,9 @@ pub async fn setup_task_spawner( info!("Updated operators for quorum..."); } + // Wait for task initialization to complete + tokio::time::sleep(std::time::Duration::from_secs(2)).await; + tokio::process::Command::new("sh") .arg("-c") .arg(format!( From 851c73fbb31280046cb1676009a33affd086971c Mon Sep 17 00:00:00 2001 From: "webb-spider[bot]" <182531479+webb-spider[bot]@users.noreply.github.com> Date: Wed, 20 Nov 2024 09:06:12 -0800 Subject: [PATCH 3/7] chore: release (#488) Co-authored-by: webb-spider[bot] <182531479+webb-spider[bot]@users.noreply.github.com> --- Cargo.lock | 10 +++++----- Cargo.toml | 8 ++++---- blueprint-manager/CHANGELOG.md | 6 ++++++ blueprint-manager/Cargo.toml | 2 +- blueprint-test-utils/CHANGELOG.md | 6 ++++++ blueprint-test-utils/Cargo.toml | 2 +- cli/CHANGELOG.md | 6 ++++++ cli/Cargo.toml | 2 +- macros/context-derive/CHANGELOG.md | 6 ++++++ macros/context-derive/Cargo.toml | 2 +- sdk/CHANGELOG.md | 10 ++++++++++ sdk/Cargo.toml | 2 +- 12 files changed, 48 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f118c458..48ac1b0d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1963,7 +1963,7 @@ dependencies = [ [[package]] name = "blueprint-manager" -version = "0.1.2" +version = "0.1.3" dependencies = [ "async-trait", "auto_impl", @@ -2003,7 +2003,7 @@ dependencies = [ [[package]] name = "blueprint-test-utils" -version = "0.1.2" +version = "0.1.3" dependencies = [ "alloy-contract", "alloy-primitives 0.7.7", @@ -2256,7 +2256,7 @@ dependencies = [ [[package]] name = "cargo-tangle" -version = "0.2.4" +version = "0.3.0" dependencies = [ "alloy-json-abi", "alloy-network", @@ -4762,7 +4762,7 @@ dependencies = [ [[package]] name = "gadget-context-derive" -version = "0.2.1" +version = "0.2.2" dependencies = [ "alloy-network", "alloy-provider", @@ -4803,7 +4803,7 @@ dependencies = [ [[package]] name = "gadget-sdk" -version = "0.5.0" +version = "0.5.1" dependencies = [ "alloy-contract", "alloy-json-abi", diff --git a/Cargo.toml b/Cargo.toml index ad3d0e2b..2a844379 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -45,10 +45,10 @@ broken_intra_doc_links = "deny" [workspace.dependencies] gadget-io = { version = "0.0.5", path = "./gadget-io", default-features = false } -blueprint-manager = { version = "0.1.2", path = "./blueprint-manager" } +blueprint-manager = { version = "0.1.3", path = "./blueprint-manager" } blueprint-serde = { version = "0.2.0", path = "./blueprint-serde", package = "gadget-blueprint-serde" } blueprint-test-utils = { path = "./blueprint-test-utils" } -gadget-sdk = { path = "./sdk", default-features = 
false, version = "0.5.0" } +gadget-sdk = { path = "./sdk", default-features = false, version = "0.5.1" } incredible-squaring-blueprint = { path = "./blueprints/incredible-squaring", default-features = false, version = "0.1.1" } incredible-squaring-blueprint-eigenlayer = { path = "./blueprints/incredible-squaring-eigenlayer", default-features = false, version = "0.1.1" } @@ -56,10 +56,10 @@ incredible-squaring-blueprint-symbiotic = { path = "./blueprints/incredible-squa blueprint-examples = { path = "./blueprints/examples", default-features = false, version = "0.1.1" } gadget-blueprint-proc-macro = { path = "./macros/blueprint-proc-macro", default-features = false, version = "0.4.0" } gadget-blueprint-proc-macro-core = { path = "./macros/blueprint-proc-macro-core", default-features = false, version = "0.2.0" } -gadget-context-derive = { path = "./macros/context-derive", default-features = false, version = "0.2.1" } +gadget-context-derive = { path = "./macros/context-derive", default-features = false, version = "0.2.2" } blueprint-build-utils = { path = "./blueprint-build-utils", default-features = false, version = "0.1.0" } blueprint-metadata = { path = "./blueprint-metadata", default-features = false, version = "0.1.7" } -cargo-tangle = { path = "./cli", version = "0.2.4" } +cargo-tangle = { path = "./cli", version = "0.3.0" } cargo_metadata = { version = "0.18.1" } # Tangle-related dependencies diff --git a/blueprint-manager/CHANGELOG.md b/blueprint-manager/CHANGELOG.md index 2a616408..584341e6 100644 --- a/blueprint-manager/CHANGELOG.md +++ b/blueprint-manager/CHANGELOG.md @@ -7,6 +7,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.1.3](https://github.com/tangle-network/gadget/compare/blueprint-manager-v0.1.2...blueprint-manager-v0.1.3) - 2024-11-20 + +### Other + +- updated the following local packages: gadget-sdk + ## [0.1.2](https://github.com/tangle-network/gadget/compare/blueprint-manager-v0.1.1...blueprint-manager-v0.1.2) - 2024-11-16 ### Other diff --git a/blueprint-manager/Cargo.toml b/blueprint-manager/Cargo.toml index 6e7d8248..2ee1f6a1 100644 --- a/blueprint-manager/Cargo.toml +++ b/blueprint-manager/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "blueprint-manager" -version = "0.1.2" +version = "0.1.3" description = "Tangle Blueprint manager and Runner" authors.workspace = true edition.workspace = true diff --git a/blueprint-test-utils/CHANGELOG.md b/blueprint-test-utils/CHANGELOG.md index 1cf1b3da..7b396105 100644 --- a/blueprint-test-utils/CHANGELOG.md +++ b/blueprint-test-utils/CHANGELOG.md @@ -7,6 +7,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.1.3](https://github.com/tangle-network/gadget/compare/blueprint-test-utils-v0.1.2...blueprint-test-utils-v0.1.3) - 2024-11-20 + +### Other + +- updated the following local packages: gadget-sdk, cargo-tangle + ## [0.1.2](https://github.com/tangle-network/gadget/compare/blueprint-test-utils-v0.1.1...blueprint-test-utils-v0.1.2) - 2024-11-16 ### Added diff --git a/blueprint-test-utils/Cargo.toml b/blueprint-test-utils/Cargo.toml index 90409598..958c2d80 100644 --- a/blueprint-test-utils/Cargo.toml +++ b/blueprint-test-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "blueprint-test-utils" -version = "0.1.2" +version = "0.1.3" description = "Tangle Blueprint test utils" authors.workspace = true edition.workspace = true diff --git a/cli/CHANGELOG.md b/cli/CHANGELOG.md index b6ef143b..f01d88bf 100644 --- 
a/cli/CHANGELOG.md +++ b/cli/CHANGELOG.md @@ -7,6 +7,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.3.0](https://github.com/tangle-network/gadget/compare/cargo-tangle-v0.2.4...cargo-tangle-v0.3.0) - 2024-11-20 + +### Added + +- *(cargo-tangle)* [**breaking**] use Soldeer for dependencies ([#487](https://github.com/tangle-network/gadget/pull/487)) + ## [0.2.4](https://github.com/tangle-network/gadget/compare/cargo-tangle-v0.2.3...cargo-tangle-v0.2.4) - 2024-11-16 ### Other diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 418bb664..64909b50 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "cargo-tangle" -version = "0.2.4" +version = "0.3.0" description = "A command-line tool to create and deploy blueprints on Tangle Network" authors.workspace = true edition.workspace = true diff --git a/macros/context-derive/CHANGELOG.md b/macros/context-derive/CHANGELOG.md index 7e435519..99cba0b2 100644 --- a/macros/context-derive/CHANGELOG.md +++ b/macros/context-derive/CHANGELOG.md @@ -7,6 +7,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.2.2](https://github.com/tangle-network/gadget/compare/gadget-context-derive-v0.2.1...gadget-context-derive-v0.2.2) - 2024-11-20 + +### Added + +- add more service ctx methods for Tangle ([#477](https://github.com/tangle-network/gadget/pull/477)) + ## [0.2.1](https://github.com/tangle-network/gadget/compare/gadget-context-derive-v0.2.0...gadget-context-derive-v0.2.1) - 2024-11-16 ### Added diff --git a/macros/context-derive/Cargo.toml b/macros/context-derive/Cargo.toml index bee33ec8..93e8997a 100644 --- a/macros/context-derive/Cargo.toml +++ b/macros/context-derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "gadget-context-derive" -version = "0.2.1" +version = "0.2.2" authors.workspace = true edition.workspace = true license.workspace = true diff --git a/sdk/CHANGELOG.md b/sdk/CHANGELOG.md index 1fa9ba8e..bb91de3c 100644 --- a/sdk/CHANGELOG.md +++ b/sdk/CHANGELOG.md @@ -7,6 +7,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [0.5.1](https://github.com/tangle-network/gadget/compare/gadget-sdk-v0.5.0...gadget-sdk-v0.5.1) - 2024-11-20 + +### Added + +- add more service ctx methods for Tangle ([#477](https://github.com/tangle-network/gadget/pull/477)) + +### Fixed + +- race conditions in multiplexer ([#486](https://github.com/tangle-network/gadget/pull/486)) + ## [0.5.0](https://github.com/tangle-network/gadget/compare/gadget-sdk-v0.4.0...gadget-sdk-v0.5.0) - 2024-11-16 ### Added diff --git a/sdk/Cargo.toml b/sdk/Cargo.toml index 9c111096..54f9b5e0 100644 --- a/sdk/Cargo.toml +++ b/sdk/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "gadget-sdk" -version = "0.5.0" +version = "0.5.1" authors.workspace = true edition.workspace = true homepage.workspace = true From a2f2a4715fb1b7d9f92d6805ddc35efcf8dbb89b Mon Sep 17 00:00:00 2001 From: Serial <69764315+Serial-ATA@users.noreply.github.com> Date: Fri, 22 Nov 2024 10:54:44 -0500 Subject: [PATCH 4/7] fix(gadget-sdk): use the right field for container status --- sdk/src/docker.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/src/docker.rs b/sdk/src/docker.rs index e600d713..33b532e1 100644 --- a/sdk/src/docker.rs +++ b/sdk/src/docker.rs @@ -459,7 +459,7 @@ impl<'a> Container<'a> { }); let containers = self.connection.list_containers(options).await?; - let Some(status) = &containers[0].status else 
{ + let Some(status) = &containers[0].state else { return Ok(None); }; From 042718b9e9067857ad60fb3b0a15b7d89153613a Mon Sep 17 00:00:00 2001 From: Alex <69764315+Serial-ATA@users.noreply.github.com> Date: Tue, 26 Nov 2024 13:16:27 -0500 Subject: [PATCH 5/7] fix(gadget-blueprint-serde)!: handle bytes properly (#500) * fix(gadget-blueprint-serde)!: test `AccountId32` and `Bytes` serialization * fix(gadget-blueprint-proc-macro)!: recognize `ByteBuf` as `FieldType::Bytes` * feat(gadget-sdk): export ByteBuf * chore: fmt --- Cargo.lock | 1 + Cargo.toml | 1 + blueprint-serde/Cargo.toml | 6 +- blueprint-serde/src/de.rs | 115 +++----------------- blueprint-serde/src/lib.rs | 1 + blueprint-serde/src/ser.rs | 67 +----------- blueprint-serde/src/tests.rs | 121 ++++++++++++++++++++++ macros/blueprint-proc-macro/src/shared.rs | 9 +- sdk/src/lib.rs | 1 + 9 files changed, 152 insertions(+), 170 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 48ac1b0d..a4611c87 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4756,6 +4756,7 @@ version = "0.2.0" dependencies = [ "paste", "serde", + "serde_bytes", "serde_test", "tangle-subxt", ] diff --git a/Cargo.toml b/Cargo.toml index 2a844379..ef20b058 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -130,6 +130,7 @@ reqwest = "0.12.7" rustdoc-types = "0.31.0" schnorrkel = { version = "0.11.4", default-features = false, features = ["preaudit_deprecated", "getrandom"] } serde = { version = "1.0.208", default-features = false } +serde_bytes = { version = "0.11.15", default-features = false } serde_json = "1.0" serde_test = "1.0.177" sha2 = "0.10.8" diff --git a/blueprint-serde/Cargo.toml b/blueprint-serde/Cargo.toml index 4be99d22..13fc0431 100644 --- a/blueprint-serde/Cargo.toml +++ b/blueprint-serde/Cargo.toml @@ -11,6 +11,7 @@ repository.workspace = true [dependencies] paste.workspace = true serde.workspace = true +serde_bytes = { workspace = true, features = ["alloc"] } tangle-subxt.workspace = true [dev-dependencies] @@ -21,4 +22,7 @@ workspace = true [features] default = ["std"] -std = [] +std = [ + "serde/std", + "serde_bytes/std" +] diff --git a/blueprint-serde/src/de.rs b/blueprint-serde/src/de.rs index 87d0b12f..ffbd6cd8 100644 --- a/blueprint-serde/src/de.rs +++ b/blueprint-serde/src/de.rs @@ -1,10 +1,10 @@ use crate::error::{Error, Result, UnsupportedType}; use crate::Field; use alloc::collections::BTreeMap; -use alloc::string::String; +use alloc::string::{String, ToString}; use alloc::vec::Vec; -use serde::de; use serde::de::IntoDeserializer; +use serde::{de, forward_to_deserialize_any}; use tangle_subxt::subxt_core::utils::AccountId32; use tangle_subxt::tangle_testnet_runtime::api::runtime_types::bounded_collections::bounded_vec::BoundedVec; use tangle_subxt::tangle_testnet_runtime::api::runtime_types::tangle_primitives::services::field::BoundedString; @@ -16,24 +16,6 @@ use tangle_subxt::tangle_testnet_runtime::api::runtime_types::tangle_primitives: /// See [`crate::from_field`]. pub struct Deserializer(pub(crate) Field); -macro_rules! deserialize_primitive { - ($($t:ty => $pat:pat),+ $(,)?) => { - $( - paste::paste! 
{ - fn [](self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - match self.0 { - $pat(value) => visitor.[](value), - _ => Err(self.invalid_type(&visitor)) - } - } - } - )+ - } -} - impl<'de> de::Deserializer<'de> for Deserializer { type Error = Error; @@ -56,25 +38,16 @@ impl<'de> de::Deserializer<'de> for Deserializer { let s = String::from_utf8(s.0 .0)?; visitor.visit_string(s) } - Field::Bytes(b) => visitor.visit_bytes(b.0.as_slice()), + Field::Bytes(b) => { + // Unless `deserialize_bytes` is explicitly called, assume a sequence is desired + de::value::SeqDeserializer::new(b.0.into_iter()).deserialize_any(visitor) + } Field::Array(seq) | Field::List(seq) => visit_seq(seq.0, visitor), Field::Struct(_, fields) => visit_struct(*fields, visitor), - Field::AccountId(a) => visitor.visit_bytes(a.0.as_slice()), + Field::AccountId(a) => visitor.visit_string(a.to_string()), } } - deserialize_primitive!( - bool => Field::Bool, - i8 => Field::Int8, - i16 => Field::Int16, - i32 => Field::Int32, - i64 => Field::Int64, - u8 => Field::Uint8, - u16 => Field::Uint16, - u32 => Field::Uint32, - u64 => Field::Uint64, - ); - fn deserialize_f32(self, _visitor: V) -> Result where V: de::Visitor<'de>, @@ -118,36 +91,6 @@ impl<'de> de::Deserializer<'de> for Deserializer { self.deserialize_string(visitor) } - fn deserialize_string(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - match self.0 { - Field::String(bound_string) => { - visitor.visit_string(String::from_utf8(bound_string.0 .0)?) - } - _ => Err(self.invalid_type(&visitor)), - } - } - - fn deserialize_bytes(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - self.deserialize_byte_buf(visitor) - } - - fn deserialize_byte_buf(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - match self.0 { - Field::Bytes(seq) => visitor.visit_byte_buf(seq.0), - Field::String(s) => visitor.visit_string(String::from_utf8(s.0 .0)?), - _ => Err(self.invalid_type(&visitor)), - } - } - fn deserialize_option(self, visitor: V) -> Result where V: de::Visitor<'de>, @@ -182,23 +125,6 @@ impl<'de> de::Deserializer<'de> for Deserializer { visitor.visit_newtype_struct(self) } - fn deserialize_seq(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - match self.0 { - Field::Array(seq) | Field::List(seq) => visit_seq(seq.0, visitor), - _ => Err(self.invalid_type(&visitor)), - } - } - - fn deserialize_tuple(self, _len: usize, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - self.deserialize_seq(visitor) - } - fn deserialize_tuple_struct( self, _name: &'static str, @@ -228,21 +154,6 @@ impl<'de> de::Deserializer<'de> for Deserializer { Err(Error::UnsupportedType(UnsupportedType::Map)) } - fn deserialize_struct( - self, - _name: &'static str, - _fields: &'static [&'static str], - visitor: V, - ) -> Result - where - V: de::Visitor<'de>, - { - match self.0 { - Field::Struct(_name, fields) => visit_struct(*fields, visitor), - _ => Err(self.invalid_type(&visitor)), - } - } - fn deserialize_enum( self, _name: &'static str, @@ -261,13 +172,6 @@ impl<'de> de::Deserializer<'de> for Deserializer { } } - fn deserialize_identifier(self, visitor: V) -> Result - where - V: de::Visitor<'de>, - { - self.deserialize_str(visitor) - } - fn deserialize_ignored_any(self, visitor: V) -> Result where V: de::Visitor<'de>, @@ -275,6 +179,11 @@ impl<'de> de::Deserializer<'de> for Deserializer { drop(self); visitor.visit_unit() } + + forward_to_deserialize_any! 
{ + bool u8 u16 u32 u64 i8 i16 i32 i64 string + bytes byte_buf seq struct identifier tuple + } } impl Deserializer { diff --git a/blueprint-serde/src/lib.rs b/blueprint-serde/src/lib.rs index c1df4d12..53270c8b 100644 --- a/blueprint-serde/src/lib.rs +++ b/blueprint-serde/src/lib.rs @@ -58,6 +58,7 @@ use tangle_subxt::subxt_core::utils::AccountId32; pub use tangle_subxt::tangle_testnet_runtime::api::runtime_types::tangle_primitives::services::field::Field; pub use tangle_subxt::tangle_testnet_runtime::api::runtime_types::bounded_collections::bounded_vec::BoundedVec; pub use ser::new_bounded_string; +pub use serde_bytes::ByteBuf; use error::Result; /// Derive a [`Field`] from an instance of type `S` diff --git a/blueprint-serde/src/ser.rs b/blueprint-serde/src/ser.rs index 04b02036..5099b3d8 100644 --- a/blueprint-serde/src/ser.rs +++ b/blueprint-serde/src/ser.rs @@ -21,10 +21,10 @@ impl<'a> serde::Serializer for &'a mut Serializer { type SerializeSeq = SerializeSeq<'a>; type SerializeTuple = Self::SerializeSeq; type SerializeTupleStruct = SerializeTupleStruct<'a>; - type SerializeTupleVariant = Self; - type SerializeMap = Self; + type SerializeTupleVariant = ser::Impossible; + type SerializeMap = ser::Impossible; type SerializeStruct = SerializeStruct<'a>; - type SerializeStructVariant = Self; + type SerializeStructVariant = ser::Impossible; fn serialize_bool(self, v: bool) -> Result { Ok(Field::Bool(v)) @@ -164,7 +164,7 @@ impl<'a> serde::Serializer for &'a mut Serializer { _variant: &'static str, _len: usize, ) -> Result { - Ok(self) + Err(Self::Error::UnsupportedType(UnsupportedType::NonUnitEnum)) } fn serialize_map(self, _len: Option) -> Result { @@ -187,7 +187,7 @@ impl<'a> serde::Serializer for &'a mut Serializer { _variant: &'static str, _len: usize, ) -> Result { - Ok(self) + Err(Self::Error::UnsupportedType(UnsupportedType::NonUnitEnum)) } fn is_human_readable(&self) -> bool { @@ -342,63 +342,6 @@ impl ser::SerializeStruct for SerializeStruct<'_> { } } -// === UNSUPPORTED TYPES === - -impl ser::SerializeTupleVariant for &mut Serializer { - type Ok = Field; - type Error = crate::error::Error; - - fn serialize_field(&mut self, _value: &T) -> Result<()> - where - T: ?Sized + Serialize, - { - Err(Self::Error::UnsupportedType(UnsupportedType::NonUnitEnum)) - } - - fn end(self) -> Result { - Err(Self::Error::UnsupportedType(UnsupportedType::NonUnitEnum)) - } -} - -impl ser::SerializeMap for &mut Serializer { - type Ok = Field; - type Error = crate::error::Error; - - fn serialize_key(&mut self, _key: &T) -> Result<()> - where - T: ?Sized + Serialize, - { - Err(Self::Error::UnsupportedType(UnsupportedType::Map)) - } - - fn serialize_value(&mut self, _value: &T) -> Result<()> - where - T: ?Sized + Serialize, - { - Err(Self::Error::UnsupportedType(UnsupportedType::Map)) - } - - fn end(self) -> Result { - Err(Self::Error::UnsupportedType(UnsupportedType::Map)) - } -} - -impl ser::SerializeStructVariant for &mut Serializer { - type Ok = Field; - type Error = crate::error::Error; - - fn serialize_field(&mut self, _key: &'static str, _value: &T) -> Result<()> - where - T: ?Sized + Serialize, - { - Err(Self::Error::UnsupportedType(UnsupportedType::NonUnitEnum)) - } - - fn end(self) -> Result { - Err(Self::Error::UnsupportedType(UnsupportedType::NonUnitEnum)) - } -} - pub fn new_bounded_string(s: S) -> BoundedString where S: Into, diff --git a/blueprint-serde/src/tests.rs b/blueprint-serde/src/tests.rs index b14d6d01..200c01f1 100644 --- a/blueprint-serde/src/tests.rs +++ 
b/blueprint-serde/src/tests.rs @@ -456,6 +456,83 @@ mod sequences { use super::*; use alloc::vec::Vec; + fn expected_empty_bytes_field() -> Field { + Field::Bytes(BoundedVec(Vec::new())) + } + + #[test] + fn test_ser_bytes_empty() { + let bytes: serde_bytes::ByteBuf = serde_bytes::ByteBuf::from(Vec::new()); + + assert_ser_tokens(&bytes, &[Token::Bytes(&[])]); + + let field = to_field(&bytes).unwrap(); + assert_eq!(field, expected_empty_bytes_field()); + } + + #[test] + fn test_de_bytes_empty() { + let bytes: serde_bytes::ByteBuf = serde_bytes::ByteBuf::from(Vec::new()); + + assert_de_tokens(&bytes, &[Token::Bytes(&[])]); + + let bytes_de: serde_bytes::ByteBuf = from_field(expected_empty_bytes_field()).unwrap(); + assert_eq!(bytes, bytes_de); + } + + #[test] + fn test_de_bytes_seq_empty() { + let bytes: Vec = Vec::new(); + + assert_de_tokens(&bytes, &[Token::Seq { len: Some(0) }, Token::SeqEnd]); + + let bytes_de: Vec = from_field(expected_empty_bytes_field()).unwrap(); + assert_eq!(bytes, bytes_de); + } + + fn expected_bytes_field() -> Field { + Field::Bytes(BoundedVec(vec![1, 2, 3])) + } + + #[test] + fn test_ser_bytes() { + let bytes: serde_bytes::ByteBuf = serde_bytes::ByteBuf::from(vec![1, 2, 3]); + + assert_ser_tokens(&bytes, &[Token::Bytes(&[1, 2, 3])]); + + let field = to_field(&bytes).unwrap(); + assert_eq!(field, expected_bytes_field()); + } + + #[test] + fn test_de_bytes() { + let bytes: serde_bytes::ByteBuf = serde_bytes::ByteBuf::from(vec![1, 2, 3]); + + assert_de_tokens(&bytes, &[Token::Bytes(&[1, 2, 3])]); + + let bytes_de: serde_bytes::ByteBuf = from_field(expected_bytes_field()).unwrap(); + assert_eq!(bytes, bytes_de); + } + + #[test] + fn test_de_bytes_seq() { + let bytes: Vec = vec![1, 2, 3]; + + assert_de_tokens( + &bytes, + &[ + Token::Seq { len: Some(3) }, + Token::U8(1), + Token::U8(2), + Token::U8(3), + Token::SeqEnd, + ], + ); + + let bytes_de: Vec = from_field(expected_bytes_field()).unwrap(); + assert_eq!(bytes, bytes_de); + } + fn expected_vec_field() -> Field { Field::List(BoundedVec(vec![ Field::Uint32(1), @@ -641,3 +718,47 @@ mod sequences { assert_eq!(tuple, tuple_de); } } + +mod accountid32 { + use super::*; + use core::str::FromStr; + + fn expected_accountid32_field() -> Field { + Field::AccountId( + AccountId32::from_str("12bzRJfh7arnnfPPUZHeJUaE62QLEwhK48QnH9LXeK2m1iZU").unwrap(), + ) + } + + #[test] + #[should_panic = "assertion `left == right` failed"] // TODO: No way to differentiate, AccountId32 is serialized as a string. 
+ fn test_ser_accountid32() { + let account_id: AccountId32 = + AccountId32::from_str("12bzRJfh7arnnfPPUZHeJUaE62QLEwhK48QnH9LXeK2m1iZU").unwrap(); + + assert_ser_tokens( + &account_id, + &[Token::Str( + "5DfhGyQdFobKM8NsWvEeAKk5EQQgYe9AydgJ7rMB6E1EqRzV", + )], + ); + + let field = to_field(account_id).unwrap(); + assert_eq!(field, expected_accountid32_field()); + } + + #[test] + fn test_de_accountid32() { + let account_id: AccountId32 = + AccountId32::from_str("12bzRJfh7arnnfPPUZHeJUaE62QLEwhK48QnH9LXeK2m1iZU").unwrap(); + + assert_de_tokens( + &account_id, + &[Token::Str( + "5DfhGyQdFobKM8NsWvEeAKk5EQQgYe9AydgJ7rMB6E1EqRzV", + )], + ); + + let account_id_de: AccountId32 = from_field(expected_accountid32_field()).unwrap(); + assert_eq!(account_id, account_id_de); + } +} diff --git a/macros/blueprint-proc-macro/src/shared.rs b/macros/blueprint-proc-macro/src/shared.rs index fd61df12..f8b8f840 100644 --- a/macros/blueprint-proc-macro/src/shared.rs +++ b/macros/blueprint-proc-macro/src/shared.rs @@ -88,6 +88,10 @@ pub fn path_to_field_type(path: &syn::Path) -> syn::Result { .last() .ok_or_else(|| syn::Error::new_spanned(path, "path must have at least one segment"))?; let ident = &seg.ident; + if ident == "ByteBuf" { + return Ok(FieldType::Bytes); + } + let args = &seg.arguments; match args { syn::PathArguments::None => { @@ -104,10 +108,7 @@ pub fn path_to_field_type(path: &syn::Path) -> syn::Result { let inner_arg = &inner.args[0]; if let syn::GenericArgument::Type(inner_ty) = inner_arg { let inner_type = type_to_field_type(inner_ty)?; - match inner_type.ty { - FieldType::Uint8 => Ok(FieldType::Bytes), - others => Ok(FieldType::List(Box::new(others))), - } + Ok(FieldType::List(Box::new(inner_type.ty))) } else { Err(syn::Error::new_spanned( inner_arg, diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index 7feadd15..e406de43 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -71,6 +71,7 @@ pub mod utils; // Re-exports pub use alloy_rpc_types; pub use async_trait; +pub use blueprint_serde::ByteBuf; pub use clap; pub use error::Error; pub use futures; From 071b7c030693f3fa6e6538e06f26c7f3f73d2616 Mon Sep 17 00:00:00 2001 From: Thomas Braun <38082993+tbraun96@users.noreply.github.com> Date: Tue, 26 Nov 2024 15:14:51 -0500 Subject: [PATCH 6/7] feat: add MPCContext derive + test utils refactor (#497) * feat: add MPCContext derive * feat: add mpc test macro refactor: cleanup contexts, test crate * fix: get syntax working with bls example * cleanup: address review --- Cargo.lock | 13 + Cargo.toml | 1 + blueprint-test-utils/Cargo.toml | 1 + blueprint-test-utils/src/anvil.rs | 13 + blueprint-test-utils/src/lib.rs | 455 ++---------------- blueprint-test-utils/src/mpc.rs | 101 ++++ blueprint-test-utils/src/tangle/mod.rs | 166 +++++++ .../src/{tangle.rs => tangle/node.rs} | 21 +- .../src/tangle/transactions.rs | 233 +++++++++ blueprint-test-utils/src/test_ext.rs | 26 +- blueprints/examples/src/eigen_context.rs | 2 +- blueprints/examples/src/services_context.rs | 4 +- .../src/contexts/aggregator.rs | 2 +- .../src/contexts/x_square.rs | 2 +- .../src/jobs/compute_x_square.rs | 2 +- macros/context-derive/Cargo.toml | 2 + macros/context-derive/src/eigenlayer.rs | 4 +- macros/context-derive/src/evm.rs | 2 +- macros/context-derive/src/keystore.rs | 4 +- macros/context-derive/src/lib.rs | 15 + macros/context-derive/src/mpc.rs | 158 ++++++ macros/context-derive/src/services.rs | 2 +- macros/context-derive/src/subxt.rs | 2 +- macros/context-derive/tests/tests.rs | 14 +- macros/context-derive/tests/ui/01_basic.rs | 24 
- macros/context-derive/tests/ui/basic.rs | 98 ++++ ...03_generic_struct.rs => generic_struct.rs} | 12 +- ..._config_attr.rs => missing_config_attr.rs} | 2 +- ...attr.stderr => missing_config_attr.stderr} | 2 +- macros/context-derive/tests/ui/mod.rs | 3 + .../{05_not_a_struct.rs => not_a_struct.rs} | 2 +- ...ot_a_struct.stderr => not_a_struct.stderr} | 2 +- .../ui/{06_unit_struct.rs => unit_struct.rs} | 2 +- ..._unit_struct.stderr => unit_struct.stderr} | 2 +- ...02_unnamed_fields.rs => unnamed_fields.rs} | 12 +- sdk/src/contexts/eigenlayer.rs | 132 +++++ sdk/src/contexts/evm_provider.rs | 12 + sdk/src/contexts/gossip_network.rs | 5 + sdk/src/contexts/keystore.rs | 7 + sdk/src/contexts/mod.rs | 68 +++ sdk/src/contexts/mpc.rs | 63 +++ sdk/src/contexts/services.rs | 100 ++++ sdk/src/contexts/tangle_client.rs | 10 + sdk/src/ctx.rs | 307 ------------ sdk/src/lib.rs | 7 +- 45 files changed, 1303 insertions(+), 814 deletions(-) create mode 100644 blueprint-test-utils/src/mpc.rs create mode 100644 blueprint-test-utils/src/tangle/mod.rs rename blueprint-test-utils/src/{tangle.rs => tangle/node.rs} (94%) create mode 100644 blueprint-test-utils/src/tangle/transactions.rs create mode 100644 macros/context-derive/src/mpc.rs delete mode 100644 macros/context-derive/tests/ui/01_basic.rs create mode 100644 macros/context-derive/tests/ui/basic.rs rename macros/context-derive/tests/ui/{03_generic_struct.rs => generic_struct.rs} (66%) rename macros/context-derive/tests/ui/{04_missing_config_attr.rs => missing_config_attr.rs} (79%) rename macros/context-derive/tests/ui/{04_missing_config_attr.stderr => missing_config_attr.stderr} (80%) create mode 100644 macros/context-derive/tests/ui/mod.rs rename macros/context-derive/tests/ui/{05_not_a_struct.rs => not_a_struct.rs} (67%) rename macros/context-derive/tests/ui/{05_not_a_struct.stderr => not_a_struct.stderr} (74%) rename macros/context-derive/tests/ui/{06_unit_struct.rs => unit_struct.rs} (58%) rename macros/context-derive/tests/ui/{06_unit_struct.stderr => unit_struct.stderr} (76%) rename macros/context-derive/tests/ui/{02_unnamed_fields.rs => unnamed_fields.rs} (57%) create mode 100644 sdk/src/contexts/eigenlayer.rs create mode 100644 sdk/src/contexts/evm_provider.rs create mode 100644 sdk/src/contexts/gossip_network.rs create mode 100644 sdk/src/contexts/keystore.rs create mode 100644 sdk/src/contexts/mod.rs create mode 100644 sdk/src/contexts/mpc.rs create mode 100644 sdk/src/contexts/services.rs create mode 100644 sdk/src/contexts/tangle_client.rs delete mode 100644 sdk/src/ctx.rs diff --git a/Cargo.lock b/Cargo.lock index a4611c87..b0744cb5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2016,6 +2016,7 @@ dependencies = [ "blueprint-manager", "cargo-tangle", "cargo_metadata", + "cargo_toml", "color-eyre", "futures", "gadget-io", @@ -2298,6 +2299,16 @@ dependencies = [ "thiserror", ] +[[package]] +name = "cargo_toml" +version = "0.20.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88da5a13c620b4ca0078845707ea9c3faf11edbc3ffd8497d11d686211cd1ac0" +dependencies = [ + "serde", + "toml", +] + [[package]] name = "cbor4ii" version = "0.3.3" @@ -4771,6 +4782,8 @@ dependencies = [ "gadget-sdk", "proc-macro2", "quote", + "round-based", + "serde", "syn 2.0.87", "trybuild", ] diff --git a/Cargo.toml b/Cargo.toml index ef20b058..b3705d0f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -157,6 +157,7 @@ lazy_static = "1.5.0" jsonrpc-core = "18.0.0" jsonrpc-http-server = "18.0.0" tempfile = "3.10.1" +cargo_toml = { version = "0.20.5" } 
alloy-primitives = "0.7.2" alloy-json-abi = "0.7.2" diff --git a/blueprint-test-utils/Cargo.toml b/blueprint-test-utils/Cargo.toml index 958c2d80..b9900b98 100644 --- a/blueprint-test-utils/Cargo.toml +++ b/blueprint-test-utils/Cargo.toml @@ -58,6 +58,7 @@ alloy-transport = { workspace = true } testcontainers = { workspace = true } uuid = { workspace = true, features = ["v4"] } tempfile = { workspace = true } +cargo_toml = { workspace = true } [dev-dependencies] cargo_metadata = { workspace = true } diff --git a/blueprint-test-utils/src/anvil.rs b/blueprint-test-utils/src/anvil.rs index 56773c6f..74bbff07 100644 --- a/blueprint-test-utils/src/anvil.rs +++ b/blueprint-test-utils/src/anvil.rs @@ -101,3 +101,16 @@ pub async fn mine_anvil_blocks(container: &ContainerAsync, n: u32) output.stdout_to_vec().await.unwrap(); assert_eq!(output.exit_code().await.unwrap().unwrap(), 0); } + +pub const ANVIL_PRIVATE_KEYS: [&str; 10] = [ + "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80", + "0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d", + "0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a", + "0x7c852118294e51e653712a81e05800f419141751be58f605c371e15141b007a6", + "0x47e179ec197488593b187f80a00eb0da91f1b9d0b13f8733639f19c30a34926a", + "0x8b3a350cf5c34c9194ca85829a2df0ec3153be0318b5e2d3348e872092edffba", + "0x92db14e403b83dfe3df233f83dfa3a0d7096f21ca9b0d6d6b8d88b2b4ec1564e", + "0x4bbbf85ce3377467afe5d46f804f221813b2bb87f24d81f60f1fcdbf7cbf4356", + "0xdbda1821b80551c9d65939329250298aa3472ba22feea921c0cf5d620ea67b97", + "0x2a871d0798f97d79848a013d4936a73bf4cc922c825d33c1cf7073dff6d409c6", +]; diff --git a/blueprint-test-utils/src/lib.rs b/blueprint-test-utils/src/lib.rs index 0b500477..480bd8af 100644 --- a/blueprint-test-utils/src/lib.rs +++ b/blueprint-test-utils/src/lib.rs @@ -1,5 +1,5 @@ #![allow(unused_imports)] -use crate::test_ext::{ANVIL_PRIVATE_KEYS, NAME_IDS}; +use crate::test_ext::NAME_IDS; use api::services::events::JobResultSubmitted; use blueprint_manager::config::BlueprintManagerConfig; use blueprint_manager::executor::BlueprintManagerHandle; @@ -8,7 +8,7 @@ use gadget_sdk::clients::tangle::runtime::{TangleClient, TangleConfig}; use gadget_sdk::tangle_subxt::tangle_testnet_runtime::api; use gadget_sdk::tangle_subxt::tangle_testnet_runtime::api::runtime_types; use gadget_sdk::tangle_subxt::tangle_testnet_runtime::api::runtime_types::sp_arithmetic::per_things::Percent; -use gadget_sdk::tangle_subxt::tangle_testnet_runtime::api::services::calls::types::call::{Args, Job}; +pub use gadget_sdk::tangle_subxt::tangle_testnet_runtime::api::services::calls::types::call::{Args, Job}; use gadget_sdk::tangle_subxt::tangle_testnet_runtime::api::services::calls::types::create_blueprint::Blueprint; use gadget_sdk::tangle_subxt::tangle_testnet_runtime::api::services::calls::types::register::{Preferences, RegistrationArgs}; use gadget_sdk::keystore; @@ -24,17 +24,17 @@ use std::net::IpAddr; use std::path::{Path, PathBuf}; use std::time::Duration; use alloy_primitives::hex; +use cargo_toml::Manifest; use color_eyre::eyre::eyre; use subxt::tx::{Signer, TxProgress}; use subxt::utils::AccountId32; use url::Url; use uuid::Uuid; -use gadget_sdk::{info, error}; +use gadget_sdk::{error, info}; +pub use cargo_tangle::deploy::Opts; pub use gadget_sdk::logging::setup_log; -use cargo_tangle::deploy::Opts; - pub type InputValue = runtime_types::tangle_primitives::services::field::Field; pub type OutputValue = runtime_types::tangle_primitives::services::field::Field; 
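// Illustrative sketch (not part of the patch above): how a downstream blueprint test is
// expected to consume the items this hunk re-exports -- the deterministic `ANVIL_PRIVATE_KEYS`
// now housed in `anvil.rs` and the `InputValue`/`OutputValue` aliases defined just above.
// The function below is hypothetical example code only, not harness API.
use blueprint_test_utils::anvil::ANVIL_PRIVATE_KEYS;
use blueprint_test_utils::{InputValue, OutputValue};

fn example_job_io() {
    // Any of the ten well-known Anvil developer keys can back EVM-side signing in tests.
    let _dev_key: &str = ANVIL_PRIVATE_KEYS[0];

    // Job arguments and expected results are plain `Field` values, mirroring the
    // `InputValue::Uint64(5)` / `OutputValue::Uint64(25)` pair used by the
    // `test_tangle_blueprint!` invocation later in this patch.
    let input = InputValue::Uint64(5);
    let expected = OutputValue::Uint64(25);
    let _ = (input, expected);
}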
@@ -42,10 +42,16 @@ pub mod anvil; pub mod binding; pub mod eigenlayer_test_env; pub mod helpers; +pub mod mpc; pub mod symbiotic_test_env; pub mod sync; pub mod tangle; pub mod test_ext; +use anvil::ANVIL_PRIVATE_KEYS; +pub use gadget_sdk; +pub use gadget_sdk::ext::blueprint_serde::BoundedVec; +pub use tangle::transactions::{get_next_call_id, submit_job, wait_for_completion_of_tangle_job}; +pub use tempfile; pub type TestClient = TangleClient; @@ -358,419 +364,38 @@ pub fn inject_random_key>(keystore_path: P) -> color_eyre::Result Ok(()) } -pub async fn create_blueprint( - client: &TestClient, - account_id: &TanglePairSigner, - blueprint: Blueprint, -) -> Result<(), Box> { - let call = api::tx().services().create_blueprint(blueprint); - let res = client - .tx() - .sign_and_submit_then_watch_default(&call, account_id) - .await?; - wait_for_in_block_success(res).await?; - Ok(()) -} - -pub async fn join_delegators( - client: &TestClient, - account_id: &TanglePairSigner, -) -> Result<(), Box> { - info!("Joining delegators ..."); - let call_pre = api::tx() - .multi_asset_delegation() - .join_operators(1_000_000_000_000_000); - let res_pre = client - .tx() - .sign_and_submit_then_watch_default(&call_pre, account_id) - .await?; - - wait_for_in_block_success(res_pre).await?; - Ok(()) -} - -pub async fn register_blueprint( - client: &TestClient, - account_id: &TanglePairSigner, - blueprint_id: u64, - preferences: Preferences, - registration_args: RegistrationArgs, -) -> Result<(), Box> { - info!("Registering to blueprint {blueprint_id} to become an operator ..."); - let call = api::tx() - .services() - .register(blueprint_id, preferences, registration_args); - let res = client - .tx() - .sign_and_submit_then_watch_default(&call, account_id) - .await?; - wait_for_in_block_success(res).await?; - Ok(()) -} - -pub async fn submit_job( - client: &TestClient, - user: &TanglePairSigner, - service_id: u64, - job_type: Job, - job_params: Args, -) -> Result<(), Box> { - let call = api::tx().services().call(service_id, job_type, job_params); - let res = client - .tx() - .sign_and_submit_then_watch_default(&call, user) - .await?; - wait_for_in_block_success(res).await?; - Ok(()) -} - -/// Requests a service with a given blueprint. This is meant for testing, and will allow any node -/// to make a call to run a service, and will have all nodes running the service. -pub async fn request_service( - client: &TestClient, - user: &TanglePairSigner, - blueprint_id: u64, - test_nodes: Vec, -) -> Result<(), Box> { - let call = api::tx().services().request( - blueprint_id, - test_nodes.clone(), - test_nodes, - Default::default(), - vec![0], - 1000, - ); - let res = client - .tx() - .sign_and_submit_then_watch_default(&call, user) - .await?; - wait_for_in_block_success(res).await?; - Ok(()) -} - -pub async fn wait_for_in_block_success( - mut res: TxProgress, -) -> Result<(), Box> { - while let Some(Ok(event)) = res.next().await { - let Some(block) = event.as_in_block() else { - continue; - }; - block.wait_for_success().await?; - } - Ok(()) -} - -pub async fn wait_for_completion_of_tangle_job( - client: &TestClient, - service_id: u64, - call_id: u64, - required_count: usize, -) -> Result> { - let mut count = 0; - let mut blocks = client.blocks().subscribe_best().await?; - while let Some(Ok(block)) = blocks.next().await { - let events = block.events().await?; - let results = events.find::().collect::>(); - info!( - %service_id, - %call_id, - %required_count, - %count, - "Waiting for job completion. 
Found {} results ...", - results.len() - ); - for result in results { - match result { - Ok(result) => { - if result.service_id == service_id && result.call_id == call_id { - count += 1; - if count == required_count { - return Ok(result); - } - } - } - Err(err) => { - error!("Failed to get job result: {err}"); - } - } - } +/// Returns the output of "git rev-parse --show-toplevel" to get the root of the git repository as a PathBuf. +/// If it's not in a git repo, default to return the current directory +pub fn get_blueprint_base_dir() -> PathBuf { + let output = std::process::Command::new("git") + .arg("rev-parse") + .arg("--show-toplevel") + .output() + .expect("Failed to run git command"); + + if output.status.success() { + let path = std::str::from_utf8(&output.stdout) + .expect("Failed to convert output to string") + .trim(); + PathBuf::from(path) + } else { + std::env::current_dir().expect("Failed to get current directory") } - Err("Failed to get job result".into()) -} - -pub async fn get_next_blueprint_id(client: &TestClient) -> Result> { - let call = api::storage().services().next_blueprint_id(); - let res = client - .storage() - .at_latest() - .await? - .fetch_or_default(&call) - .await?; - Ok(res) } -pub async fn get_next_service_id(client: &TestClient) -> Result> { - let call = api::storage().services().next_instance_id(); - let res = client - .storage() - .at_latest() - .await? - .fetch_or_default(&call) - .await?; - Ok(res) -} - -pub async fn get_next_call_id(client: &TestClient) -> Result> { - let call = api::storage().services().next_job_call_id(); - let res = client - .storage() - .at_latest() - .await? - .fetch_or_default(&call) - .await?; - Ok(res) -} - -/// Approves a service request. This is meant for testing, and will always approve the request. -pub async fn approve_service( - client: &TestClient, - caller: &TanglePairSigner, - request_id: u64, - restaking_percent: u8, -) -> Result<(), Box> { - gadget_sdk::info!("Approving service request ..."); - let call = api::tx() - .services() - .approve(request_id, Percent(restaking_percent)); - let res = client - .tx() - .sign_and_submit_then_watch_default(&call, caller) - .await?; - res.wait_for_finalized_success().await?; - Ok(()) -} - -pub async fn get_next_request_id(client: &TestClient) -> Result> { - gadget_sdk::info!("Fetching next request ID ..."); - let next_request_id_addr = api::storage().services().next_service_request_id(); - let next_request_id = client - .storage() - .at_latest() - .await - .expect("Failed to fetch latest block") - .fetch_or_default(&next_request_id_addr) - .await - .expect("Failed to fetch next request ID"); - Ok(next_request_id) -} - -#[macro_export] -macro_rules! 
test_blueprint { - ( - $blueprint_path:expr, - $blueprint_name:expr, - $N:expr, - [$($input:expr),+], - [$($expected_output:expr),+] - ) => { - use $crate::{ - get_next_call_id, run_test_blueprint_manager, - submit_job, wait_for_completion_of_tangle_job, Opts, setup_log, - }; - - use $crate::test_ext::new_test_ext_blueprint_manager; - - #[tokio::test(flavor = "multi_thread")] - async fn test_externalities_standard() { - setup_log(); - let mut base_path = std::env::current_dir().expect("Failed to get current directory"); - - let tmp_dir = tempfile::TempDir::new().unwrap(); // Create a temporary directory for the keystores - let tmp_dir_path = format!("{}", tmp_dir.path().display()); - - base_path.push($blueprint_path); - base_path - .canonicalize() - .expect("File could not be normalized"); - - let manifest_path = base_path.join("Cargo.toml"); - - - let http_addr = "http://127.0.0.1:9944"; - let ws_addr = "ws://127.0.0.1:9944"; - - let opts = Opts { - pkg_name: Some($blueprint_name.to_string()), - http_rpc_url: http_addr.to_string(), - ws_rpc_url: ws_addr.to_string(), - manifest_path, - signer: None, - signer_evm: None, - }; - - new_test_ext_blueprint_manager::<$N, 1, String, _, _>( - tmp_dir_path, - opts, - run_test_blueprint_manager, - ) - .await - .execute_with_async(move |client, handles, blueprint| async move { - let keypair = handles[0].sr25519_id().clone(); - let selected_service = &blueprint.services[0]; - let service_id = selected_service.id; - let call_id = get_next_call_id(client) - .await - .expect("Failed to get next job id"); - - info!( - "Submitting job with params service ID: {service_id}, call ID: {call_id}" - ); - - let mut job_args = Args::new(); - for input in [$($input),+] { - job_args.push(input); - } - - submit_job( - client, - &keypair, - service_id, - Job::from(call_id as u8), - job_args, - ) - .await - .expect("Failed to submit job"); - - let job_results = wait_for_completion_of_tangle_job(client, service_id, call_id, $N) - .await - .expect("Failed to wait for job completion"); - - assert_eq!(job_results.service_id, service_id); - assert_eq!(job_results.call_id, call_id); - - let expected_outputs = vec![$($expected_output),+]; - assert_eq!(job_results.result.len(), expected_outputs.len(), "Number of outputs doesn't match expected"); - - for (result, expected) in job_results.result.into_iter().zip(expected_outputs.into_iter()) { - assert_eq!(result, expected); - } - }) - .await - } - }; -} - -#[cfg(test)] -mod test_macros { - use super::*; - - test_blueprint!( - "./blueprints/incredible-squaring-eigen/", // Path to the blueprint's dir - "incredible-squaring-blueprint", // Name of the package - 5, // Number of nodes - [InputValue::Uint64(5)], - [OutputValue::Uint64(25)] // Expected output: each input squared - ); -} - -#[cfg(test)] -mod tests_standard { - use super::*; - use crate::test_ext::new_test_ext_blueprint_manager; - - use cargo_tangle::deploy::Opts; - use gadget_sdk::config::protocol::EigenlayerContractAddresses; - use gadget_sdk::config::Protocol; - use gadget_sdk::logging::setup_log; - use gadget_sdk::{error, info}; - use helpers::BlueprintProcessManager; - use std::sync::Arc; - use tokio::sync::Mutex; - - /// This test requires that `yarn install` has been executed inside the - /// `./blueprints/incredible-squaring/` directory - /// The other requirement is that there is a locally-running tangle node - #[tokio::test(flavor = "multi_thread")] - #[allow(clippy::needless_return)] - async fn test_externalities_gadget_starts() { - setup_log(); - let mut 
base_path = std::env::current_dir().expect("Failed to get current directory"); - let tmp_dir = tempfile::TempDir::new().unwrap(); // Create a temporary directory for the keystores - let tmp_dir_path = format!("{}", tmp_dir.path().display()); - - base_path.push("../blueprints/incredible-squaring"); - base_path - .canonicalize() - .expect("File could not be normalized"); - - let manifest_path = base_path.join("Cargo.toml"); - - let opts = Opts { - pkg_name: Some("incredible-squaring-blueprint".to_string()), - http_rpc_url: "http://127.0.0.1:9944".to_string(), - ws_rpc_url: "ws://127.0.0.1:9944".to_string(), - manifest_path, - signer: None, - signer_evm: None, - }; - // --ws-external - const INPUT: u64 = 10; - const OUTPUT: u64 = INPUT.pow(2); - - new_test_ext_blueprint_manager::<5, 1, String, _, _>( - tmp_dir_path, - opts, - run_test_blueprint_manager, +pub fn read_cargo_toml_file>(path: P) -> std::io::Result { + let manifest = cargo_toml::Manifest::from_path(path).map_err(|err| { + std::io::Error::new( + std::io::ErrorKind::Other, + format!("Failed to read Cargo.toml: {err}"), ) - .await - .execute_with_async(move |client, handles, blueprint| async move { - // At this point, blueprint has been deployed, every node has registered - // as an operator for the relevant services, and, all gadgets are running - - // What's left: Submit a job, wait for the job to finish, then assert the job results - let keypair = handles[0].sr25519_id().clone(); - let selected_service = &blueprint.services[0]; - let service_id = selected_service.id; - let call_id = get_next_call_id(client) - .await - .expect("Failed to get next job id") - .saturating_sub(1); - - info!("Submitting job with params service ID: {service_id}, call ID: {call_id}"); - - // Pass the arguments - let mut job_args = Args::new(); - let input = - api::runtime_types::tangle_primitives::services::field::Field::Uint64(INPUT); - job_args.push(input); - - // Next step: submit a job under that service/job id - if let Err(err) = submit_job( - client, - &keypair, - service_id, - Job::from(call_id as u8), - job_args, - ) - .await - { - error!("Failed to submit job: {err}"); - panic!("Failed to submit job: {err}"); - } - - // Step 2: wait for the job to complete - let job_results = - wait_for_completion_of_tangle_job(client, service_id, call_id, handles.len()) - .await - .expect("Failed to wait for job completion"); - - // Step 3: Get the job results, compare to expected value(s) - let expected_result = - api::runtime_types::tangle_primitives::services::field::Field::Uint64(OUTPUT); - assert_eq!(job_results.service_id, service_id); - assert_eq!(job_results.call_id, call_id); - assert_eq!(job_results.result[0], expected_result); - }) - .await + })?; + if manifest.package.is_none() { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + "No package section found in Cargo.toml", + )); } + + Ok(manifest) } diff --git a/blueprint-test-utils/src/mpc.rs b/blueprint-test-utils/src/mpc.rs new file mode 100644 index 00000000..5f95fd34 --- /dev/null +++ b/blueprint-test-utils/src/mpc.rs @@ -0,0 +1,101 @@ +#[macro_export] +macro_rules! 
mpc_generate_keygen_and_signing_tests { + ( + $blueprint_path:literal, + $N:tt, + $T:tt, + $keygen_job_id:tt, + [$($keygen_inputs:expr),+], + [$($expected_keygen_outputs:expr),*], + $signing_job_id:tt, + [$($signing_inputs:expr),+], + [$($expected_signing_outputs:expr),*], + $atomic_keygen_call_id_store:expr, + ) => { + $crate::tangle_blueprint_test_template!( + $blueprint_path, + $N, + |client, handles, blueprint| async move { + let keypair = handles[0].sr25519_id().clone(); + let service = &blueprint.services[$keygen_job_id as usize]; + + let service_id = service.id; + gadget_sdk::info!( + "Submitting KEYGEN job {} with service ID {service_id}", $keygen_job_id + ); + + let job_args = vec![$($keygen_inputs),+]; + + let job = submit_job( + client, + &keypair, + service_id, + Job::from($keygen_job_id as u8), + job_args, + ) + .await + .expect("Failed to submit job"); + + let keygen_call_id = job.call_id; + $atomic_keygen_call_id_store.store(keygen_call_id, std::sync::atomic::Ordering::Relaxed); + + gadget_sdk::info!( + "Submitted KEYGEN job {} with service ID {service_id} has call id {keygen_call_id}", $keygen_job_id, + ); + + let job_results = wait_for_completion_of_tangle_job(client, service_id, keygen_call_id, $T) + .await + .expect("Failed to wait for job completion"); + + assert_eq!(job_results.service_id, service_id); + assert_eq!(job_results.call_id, keygen_call_id); + + let expected_outputs = vec![$($expected_keygen_outputs),*]; + assert_eq!(job_results.result.len(), expected_outputs.len(), "Number of keygen outputs doesn't match expected"); + + for (result, expected) in job_results.result.into_iter().zip(expected_outputs.into_iter()) { + assert_eq!(result, expected); + } + + // ~~~~~ Now, run a signing job ~~~~~ + let service = &blueprint.services[$signing_job_id as usize]; + + let service_id = service.id; + gadget_sdk::info!( + "Submitting SIGNING job {} with service ID {service_id}", $signing_job_id + ); + + // Pass the arguments + let job_args = vec![$($signing_inputs),+]; + + let job = submit_job( + client, + &keypair, + service_id, + Job::from($signing_job_id as u8), + job_args, + ) + .await + .expect("Failed to submit job"); + + let signing_call_id = job.call_id; + + gadget_sdk::info!( + "Submitted SIGNING job {} with service ID {service_id} has call id {signing_call_id}", $keygen_job_id + ); + + let job_results = + wait_for_completion_of_tangle_job(client, service_id, signing_call_id, $T) + .await + .expect("Failed to wait for job completion"); + + let expected_outputs = vec![$($expected_signing_outputs),*]; + assert_eq!(job_results.result.len(), expected_outputs.len(), "Number of signing outputs doesn't match expected"); + + for (result, expected) in job_results.result.into_iter().zip(expected_outputs.into_iter()) { + assert_eq!(result, expected); + } + }, + ); + }; +} diff --git a/blueprint-test-utils/src/tangle/mod.rs b/blueprint-test-utils/src/tangle/mod.rs new file mode 100644 index 00000000..7985bad7 --- /dev/null +++ b/blueprint-test-utils/src/tangle/mod.rs @@ -0,0 +1,166 @@ +use crate::tangle::node::{Error, SubstrateNode, TANGLE_NODE_ENV}; + +pub mod node; +pub mod transactions; + +/// Run a Tangle node with the default settings. +/// The node will shut down when the returned handle is dropped. 
+pub fn run() -> Result { + let tangle_from_env = std::env::var(TANGLE_NODE_ENV).unwrap_or_else(|_| "tangle".to_string()); + let builder = SubstrateNode::builder() + .binary_paths([ + "../tangle/target/release/tangle", + "../../tangle/target/release/tangle", + &tangle_from_env, + ]) + .arg("validator") + .arg_val("rpc-cors", "all") + .arg_val("rpc-methods", "unsafe") + .arg("rpc-external") + .arg_val("sealing", "manual") + .clone(); + builder.spawn() +} + +#[macro_export] +/// A template that makes creating domain-specific macros for tangle-based blueprints easier +macro_rules! tangle_blueprint_test_template { + ( + $blueprint_path:expr, + $N:tt, + $test_logic:expr, + ) => { + pub use $crate::{ + run_test_blueprint_manager, + Opts, setup_log, + tangle, get_blueprint_base_dir, read_cargo_toml_file, + submit_job, wait_for_completion_of_tangle_job, Job, Args, + }; + + use $crate::test_ext::new_test_ext_blueprint_manager; + + #[tokio::test(flavor = "multi_thread")] + async fn test_blueprint() { + setup_log(); + let tangle_node = tangle::run().expect("Failed to start tangle node"); + let mut base_path = get_blueprint_base_dir(); + + let tmp_dir = $crate::tempfile::TempDir::new().unwrap(); + let tmp_dir_path = format!("{}", tmp_dir.path().display()); + + base_path.push($blueprint_path); + base_path + .canonicalize() + .expect("File could not be found/normalized"); + + let manifest_path = base_path.join("Cargo.toml"); + log::info!(target: "gadget", "Manifest path: {manifest_path:?}"); + let manifest = read_cargo_toml_file(&manifest_path).expect("Failed to read blueprint's Cargo.toml"); + let blueprint_name = manifest.package.as_ref().unwrap().name.clone(); + + let ws_port = tangle_node.ws_port(); + let http_rpc_url = format!("http://127.0.0.1:{ws_port}"); + let ws_rpc_url = format!("ws://127.0.0.1:{ws_port}"); + + let opts = Opts { + pkg_name: Some(blueprint_name), + http_rpc_url, + ws_rpc_url, + manifest_path, + signer: None, + signer_evm: None, + }; + + new_test_ext_blueprint_manager::<$N, 1, String, _, _>( + tmp_dir_path, + opts, + run_test_blueprint_manager, + ) + .await + .execute_with_async($test_logic) + .await + } + }; +} + +#[macro_export] +macro_rules! 
test_tangle_blueprint { + ( + $blueprint_path:expr, + $N:tt, + $T:tt, + $job_id:tt, + [$($inputs:expr),+], + [$($expected_output:expr),+] + ) => { + tangle_blueprint_test_template!( + $blueprint_path, + $N, + |client, handles, blueprint| async move { + let keypair = handles[0].sr25519_id().clone(); + let selected_service = &blueprint.services[$job_id]; + let service_id = selected_service.id; + + gadget_sdk::info!( + "Submitting job {} with service ID {service_id}", $job_id + ); + + let job_args = vec![$($inputs),+]; + + let job = submit_job( + client, + &keypair, + service_id, + Job::from($job_id as u8), + job_args, + ) + .await + .expect("Failed to submit job"); + + let call_id = job.call_id; + + gadget_sdk::info!( + "Submitted job {} with service ID {service_id} has call id {call_id}", $job_id + ); + + let job_results = wait_for_completion_of_tangle_job(client, service_id, call_id, $T) + .await + .expect("Failed to wait for job completion"); + + assert_eq!(job_results.service_id, service_id); + assert_eq!(job_results.call_id, call_id); + + let expected_outputs = vec![$($expected_output),+]; + assert_eq!(job_results.result.len(), expected_outputs.len(), "Number of outputs doesn't match expected"); + + for (result, expected) in job_results.result.into_iter().zip(expected_outputs.into_iter()) { + assert_eq!(result, expected); + } + }, + ); + }; + ( + $blueprint_path:expr, + $N:tt, + $job_id:tt, + [$($input:expr),+], + [$($expected_output:expr),+] + ) => { + test_tangle_blueprint!($blueprint_path, $N, $N, $job_id, [$($input),+], [$($expected_output),+]); + }; +} + +#[cfg(test)] +mod test_incredible_squaring { + use crate::{InputValue, OutputValue}; + + const KEYGEN_JOB_ID: usize = 0; + const N: usize = 5; + test_tangle_blueprint!( + "./blueprints/incredible-squaring/", // Path to the blueprint's dir relative to the git repo root, or, if not in a git repo, the current working directory + N, // Number of nodes + KEYGEN_JOB_ID, // Job ID + [InputValue::Uint64(5)], // Inputs + [OutputValue::Uint64(25)] // Expected output: input squared + ); +} diff --git a/blueprint-test-utils/src/tangle.rs b/blueprint-test-utils/src/tangle/node.rs similarity index 94% rename from blueprint-test-utils/src/tangle.rs rename to blueprint-test-utils/src/tangle/node.rs index f6511749..4fa8e71a 100644 --- a/blueprint-test-utils/src/tangle.rs +++ b/blueprint-test-utils/src/tangle/node.rs @@ -12,7 +12,7 @@ pub const TANGLE_NODE_ENV: &str = "TANGLE_NODE"; #[derive(Debug)] pub enum Error { - Io(std::io::Error), + Io(io::Error), CouldNotExtractPort(String), CouldNotExtractP2pAddress(String), CouldNotExtractP2pPort(String), @@ -108,6 +108,10 @@ impl SubstrateNodeBuilder { let path = String::from_utf8(path.stdout).expect("bad path"); let mut bin_path = OsString::new(); for binary_path in &self.binary_paths { + let binary_path = &std::path::absolute(binary_path) + .expect("bad path") + .into_os_string(); + log::info!(target: "gadget", "Trying to spawn binary at {:?}", binary_path); self.custom_flags .insert("base-path".into(), Some(path.clone().into())); @@ -369,18 +373,3 @@ impl SubstrateNodeInfo { .ok_or_else(|| Error::CouldNotExtractP2pPort(self.log.clone())) } } - -/// Run a Tangle node with the default settings. -/// The node will shut down when the returned handle is dropped. 
-pub fn run() -> Result { - let tangle_from_env = std::env::var(TANGLE_NODE_ENV).unwrap_or_else(|_| "tangle".to_string()); - let builder = SubstrateNode::builder() - .binary_paths(["../tangle/target/release/tangle", &tangle_from_env]) - .arg("validator") - .arg_val("rpc-cors", "all") - .arg_val("rpc-methods", "unsafe") - .arg("rpc-external") - .arg_val("sealing", "manual") - .clone(); - builder.spawn() -} diff --git a/blueprint-test-utils/src/tangle/transactions.rs b/blueprint-test-utils/src/tangle/transactions.rs new file mode 100644 index 00000000..7d2b4eae --- /dev/null +++ b/blueprint-test-utils/src/tangle/transactions.rs @@ -0,0 +1,233 @@ +use gadget_sdk::keystore::TanglePairSigner; +use std::error::Error; +use gadget_sdk::event_listener::tangle::AccountId32; +use gadget_sdk::{error, info}; +use gadget_sdk::tangle_subxt::tangle_testnet_runtime::api; +use gadget_sdk::tangle_subxt::tangle_testnet_runtime::api::runtime_types::sp_arithmetic::per_things::Percent; +use gadget_sdk::tangle_subxt::tangle_testnet_runtime::api::services::calls::types::call::{Args, Job}; +use gadget_sdk::tangle_subxt::tangle_testnet_runtime::api::services::calls::types::create_blueprint::Blueprint; +use gadget_sdk::tangle_subxt::tangle_testnet_runtime::api::services::calls::types::register::{Preferences, RegistrationArgs}; +use gadget_sdk::tangle_subxt::tangle_testnet_runtime::api::services::events::{JobCalled, JobResultSubmitted}; +use subxt::tx::TxProgress; +use gadget_sdk::clients::tangle::runtime::TangleConfig; +use gadget_sdk::subxt_core::tx::signer::Signer; +use crate::TestClient; + +pub async fn create_blueprint( + client: &TestClient, + account_id: &TanglePairSigner, + blueprint: Blueprint, +) -> Result<(), Box> { + let call = api::tx().services().create_blueprint(blueprint); + let res = client + .tx() + .sign_and_submit_then_watch_default(&call, account_id) + .await?; + wait_for_in_block_success(res).await?; + Ok(()) +} + +pub async fn join_operators( + client: &TestClient, + account_id: &TanglePairSigner, +) -> Result<(), Box> { + info!("Joining delegators ..."); + let call_pre = api::tx() + .multi_asset_delegation() + .join_operators(1_000_000_000_000_000); + let res_pre = client + .tx() + .sign_and_submit_then_watch_default(&call_pre, account_id) + .await?; + + wait_for_in_block_success(res_pre).await?; + Ok(()) +} + +pub async fn register_blueprint( + client: &TestClient, + account_id: &TanglePairSigner, + blueprint_id: u64, + preferences: Preferences, + registration_args: RegistrationArgs, +) -> Result<(), Box> { + info!("Registering to blueprint {blueprint_id} to become an operator ..."); + let call = api::tx() + .services() + .register(blueprint_id, preferences, registration_args); + let res = client + .tx() + .sign_and_submit_then_watch_default(&call, account_id) + .await?; + wait_for_in_block_success(res).await?; + Ok(()) +} + +pub async fn submit_job( + client: &TestClient, + user: &TanglePairSigner, + service_id: u64, + job_id: Job, + job_params: Args, +) -> Result> { + let call = api::tx().services().call(service_id, job_id, job_params); + let events = client + .tx() + .sign_and_submit_then_watch_default(&call, user) + .await? 
+ .wait_for_finalized_success() + .await?; + + let job_called_events = events.find::().collect::>(); + for job_called in job_called_events { + let job_called = job_called?; + if job_called.service_id == service_id + && job_called.job == job_id + && user.account_id() == job_called.caller + { + return Ok(job_called); + } + } + + Err("Failed to find JobCalled event".into()) +} + +/// Requests a service with a given blueprint. This is meant for testing, and will allow any node +/// to make a call to run a service, and will have all nodes running the service. +pub async fn request_service( + client: &TestClient, + user: &TanglePairSigner, + blueprint_id: u64, + test_nodes: Vec, +) -> Result<(), Box> { + let call = api::tx().services().request( + blueprint_id, + test_nodes.clone(), + test_nodes, + Default::default(), + vec![0], + 1000, + ); + let res = client + .tx() + .sign_and_submit_then_watch_default(&call, user) + .await?; + wait_for_in_block_success(res).await?; + Ok(()) +} + +pub async fn wait_for_in_block_success( + mut res: TxProgress, +) -> Result<(), Box> { + while let Some(Ok(event)) = res.next().await { + let Some(block) = event.as_in_block() else { + continue; + }; + block.wait_for_success().await?; + } + Ok(()) +} + +pub async fn wait_for_completion_of_tangle_job( + client: &TestClient, + service_id: u64, + call_id: u64, + required_count: usize, +) -> Result> { + let mut count = 0; + let mut blocks = client.blocks().subscribe_best().await?; + while let Some(Ok(block)) = blocks.next().await { + let events = block.events().await?; + let results = events.find::().collect::>(); + info!( + %service_id, + %call_id, + %required_count, + %count, + "Waiting for job completion. Found {} results ...", + results.len() + ); + for result in results { + match result { + Ok(result) => { + if result.service_id == service_id && result.call_id == call_id { + count += 1; + if count == required_count { + return Ok(result); + } + } + } + Err(err) => { + error!("Failed to get job result: {err}"); + } + } + } + } + Err("Failed to get job result".into()) +} + +pub async fn get_next_blueprint_id(client: &TestClient) -> Result> { + let call = api::storage().services().next_blueprint_id(); + let res = client + .storage() + .at_latest() + .await? + .fetch_or_default(&call) + .await?; + Ok(res) +} + +pub async fn get_next_service_id(client: &TestClient) -> Result> { + let call = api::storage().services().next_instance_id(); + let res = client + .storage() + .at_latest() + .await? + .fetch_or_default(&call) + .await?; + Ok(res) +} + +pub async fn get_next_call_id(client: &TestClient) -> Result> { + let call = api::storage().services().next_job_call_id(); + let res = client + .storage() + .at_latest() + .await? + .fetch_or_default(&call) + .await?; + Ok(res) +} + +/// Approves a service request. This is meant for testing, and will always approve the request. 
+pub async fn approve_service( + client: &TestClient, + caller: &TanglePairSigner, + request_id: u64, + restaking_percent: u8, +) -> Result<(), Box> { + info!("Approving service request ..."); + let call = api::tx() + .services() + .approve(request_id, Percent(restaking_percent)); + let res = client + .tx() + .sign_and_submit_then_watch_default(&call, caller) + .await?; + res.wait_for_finalized_success().await?; + Ok(()) +} + +pub async fn get_next_request_id(client: &TestClient) -> Result> { + info!("Fetching next request ID ..."); + let next_request_id_addr = api::storage().services().next_service_request_id(); + let next_request_id = client + .storage() + .at_latest() + .await + .expect("Failed to fetch latest block") + .fetch_or_default(&next_request_id_addr) + .await + .expect("Failed to fetch next request ID"); + Ok(next_request_id) +} diff --git a/blueprint-test-utils/src/test_ext.rs b/blueprint-test-utils/src/test_ext.rs index 1237c2dc..3ae44daa 100644 --- a/blueprint-test-utils/src/test_ext.rs +++ b/blueprint-test-utils/src/test_ext.rs @@ -39,21 +39,10 @@ use gadget_sdk::{error, info, warn}; use gadget_sdk::clients::tangle::services::{RpcServicesWithBlueprint, ServicesClient}; use gadget_sdk::subxt_core::config::Header; use gadget_sdk::utils::test_utils::get_client; +use crate::tangle::transactions; const LOCAL_BIND_ADDR: &str = "127.0.0.1"; pub const NAME_IDS: [&str; 5] = ["Alice", "Bob", "Charlie", "Dave", "Eve"]; -pub const ANVIL_PRIVATE_KEYS: [&str; 10] = [ - "0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80", - "0x59c6995e998f97a5a0044966f0945389dc9e86dae88c7a8412f4603b6b78690d", - "0x5de4111afa1a4b94908f83103eb1f1706367c2e68ca870fc3fb9a804cdab365a", - "0x7c852118294e51e653712a81e05800f419141751be58f605c371e15141b007a6", - "0x47e179ec197488593b187f80a00eb0da91f1b9d0b13f8733639f19c30a34926a", - "0x8b3a350cf5c34c9194ca85829a2df0ec3153be0318b5e2d3348e872092edffba", - "0x92db14e403b83dfe3df233f83dfa3a0d7096f21ca9b0d6d6b8d88b2b4ec1564e", - "0x4bbbf85ce3377467afe5d46f804f221813b2bb87f24d81f60f1fcdbf7cbf4356", - "0xdbda1821b80551c9d65939329250298aa3472ba22feea921c0cf5d620ea67b97", - "0x2a871d0798f97d79848a013d4936a73bf4cc922c825d33c1cf7073dff6d409c6", -]; /// - `N`: number of nodes /// - `K`: Number of networks accessible per node (should be equal to the number of services in a given blueprint) @@ -181,7 +170,7 @@ pub async fn new_test_ext_blueprint_manager< }, }; - if let Err(err) = super::join_delegators(&client, &keypair).await { + if let Err(err) = transactions::join_operators(&client, &keypair).await { let _span = handle.span().enter(); let err_str = format!("{err}"); @@ -193,7 +182,7 @@ pub async fn new_test_ext_blueprint_manager< } } - if let Err(err) = super::register_blueprint( + if let Err(err) = transactions::register_blueprint( &client, &keypair, blueprint_id, @@ -226,13 +215,14 @@ pub async fn new_test_ext_blueprint_manager< info!("Requesting service for blueprint ID {blueprint_id} using Alice's keys ..."); if let Err(err) = - super::request_service(&client, handles[0].sr25519_id(), blueprint_id, all_nodes).await + transactions::request_service(&client, handles[0].sr25519_id(), blueprint_id, all_nodes) + .await { error!("Failed to register service: {err}"); panic!("Failed to register service: {err}"); } - let next_request_id = super::get_next_request_id(&client) + let next_request_id = transactions::get_next_request_id(&client) .await .expect("Failed to get next request ID") .saturating_sub(1); @@ -244,7 +234,9 @@ pub async fn 
new_test_ext_blueprint_manager< let client = client.clone(); let task = async move { let keypair = handle.sr25519_id().clone(); - if let Err(err) = super::approve_service(&client, &keypair, next_request_id, 20).await { + if let Err(err) = + transactions::approve_service(&client, &keypair, next_request_id, 20).await + { let _span = handle.span().enter(); error!("Failed to approve service request {next_request_id}: {err}"); panic!("Failed to approve service request {next_request_id}: {err}"); diff --git a/blueprints/examples/src/eigen_context.rs b/blueprints/examples/src/eigen_context.rs index 92a9900d..85d5c78a 100644 --- a/blueprints/examples/src/eigen_context.rs +++ b/blueprints/examples/src/eigen_context.rs @@ -4,7 +4,7 @@ use gadget_sdk::event_listener::evm::contracts::EvmContractEventListener; use gadget_sdk::event_utils::InitializableEventHandler; use gadget_sdk::subxt_core::ext::sp_runtime::traits::Zero; use gadget_sdk::utils::evm::get_provider_http; -use gadget_sdk::{config::StdGadgetConfiguration, ctx::EigenlayerContext, job, load_abi}; +use gadget_sdk::{config::StdGadgetConfiguration, contexts::EigenlayerContext, job, load_abi}; use serde::{Deserialize, Serialize}; use std::collections::HashMap; use std::env; diff --git a/blueprints/examples/src/services_context.rs b/blueprints/examples/src/services_context.rs index 7e1b295d..6ff4255e 100644 --- a/blueprints/examples/src/services_context.rs +++ b/blueprints/examples/src/services_context.rs @@ -1,11 +1,11 @@ -use gadget_sdk::ctx::TangleClientContext; +use gadget_sdk::config::StdGadgetConfiguration; +use gadget_sdk::contexts::{ServicesContext, TangleClientContext}; use gadget_sdk::event_listener::tangle::jobs::{services_post_processor, services_pre_processor}; use gadget_sdk::event_listener::tangle::TangleEventListener; use gadget_sdk::event_utils::InitializableEventHandler; use gadget_sdk::job; use gadget_sdk::subxt_core::utils::AccountId32; use gadget_sdk::tangle_subxt::tangle_testnet_runtime::api::services::events::JobCalled; -use gadget_sdk::{config::StdGadgetConfiguration, ctx::ServicesContext}; #[derive(Clone, ServicesContext, TangleClientContext)] pub struct ExampleServiceContext { diff --git a/blueprints/incredible-squaring-eigenlayer/src/contexts/aggregator.rs b/blueprints/incredible-squaring-eigenlayer/src/contexts/aggregator.rs index d79a67b1..dcde2585 100644 --- a/blueprints/incredible-squaring-eigenlayer/src/contexts/aggregator.rs +++ b/blueprints/incredible-squaring-eigenlayer/src/contexts/aggregator.rs @@ -15,7 +15,7 @@ use eigensdk::{ }; use gadget_sdk::{ config::StdGadgetConfiguration, - ctx::{EigenlayerContext, KeystoreContext}, + contexts::{EigenlayerContext, KeystoreContext}, debug, error, info, runners::{BackgroundService, RunnerError}, }; diff --git a/blueprints/incredible-squaring-eigenlayer/src/contexts/x_square.rs b/blueprints/incredible-squaring-eigenlayer/src/contexts/x_square.rs index 0011574b..1a216ca3 100644 --- a/blueprints/incredible-squaring-eigenlayer/src/contexts/x_square.rs +++ b/blueprints/incredible-squaring-eigenlayer/src/contexts/x_square.rs @@ -1,5 +1,5 @@ use crate::contexts::client::AggregatorClient; -use gadget_sdk::{config::StdGadgetConfiguration, ctx::KeystoreContext}; +use gadget_sdk::{config::StdGadgetConfiguration, contexts::KeystoreContext}; #[derive(Clone, KeystoreContext)] pub struct EigenSquareContext { diff --git a/blueprints/incredible-squaring-eigenlayer/src/jobs/compute_x_square.rs b/blueprints/incredible-squaring-eigenlayer/src/jobs/compute_x_square.rs index 
d4b57b53..ae05883b 100644 --- a/blueprints/incredible-squaring-eigenlayer/src/jobs/compute_x_square.rs +++ b/blueprints/incredible-squaring-eigenlayer/src/jobs/compute_x_square.rs @@ -8,7 +8,7 @@ use alloy_sol_types::SolType; use color_eyre::Result; use eigensdk::crypto_bls::BlsKeyPair; use eigensdk::crypto_bls::OperatorId; -use gadget_sdk::ctx::KeystoreContext; +use gadget_sdk::contexts::KeystoreContext; use gadget_sdk::event_listener::evm::contracts::EvmContractEventListener; use gadget_sdk::keystore::BackendExt; use gadget_sdk::{error, info, job}; diff --git a/macros/context-derive/Cargo.toml b/macros/context-derive/Cargo.toml index 93e8997a..7bf82ec2 100644 --- a/macros/context-derive/Cargo.toml +++ b/macros/context-derive/Cargo.toml @@ -27,6 +27,8 @@ gadget-sdk = { path = "../../sdk", features = ["std"] } alloy-network = { workspace = true } alloy-provider = { workspace = true } alloy-transport = { workspace = true } +round-based = { workspace = true } +serde = { workspace = true } [features] default = ["std"] diff --git a/macros/context-derive/src/eigenlayer.rs b/macros/context-derive/src/eigenlayer.rs index 82920ffe..c81734da 100644 --- a/macros/context-derive/src/eigenlayer.rs +++ b/macros/context-derive/src/eigenlayer.rs @@ -29,11 +29,11 @@ pub fn generate_context_impl( use eigensdk::client_elcontracts::reader::ELChainReader; use eigensdk::logging::get_test_logger; use gadget_sdk::utils::evm::get_slasher_address; - use gadget_sdk::ctx::BigInt; + use gadget_sdk::contexts::BigInt; use alloy_primitives::{U256, FixedBytes}; #[async_trait::async_trait] - impl #impl_generics gadget_sdk::ctx::EigenlayerContext for #name #ty_generics #where_clause { + impl #impl_generics gadget_sdk::contexts::EigenlayerContext for #name #ty_generics #where_clause { async fn avs_registry_reader(&self) -> Result { let http_rpc_endpoint = #field_access.http_rpc_endpoint.clone(); let gadget_sdk::config::ProtocolSpecificSettings::Eigenlayer(contract_addresses) = &#field_access.protocol_specific else { diff --git a/macros/context-derive/src/evm.rs b/macros/context-derive/src/evm.rs index 59eb8082..d6238101 100644 --- a/macros/context-derive/src/evm.rs +++ b/macros/context-derive/src/evm.rs @@ -20,7 +20,7 @@ pub fn generate_context_impl( let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); quote! { - impl #impl_generics gadget_sdk::ctx::EVMProviderContext for #name #ty_generics #where_clause { + impl #impl_generics gadget_sdk::contexts::EVMProviderContext for #name #ty_generics #where_clause { type Network = alloy_network::Ethereum; type Transport = alloy_transport::BoxTransport; type Provider = alloy_provider::fillers::FillProvider< diff --git a/macros/context-derive/src/keystore.rs b/macros/context-derive/src/keystore.rs index 689081c7..b0cddbfc 100644 --- a/macros/context-derive/src/keystore.rs +++ b/macros/context-derive/src/keystore.rs @@ -30,14 +30,14 @@ pub fn generate_context_impl( quote! 
{ #[cfg(not(feature = "std"))] - impl #impl_generics_without_rwlock gadget_sdk::ctx::KeystoreContext for #name #ty_generics #where_clause_without_rwlock { + impl #impl_generics_without_rwlock gadget_sdk::contexts::KeystoreContext for #name #ty_generics #where_clause_without_rwlock { fn keystore(&self) -> Result, gadget_sdk::config::Error> { #field_access.keystore() } } #[cfg(feature = "std")] - impl #impl_generics gadget_sdk::ctx::KeystoreContext for #name #ty_generics #where_clause { + impl #impl_generics gadget_sdk::contexts::KeystoreContext for #name #ty_generics #where_clause { fn keystore(&self) -> Result, gadget_sdk::config::Error> { #field_access.keystore() } diff --git a/macros/context-derive/src/lib.rs b/macros/context-derive/src/lib.rs index 6bf0263a..04a41650 100644 --- a/macros/context-derive/src/lib.rs +++ b/macros/context-derive/src/lib.rs @@ -19,6 +19,8 @@ mod eigenlayer; mod evm; /// Keystore context extension implementation. mod keystore; +/// MPC context extension implementation. +mod mpc; /// Services context extension implementation. mod services; /// Tangle Subxt Client context extension implementation. @@ -88,3 +90,16 @@ pub fn derive_eigenlayer_context(input: TokenStream) -> TokenStream { Err(err) => TokenStream::from(err.to_compile_error()), } } + +/// Derive macro for generating Context Extensions trait implementation for `MPCContext`. +#[proc_macro_derive(MPCContext, attributes(config))] +pub fn derive_mpc_context(input: TokenStream) -> TokenStream { + let input = syn::parse_macro_input!(input as syn::DeriveInput); + let result = cfg::find_config_field(&input.ident, &input.data) + .map(|config_field| mpc::generate_context_impl(input, config_field)); + + match result { + Ok(expanded) => TokenStream::from(expanded), + Err(err) => TokenStream::from(err.to_compile_error()), + } +} diff --git a/macros/context-derive/src/mpc.rs b/macros/context-derive/src/mpc.rs new file mode 100644 index 00000000..f0636193 --- /dev/null +++ b/macros/context-derive/src/mpc.rs @@ -0,0 +1,158 @@ +use quote::quote; +use syn::DeriveInput; + +use crate::cfg::FieldInfo; + +/// Generate the `MPCContext` implementation for the given struct. +#[allow(clippy::too_many_lines)] +pub fn generate_context_impl( + DeriveInput { + ident: name, + generics, + .. + }: DeriveInput, + config_field: FieldInfo, +) -> proc_macro2::TokenStream { + let _field_access = match config_field { + FieldInfo::Named(ident) => quote! { self.#ident }, + FieldInfo::Unnamed(index) => quote! { self.#index }, + }; + + let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); + + quote! 
{
+        #[gadget_sdk::async_trait::async_trait]
+        impl #impl_generics gadget_sdk::contexts::MPCContext for #name #ty_generics #where_clause {
+            /// Returns a reference to the configuration
+            #[inline]
+            fn config(&self) -> &gadget_sdk::config::StdGadgetConfiguration {
+                &self.config
+            }
+
+            /// Returns the network protocol identifier for this context
+            #[inline]
+            fn network_protocol(&self) -> String {
+                let name = stringify!(#name).to_string();
+                format!("/{}/1.0.0", name.to_lowercase())
+            }
+
+            fn create_network_delivery_wrapper<M>(
+                &self,
+                mux: std::sync::Arc<gadget_sdk::network::NetworkMultiplexer>,
+                party_index: gadget_sdk::round_based::PartyIndex,
+                task_hash: [u8; 32],
+                parties: std::collections::BTreeMap<gadget_sdk::round_based::PartyIndex, gadget_sdk::subxt_core::ext::sp_core::ecdsa::Public>,
+            ) -> Result<gadget_sdk::network::round_based_compat::NetworkDeliveryWrapper<M>, gadget_sdk::Error>
+            where
+                M: Clone + Send + Unpin + 'static + gadget_sdk::serde::Serialize + gadget_sdk::serde::de::DeserializeOwned + gadget_sdk::round_based::ProtocolMessage,
+            {
+                Ok(gadget_sdk::network::round_based_compat::NetworkDeliveryWrapper::new(mux, party_index, task_hash, parties))
+            }
+
+            async fn get_party_index(
+                &self,
+            ) -> Result<gadget_sdk::round_based::PartyIndex, gadget_sdk::Error> {
+                Ok(self.get_party_index_and_operators().await?.0 as _)
+            }
+
+            async fn get_participants(
+                &self,
+                client: &gadget_sdk::ext::subxt::OnlineClient<gadget_sdk::ext::subxt::PolkadotConfig>,
+            ) -> Result<
+                std::collections::BTreeMap<gadget_sdk::round_based::PartyIndex, gadget_sdk::subxt_core::utils::AccountId32>,
+                gadget_sdk::Error,
+            > {
+                Ok(self.get_party_index_and_operators().await?.1.into_iter().enumerate().map(|(i, (id, _))| (i as _, id)).collect())
+            }
+
+            /// Retrieves the current blueprint ID from the configuration
+            ///
+            /// # Errors
+            /// Returns an error if the blueprint ID is not found in the configuration
+            fn blueprint_id(&self) -> gadget_sdk::color_eyre::Result<u64> {
+                self.config()
+                    .protocol_specific
+                    .tangle()
+                    .map(|c| c.blueprint_id)
+                    .map_err(|err| gadget_sdk::color_eyre::Report::msg(format!("Blueprint ID not found in configuration: {err}")))
+            }
+
+            /// Retrieves the current party index and operator mapping
+            ///
+            /// # Errors
+            /// Returns an error if:
+            /// - Failed to retrieve operator keys
+            /// - Current party is not found in the operator list
+            async fn get_party_index_and_operators(
+                &self,
+            ) -> gadget_sdk::color_eyre::Result<(usize, std::collections::BTreeMap<gadget_sdk::subxt_core::utils::AccountId32, gadget_sdk::subxt_core::ext::sp_core::ecdsa::Public>)> {
+                let parties = self.current_service_operators_ecdsa_keys().await?;
+                let my_id = self.config.first_sr25519_signer()?.account_id();
+
+                gadget_sdk::trace!(
+                    "Looking for {my_id:?} in parties: {:?}",
+                    parties.keys().collect::<Vec<_>>()
+                );
+
+                let index_of_my_id = parties
+                    .iter()
+                    .position(|(id, _)| id == &my_id)
+                    .ok_or_else(|| gadget_sdk::color_eyre::Report::msg("Party not found in operator list"))?;
+
+                Ok((index_of_my_id, parties))
+            }
+
+            /// Retrieves the ECDSA keys for all current service operators
+            ///
+            /// # Errors
+            /// Returns an error if:
+            /// - Failed to connect to the Tangle client
+            /// - Failed to retrieve operator information
+            /// - Missing ECDSA key for any operator
+            async fn current_service_operators_ecdsa_keys(
+                &self,
+            ) -> gadget_sdk::color_eyre::Result<std::collections::BTreeMap<gadget_sdk::subxt_core::utils::AccountId32, gadget_sdk::subxt_core::ext::sp_core::ecdsa::Public>> {
+                let client = self.tangle_client().await?;
+                let current_blueprint = self.blueprint_id()?;
+                let current_service_op = self.current_service_operators(&client).await?;
+                let storage = client.storage().at_latest().await?;
+
+                let mut map = std::collections::BTreeMap::new();
+                for (operator, _) in current_service_op {
+                    let addr = gadget_sdk::ext::tangle_subxt::tangle_testnet_runtime::api::storage()
+                        .services()
+                        .operators(current_blueprint, &operator);
+
+                    let maybe_pref = storage.fetch(&addr).await.map_err(|err| {
+                        gadget_sdk::color_eyre::Report::msg(format!("Failed to fetch operator storage for {operator}: {err}"))
+                    })?;
+
+                    if let Some(pref) = maybe_pref {
+                        map.insert(operator, gadget_sdk::subxt_core::ext::sp_core::ecdsa::Public(pref.key));
+                    } else {
+                        return Err(gadget_sdk::color_eyre::Report::msg(format!("Missing ECDSA key for operator {operator}")));
+                    }
+                }
+
+                Ok(map)
+            }
+
+            /// Retrieves the current call ID for this job
+            ///
+            /// # Errors
+            /// Returns an error if failed to retrieve the call ID from storage
+            async fn current_call_id(&self) -> gadget_sdk::color_eyre::Result<u64> {
+                let client = self.tangle_client().await?;
+                let addr = gadget_sdk::ext::tangle_subxt::tangle_testnet_runtime::api::storage().services().next_job_call_id();
+                let storage = client.storage().at_latest().await?;
+
+                let maybe_call_id = storage
+                    .fetch_or_default(&addr)
+                    .await
+                    .map_err(|err| gadget_sdk::color_eyre::Report::msg(format!("Failed to fetch current call ID: {err}")))?;
+
+                Ok(maybe_call_id.saturating_sub(1))
+            }
+        }
+    }
+}
diff --git a/macros/context-derive/src/services.rs b/macros/context-derive/src/services.rs
index ce33b281..4b3e8ea0 100644
--- a/macros/context-derive/src/services.rs
+++ b/macros/context-derive/src/services.rs
@@ -21,7 +21,7 @@ pub fn generate_context_impl(
     let (impl_generics, ty_generics, where_clause) = generics.split_for_impl();
 
     quote! {
-        impl #impl_generics gadget_sdk::ctx::ServicesContext for #name #ty_generics #where_clause {
+        impl #impl_generics gadget_sdk::contexts::ServicesContext for #name #ty_generics #where_clause {
             type Config = gadget_sdk::ext::subxt::PolkadotConfig;
             fn current_blueprint(
                 &self,
diff --git a/macros/context-derive/src/subxt.rs b/macros/context-derive/src/subxt.rs
index 60052f1e..b8e7f299 100644
--- a/macros/context-derive/src/subxt.rs
+++ b/macros/context-derive/src/subxt.rs
@@ -20,7 +20,7 @@ pub fn generate_context_impl(
     let (impl_generics, ty_generics, where_clause) = generics.split_for_impl();
 
     quote! 
{ - impl #impl_generics gadget_sdk::ctx::TangleClientContext for #name #ty_generics #where_clause { + impl #impl_generics gadget_sdk::contexts::TangleClientContext for #name #ty_generics #where_clause { type Config = gadget_sdk::ext::subxt::PolkadotConfig; fn tangle_client(&self) -> impl core::future::Future, gadget_sdk::ext::subxt::Error>> { use gadget_sdk::ext::subxt; diff --git a/macros/context-derive/tests/tests.rs b/macros/context-derive/tests/tests.rs index 436f77a5..df21859f 100644 --- a/macros/context-derive/tests/tests.rs +++ b/macros/context-derive/tests/tests.rs @@ -1,13 +1,15 @@ +mod ui; + #[cfg(test)] mod tests { #[test] fn test_derive_context() { let t = trybuild::TestCases::new(); - t.pass("tests/ui/01_basic.rs"); - t.pass("tests/ui/02_unnamed_fields.rs"); - t.pass("tests/ui/03_generic_struct.rs"); - t.compile_fail("tests/ui/04_missing_config_attr.rs"); - t.compile_fail("tests/ui/05_not_a_struct.rs"); - t.compile_fail("tests/ui/06_unit_struct.rs"); + t.pass("tests/ui/basic.rs"); + t.pass("tests/ui/unnamed_fields.rs"); + t.pass("tests/ui/generic_struct.rs"); + t.compile_fail("tests/ui/missing_config_attr.rs"); + t.compile_fail("tests/ui/not_a_struct.rs"); + t.compile_fail("tests/ui/unit_struct.rs"); } } diff --git a/macros/context-derive/tests/ui/01_basic.rs b/macros/context-derive/tests/ui/01_basic.rs deleted file mode 100644 index d73d7a0d..00000000 --- a/macros/context-derive/tests/ui/01_basic.rs +++ /dev/null @@ -1,24 +0,0 @@ -use gadget_sdk::config::StdGadgetConfiguration; -use gadget_sdk::ctx::{EVMProviderContext, KeystoreContext, ServicesContext, TangleClientContext}; - -#[derive(KeystoreContext, EVMProviderContext, TangleClientContext, ServicesContext)] -struct MyContext { - foo: String, - #[config] - sdk_config: StdGadgetConfiguration, -} - -fn main() { - let body = async { - let ctx = MyContext { - foo: "bar".to_string(), - sdk_config: Default::default(), - }; - let _keystore = ctx.keystore(); - let _evm_provider = ctx.evm_provider().await; - let tangle_client = ctx.tangle_client().await.unwrap(); - let _services = ctx.current_service_operators(&tangle_client).await.unwrap(); - }; - - let _ = body; -} diff --git a/macros/context-derive/tests/ui/basic.rs b/macros/context-derive/tests/ui/basic.rs new file mode 100644 index 00000000..a07eeb6b --- /dev/null +++ b/macros/context-derive/tests/ui/basic.rs @@ -0,0 +1,98 @@ +use gadget_sdk::async_trait::async_trait; +use gadget_sdk::config::{GadgetConfiguration, StdGadgetConfiguration}; +use gadget_sdk::contexts::{ + EVMProviderContext, KeystoreContext, MPCContext, ServicesContext, TangleClientContext, +}; +use gadget_sdk::network::{Network, NetworkMultiplexer, ProtocolMessage}; +use gadget_sdk::store::LocalDatabase; +use gadget_sdk::subxt_core::ext::sp_core::ecdsa::Public; +use gadget_sdk::subxt_core::tx::signer::Signer; +use gadget_sdk::Error; +use round_based::ProtocolMessage as RoundBasedProtocolMessage; +use serde::{Deserialize, Serialize}; +use std::collections::BTreeMap; +use std::sync::Arc; + +#[derive(KeystoreContext, EVMProviderContext, TangleClientContext, ServicesContext, MPCContext)] +#[allow(dead_code)] +struct MyContext { + foo: String, + #[config] + config: StdGadgetConfiguration, + store: Arc>, +} + +#[allow(dead_code)] +fn main() { + let body = async { + let ctx = MyContext { + foo: "bar".to_string(), + config: GadgetConfiguration::default(), + store: Arc::new(LocalDatabase::open("test.json")), + }; + + // Test existing context functions + let _keystore = ctx.keystore(); + let _evm_provider = 
ctx.evm_provider().await; + let tangle_client = ctx.tangle_client().await.unwrap(); + let _services = ctx.current_service_operators(&tangle_client).await.unwrap(); + + // Test MPC context utility functions + let _config = ctx.config(); + let _protocol = ctx.network_protocol(); + + // Test MPC context functions + + let mux = Arc::new(NetworkMultiplexer::new(StubNetwork)); + let party_index = 0; + let task_hash = [0u8; 32]; + let mut parties = BTreeMap::::new(); + parties.insert(0, Public([0u8; 33])); + + // Test network delivery wrapper creation + let _network_wrapper = ctx.create_network_delivery_wrapper::( + mux.clone(), + party_index, + task_hash, + parties.clone(), + ); + + // Test party index retrieval + let _party_idx = ctx.get_party_index().await; + + // Test participants retrieval + let _participants = ctx.get_participants(&tangle_client).await; + + // Test blueprint ID retrieval + let _blueprint_id = ctx.blueprint_id(); + + // Test party index and operators retrieval + let _party_idx_ops = ctx.get_party_index_and_operators().await; + + // Test service operators ECDSA keys retrieval + let _operator_keys = ctx.current_service_operators_ecdsa_keys().await; + + // Test current call ID retrieval + let _call_id = ctx.current_call_id().await; + }; + + drop(body); +} + +#[derive(RoundBasedProtocolMessage, Clone, Serialize, Deserialize)] +enum StubMessage {} + +#[allow(dead_code)] +struct StubNetwork; + +#[async_trait] +impl Network for StubNetwork { + async fn next_message(&self) -> Option { + None + } + + async fn send_message(&self, message: ProtocolMessage) -> Result<(), Error> { + drop(message); + Ok(()) + } +} diff --git a/macros/context-derive/tests/ui/03_generic_struct.rs b/macros/context-derive/tests/ui/generic_struct.rs similarity index 66% rename from macros/context-derive/tests/ui/03_generic_struct.rs rename to macros/context-derive/tests/ui/generic_struct.rs index e7c8c176..c0eecfd4 100644 --- a/macros/context-derive/tests/ui/03_generic_struct.rs +++ b/macros/context-derive/tests/ui/generic_struct.rs @@ -1,7 +1,10 @@ -use gadget_sdk::config::StdGadgetConfiguration; -use gadget_sdk::ctx::{EVMProviderContext, KeystoreContext, ServicesContext, TangleClientContext}; +use gadget_sdk::config::{GadgetConfiguration, StdGadgetConfiguration}; +use gadget_sdk::contexts::{ + EVMProviderContext, KeystoreContext, ServicesContext, TangleClientContext, +}; #[derive(KeystoreContext, EVMProviderContext, TangleClientContext, ServicesContext)] +#[allow(dead_code)] struct MyContext { foo: T, bar: U, @@ -9,12 +12,13 @@ struct MyContext { sdk_config: StdGadgetConfiguration, } +#[allow(dead_code)] fn main() { let body = async { let ctx = MyContext { foo: "bar".to_string(), bar: 42, - sdk_config: Default::default(), + sdk_config: GadgetConfiguration::default(), }; let _keystore = ctx.keystore(); let _evm_provider = ctx.evm_provider().await.unwrap(); @@ -22,5 +26,5 @@ fn main() { let _services = ctx.current_service_operators(&tangle_client).await.unwrap(); }; - let _ = body; + drop(body); } diff --git a/macros/context-derive/tests/ui/04_missing_config_attr.rs b/macros/context-derive/tests/ui/missing_config_attr.rs similarity index 79% rename from macros/context-derive/tests/ui/04_missing_config_attr.rs rename to macros/context-derive/tests/ui/missing_config_attr.rs index 4910f45b..bf04065e 100644 --- a/macros/context-derive/tests/ui/04_missing_config_attr.rs +++ b/macros/context-derive/tests/ui/missing_config_attr.rs @@ -1,5 +1,5 @@ use gadget_sdk::config::StdGadgetConfiguration; -use 
gadget_sdk::ctx::KeystoreContext; +use gadget_sdk::contexts::KeystoreContext; #[derive(KeystoreContext)] struct MyContext { diff --git a/macros/context-derive/tests/ui/04_missing_config_attr.stderr b/macros/context-derive/tests/ui/missing_config_attr.stderr similarity index 80% rename from macros/context-derive/tests/ui/04_missing_config_attr.stderr rename to macros/context-derive/tests/ui/missing_config_attr.stderr index 52300b24..b7d5f451 100644 --- a/macros/context-derive/tests/ui/04_missing_config_attr.stderr +++ b/macros/context-derive/tests/ui/missing_config_attr.stderr @@ -1,5 +1,5 @@ error: No field with #[config] attribute found, please add #[config] to the field that holds the `gadget_sdk::config::GadgetConfiguration` - --> tests/ui/04_missing_config_attr.rs:5:8 + --> tests/ui/missing_config_attr.rs:5:8 | 5 | struct MyContext { | ^^^^^^^^^ diff --git a/macros/context-derive/tests/ui/mod.rs b/macros/context-derive/tests/ui/mod.rs new file mode 100644 index 00000000..c324553c --- /dev/null +++ b/macros/context-derive/tests/ui/mod.rs @@ -0,0 +1,3 @@ +mod basic; +mod generic_struct; +mod unnamed_fields; diff --git a/macros/context-derive/tests/ui/05_not_a_struct.rs b/macros/context-derive/tests/ui/not_a_struct.rs similarity index 67% rename from macros/context-derive/tests/ui/05_not_a_struct.rs rename to macros/context-derive/tests/ui/not_a_struct.rs index fadc6ff2..b765a40b 100644 --- a/macros/context-derive/tests/ui/05_not_a_struct.rs +++ b/macros/context-derive/tests/ui/not_a_struct.rs @@ -1,4 +1,4 @@ -use gadget_sdk::ctx::KeystoreContext; +use gadget_sdk::contexts::KeystoreContext; #[derive(KeystoreContext)] enum MyContext { diff --git a/macros/context-derive/tests/ui/05_not_a_struct.stderr b/macros/context-derive/tests/ui/not_a_struct.stderr similarity index 74% rename from macros/context-derive/tests/ui/05_not_a_struct.stderr rename to macros/context-derive/tests/ui/not_a_struct.stderr index ae741816..b3bbcd81 100644 --- a/macros/context-derive/tests/ui/05_not_a_struct.stderr +++ b/macros/context-derive/tests/ui/not_a_struct.stderr @@ -1,5 +1,5 @@ error: Context Extensions traits can only be derived for structs - --> tests/ui/05_not_a_struct.rs:4:6 + --> tests/ui/not_a_struct.rs:4:6 | 4 | enum MyContext { | ^^^^^^^^^ diff --git a/macros/context-derive/tests/ui/06_unit_struct.rs b/macros/context-derive/tests/ui/unit_struct.rs similarity index 58% rename from macros/context-derive/tests/ui/06_unit_struct.rs rename to macros/context-derive/tests/ui/unit_struct.rs index 49720770..87702c27 100644 --- a/macros/context-derive/tests/ui/06_unit_struct.rs +++ b/macros/context-derive/tests/ui/unit_struct.rs @@ -1,4 +1,4 @@ -use gadget_sdk::ctx::KeystoreContext; +use gadget_sdk::contexts::KeystoreContext; #[derive(KeystoreContext)] struct MyContext; diff --git a/macros/context-derive/tests/ui/06_unit_struct.stderr b/macros/context-derive/tests/ui/unit_struct.stderr similarity index 76% rename from macros/context-derive/tests/ui/06_unit_struct.stderr rename to macros/context-derive/tests/ui/unit_struct.stderr index 69ff6311..a0715d79 100644 --- a/macros/context-derive/tests/ui/06_unit_struct.stderr +++ b/macros/context-derive/tests/ui/unit_struct.stderr @@ -1,5 +1,5 @@ error: Context Extensions traits cannot be derived for unit structs - --> tests/ui/06_unit_struct.rs:4:8 + --> tests/ui/unit_struct.rs:4:8 | 4 | struct MyContext; | ^^^^^^^^^ diff --git a/macros/context-derive/tests/ui/02_unnamed_fields.rs b/macros/context-derive/tests/ui/unnamed_fields.rs similarity index 57% rename from 
macros/context-derive/tests/ui/02_unnamed_fields.rs rename to macros/context-derive/tests/ui/unnamed_fields.rs index 14c75cb3..fb63f8ae 100644 --- a/macros/context-derive/tests/ui/02_unnamed_fields.rs +++ b/macros/context-derive/tests/ui/unnamed_fields.rs @@ -1,16 +1,20 @@ -use gadget_sdk::config::StdGadgetConfiguration; -use gadget_sdk::ctx::{EVMProviderContext, KeystoreContext, ServicesContext, TangleClientContext}; +use gadget_sdk::config::{GadgetConfiguration, StdGadgetConfiguration}; +use gadget_sdk::contexts::{ + EVMProviderContext, KeystoreContext, ServicesContext, TangleClientContext, +}; #[derive(KeystoreContext, EVMProviderContext, TangleClientContext, ServicesContext)] +#[allow(dead_code)] struct MyContext(String, #[config] StdGadgetConfiguration); +#[allow(dead_code)] fn main() { let body = async { - let ctx = MyContext("bar".to_string(), Default::default()); + let ctx = MyContext("bar".to_string(), GadgetConfiguration::default()); let _keystore = ctx.keystore(); let _evm_provider = ctx.evm_provider().await; let tangle_client = ctx.tangle_client().await.unwrap(); let _services = ctx.current_service_operators(&tangle_client).await; }; - let _ = body; + drop(body); } diff --git a/sdk/src/contexts/eigenlayer.rs b/sdk/src/contexts/eigenlayer.rs new file mode 100644 index 00000000..17f034f3 --- /dev/null +++ b/sdk/src/contexts/eigenlayer.rs @@ -0,0 +1,132 @@ +use alloy_primitives::{Address, Bytes, FixedBytes, U256}; +use eigensdk::client_avsregistry::reader::AvsRegistryChainReader; +use eigensdk::client_avsregistry::writer::AvsRegistryChainWriter; +use eigensdk::client_elcontracts::writer::Operator; +use eigensdk::services_avsregistry::chaincaller::AvsRegistryServiceChainCaller; +use eigensdk::services_blsaggregation::bls_agg::BlsAggregatorService; +use eigensdk::services_operatorsinfo::operatorsinfo_inmemory::OperatorInfoServiceInMemory; +use eigensdk::types::operator::OperatorPubKeys; +use eigensdk::utils::binding::OperatorStateRetriever; +use eigensdk::utils::binding::StakeRegistry::StakeUpdate; +use num_bigint::BigInt; +use std::collections::HashMap; + +/// `EigenlayerContext` trait provides access to Eigenlayer utilities +#[async_trait::async_trait] +pub trait EigenlayerContext { + /// Provides a reader for the AVS registry. + async fn avs_registry_reader( + &self, + ) -> color_eyre::Result; + + /// Provides a writer for the AVS registry. + async fn avs_registry_writer( + &self, + private_key: String, + ) -> color_eyre::Result; + + /// Provides an operator info service. + async fn operator_info_service_in_memory( + &self, + ) -> color_eyre::Result; + + /// Provides an AVS registry service chain caller. + async fn avs_registry_service_chain_caller_in_memory( + &self, + ) -> color_eyre::Result< + AvsRegistryServiceChainCaller, + std::io::Error, + >; + + /// Provides a BLS aggregation service. + async fn bls_aggregation_service_in_memory( + &self, + ) -> color_eyre::Result< + BlsAggregatorService< + AvsRegistryServiceChainCaller, + >, + std::io::Error, + >; + + /// Get Operator stake in Quorums at a given block. + async fn get_operator_stake_in_quorums_at_block( + &self, + block_number: u32, + quorum_numbers: Bytes, + ) -> color_eyre::Result>, std::io::Error>; + + /// Get an Operator's stake in Quorums at current block. + async fn get_operator_stake_in_quorums_at_current_block( + &self, + operator_id: FixedBytes<32>, + ) -> color_eyre::Result, std::io::Error>; + + /// Get an Operator by ID. 
+ async fn get_operator_by_id( + &self, + operator_id: [u8; 32], + ) -> color_eyre::Result; + + /// Get an Operator stake history. + async fn get_operator_stake_history( + &self, + operator_id: FixedBytes<32>, + quorum_number: u8, + ) -> color_eyre::Result, std::io::Error>; + + /// Get an Operator stake update at a given index. + async fn get_operator_stake_update_at_index( + &self, + quorum_number: u8, + operator_id: FixedBytes<32>, + index: U256, + ) -> color_eyre::Result; + + /// Get an Operator's stake at a given block number. + async fn get_operator_stake_at_block_number( + &self, + operator_id: FixedBytes<32>, + quorum_number: u8, + block_number: u32, + ) -> color_eyre::Result; + + /// Get an Operator's [`details`](OperatorDetails). + async fn get_operator_details( + &self, + operator_addr: Address, + ) -> color_eyre::Result; + + /// Get an Operator's latest stake update. + async fn get_latest_stake_update( + &self, + operator_id: FixedBytes<32>, + quorum_number: u8, + ) -> color_eyre::Result; + + /// Get an Operator's ID as [`FixedBytes`] from its [`Address`]. + async fn get_operator_id( + &self, + operator_addr: Address, + ) -> color_eyre::Result, std::io::Error>; + + /// Get the total stake at a given block number from a given index. + async fn get_total_stake_at_block_number_from_index( + &self, + quorum_number: u8, + block_number: u32, + index: U256, + ) -> color_eyre::Result; + + /// Get the total stake history length of a given quorum. + async fn get_total_stake_history_length( + &self, + quorum_number: u8, + ) -> color_eyre::Result; + + /// Provides the public keys of existing registered operators within the provided block range. + async fn query_existing_registered_operator_pub_keys( + &self, + start_block: u64, + to_block: u64, + ) -> color_eyre::Result<(Vec
, Vec), std::io::Error>; +} diff --git a/sdk/src/contexts/evm_provider.rs b/sdk/src/contexts/evm_provider.rs new file mode 100644 index 00000000..757a00ad --- /dev/null +++ b/sdk/src/contexts/evm_provider.rs @@ -0,0 +1,12 @@ +use std::future::Future; + +/// `EVMProviderContext` trait provides access to the EVM provider from the context. +pub trait EVMProviderContext { + type Network: alloy_network::Network; + type Transport: alloy_transport::Transport + Clone; + type Provider: alloy_provider::Provider; + /// Get the EVM provider from the context. + fn evm_provider( + &self, + ) -> impl Future>; +} diff --git a/sdk/src/contexts/gossip_network.rs b/sdk/src/contexts/gossip_network.rs new file mode 100644 index 00000000..e048a3a3 --- /dev/null +++ b/sdk/src/contexts/gossip_network.rs @@ -0,0 +1,5 @@ +/// `GossipNetworkContext` trait provides access to the network client from the context. +pub trait GossipNetworkContext { + /// Get the Goossip client from the context. + fn gossip_network(&self) -> &crate::network::gossip::GossipHandle; +} diff --git a/sdk/src/contexts/keystore.rs b/sdk/src/contexts/keystore.rs new file mode 100644 index 00000000..98c1c67b --- /dev/null +++ b/sdk/src/contexts/keystore.rs @@ -0,0 +1,7 @@ +use crate::keystore::backend::GenericKeyStore; + +/// `KeystoreContext` trait provides access to the generic keystore from the context. +pub trait KeystoreContext { + /// Get the keystore client from the context. + fn keystore(&self) -> color_eyre::Result, crate::config::Error>; +} diff --git a/sdk/src/contexts/mod.rs b/sdk/src/contexts/mod.rs new file mode 100644 index 00000000..8e4a801f --- /dev/null +++ b/sdk/src/contexts/mod.rs @@ -0,0 +1,68 @@ +//! A set of traits and utilities that provide a common interface for interacting with the Gadget SDK. +//! +//! Usually, when you need access to the SDK, you will need to pass the Context to your jobs/functions. In your code, you will create a struct that encapsulates all the things that you would need from outside world from your job. +//! for example, if you need to interact with the network, you will need to have a network client in your struct. If you need to interact with the database storage, you will need to have a db client in your struct. And so on. +//! +//! This module provides a set of traits that you can implement for your struct to make it a context-aware struct by adding new functionalities to it. +//! +//! # Example +//! +//! ```rust,no_run +//! use gadget_sdk::config::StdGadgetConfiguration; +//! use gadget_sdk::contexts::KeystoreContext; +//! use gadget_sdk::event_listener::tangle::jobs::{ +//! services_post_processor, services_pre_processor, +//! }; +//! use gadget_sdk::event_listener::tangle::TangleEventListener; +//! use gadget_sdk::job; +//! use gadget_sdk::tangle_subxt::tangle_testnet_runtime::api::services::events::JobCalled; +//! +//! // This your struct that encapsulates all the things you need from outside world. +//! // By deriving KeystoreContext, you can now access the keystore client from your struct. +//! #[derive(Clone, Debug, KeystoreContext)] +//! struct MyContext { +//! foo: String, +//! bar: u64, +//! #[config] +//! sdk_config: StdGadgetConfiguration, +//! } +//! +//! #[job( +//! id = 0, +//! params(who), +//! result(_), +//! event_listener( +//! listener = TangleEventListener, +//! pre_processor = services_pre_processor, +//! post_processor = services_post_processor, +//! ) +//! )] +//! async fn my_job(who: String, ctx: MyContext) -> Result { +//! // Access the keystore client from the context. 
+//! let keystore = ctx.keystore(); +//! // Do something with the keystore client. +//! // ... +//! Ok(format!("Hello, {}!", who)) +//! } +//! ``` + +// derives +pub use gadget_context_derive::*; +pub use num_bigint::BigInt; + +// A macro that takes an arbitrary mod, like "eigenlayer", then writes that as well as a line saying "pub use eigenlayer::*;" to the file. +// This way, we can easily add new modules to the SDK without having to manually import each time +macro_rules! create_module_derive { + ($module:ident) => { + mod $module; + pub use $module::*; + }; +} + +create_module_derive!(eigenlayer); +create_module_derive!(evm_provider); +create_module_derive!(gossip_network); +create_module_derive!(keystore); +create_module_derive!(mpc); +create_module_derive!(services); +create_module_derive!(tangle_client); diff --git a/sdk/src/contexts/mpc.rs b/sdk/src/contexts/mpc.rs new file mode 100644 index 00000000..73eb0dd2 --- /dev/null +++ b/sdk/src/contexts/mpc.rs @@ -0,0 +1,63 @@ +use crate::network::NetworkMultiplexer; +use round_based::PartyIndex; +use std::collections::BTreeMap; +use std::sync::Arc; +use subxt_core::utils::AccountId32; + +/// `MPCContext` trait provides access to MPC (Multi-Party Computation) functionality from the context. +#[async_trait::async_trait] +pub trait MPCContext { + /// Returns a reference to the configuration + fn config(&self) -> &crate::config::StdGadgetConfiguration; + + /// Returns the network protocol identifier + fn network_protocol(&self) -> String; + + /// Creates a network delivery wrapper for MPC communication + fn create_network_delivery_wrapper( + &self, + mux: Arc, + party_index: PartyIndex, + task_hash: [u8; 32], + parties: BTreeMap, + ) -> color_eyre::Result< + crate::network::round_based_compat::NetworkDeliveryWrapper, + crate::Error, + > + where + M: Clone + + Send + + Unpin + + 'static + + serde::Serialize + + serde::de::DeserializeOwned + + round_based::ProtocolMessage; + + /// Gets the party index from the participants map + async fn get_party_index(&self) -> color_eyre::Result; + + /// Gets the participants in the MPC protocol + async fn get_participants( + &self, + client: &subxt::OnlineClient, + ) -> color_eyre::Result, crate::Error>; + + /// Gets the current blueprint ID + fn blueprint_id(&self) -> color_eyre::Result; + + /// Gets the party index and operator mapping + async fn get_party_index_and_operators( + &self, + ) -> color_eyre::Result<( + usize, + BTreeMap, + )>; + + /// Gets the ECDSA keys for all current service operators + async fn current_service_operators_ecdsa_keys( + &self, + ) -> color_eyre::Result>; + + /// Gets the current call ID for this job + async fn current_call_id(&self) -> color_eyre::Result; +} diff --git a/sdk/src/contexts/services.rs b/sdk/src/contexts/services.rs new file mode 100644 index 00000000..05d0329d --- /dev/null +++ b/sdk/src/contexts/services.rs @@ -0,0 +1,100 @@ +use std::future::Future; +use tangle_subxt::tangle_testnet_runtime::api::runtime_types::tangle_primitives::services::{Service, ServiceBlueprint}; +use tangle_subxt::tangle_testnet_runtime::api::runtime_types::sp_arithmetic::per_things::Percent; +use tangle_subxt::tangle_testnet_runtime::api::runtime_types::pallet_multi_asset_delegation::types::operator::OperatorMetadata; +use tangle_subxt::tangle_testnet_runtime::api::assets::events::burned::Balance; +use tangle_subxt::tangle_testnet_runtime::api::assets::events::accounts_destroyed::AssetId; +use tangle_subxt::tangle_testnet_runtime::api::system::storage::types::number::Number; +use 
tangle_subxt::tangle_testnet_runtime::api::runtime_types::pallet_multi_asset_delegation::types::delegator::DelegatorMetadata; + +/// `ServicesContext` trait provides access to the current service and current blueprint from the context. +pub trait ServicesContext { + type Config: subxt::Config; + /// Get the current blueprint information from the context. + fn current_blueprint( + &self, + client: &subxt::OnlineClient, + ) -> impl Future>; + + /// Query the current blueprint owner from the context. + fn current_blueprint_owner( + &self, + client: &subxt::OnlineClient, + ) -> impl Future>; + + /// Get the current service operators with their restake exposure from the context. + /// This function will return a list of service operators that are selected to run this service + /// instance. + fn current_service_operators( + &self, + client: &subxt::OnlineClient, + ) -> impl Future, subxt::Error>>; + + #[allow(clippy::type_complexity)] + /// Get metadata for a list of operators from the context. + fn operators_metadata( + &self, + client: &subxt::OnlineClient, + operators: Vec, + ) -> impl Future< + Output = color_eyre::Result< + Vec<( + subxt::utils::AccountId32, + OperatorMetadata, + )>, + subxt::Error, + >, + >; + + /// Get metadata for a single operator from the context. + /// This function will return the metadata for a single operator. + fn operator_metadata( + &self, + client: &subxt::OnlineClient, + operator: subxt::utils::AccountId32, + ) -> impl Future< + Output = color_eyre::Result< + Option>, + subxt::Error, + >, + >; + + /// Get the current service instance from the context. + fn service_instance( + &self, + client: &subxt::OnlineClient, + ) -> impl Future< + Output = color_eyre::Result< + Service, + subxt::Error, + >, + >; + + #[allow(clippy::type_complexity)] + /// Get delegations for a list of operators from the context. + fn operator_delegations( + &self, + client: &subxt::OnlineClient, + operators: Vec, + ) -> impl Future< + Output = color_eyre::Result< + Vec<( + subxt::utils::AccountId32, // operator + Option>, + )>, + subxt::Error, + >, + >; + + /// Get delegations for a single operator from the context. + fn operator_delegation( + &self, + client: &subxt::OnlineClient, + operator: subxt::utils::AccountId32, + ) -> impl Future< + Output = color_eyre::Result< + Option>, + subxt::Error, + >, + >; +} diff --git a/sdk/src/contexts/tangle_client.rs b/sdk/src/contexts/tangle_client.rs new file mode 100644 index 00000000..031dbb5c --- /dev/null +++ b/sdk/src/contexts/tangle_client.rs @@ -0,0 +1,10 @@ +use std::future::Future; + +/// `TangleClientContext` trait provides access to the Tangle client from the context. +pub trait TangleClientContext { + type Config: subxt::Config; + /// Get the Tangle client from the context. + fn tangle_client( + &self, + ) -> impl Future, subxt::Error>>; +} diff --git a/sdk/src/ctx.rs b/sdk/src/ctx.rs deleted file mode 100644 index 358f6827..00000000 --- a/sdk/src/ctx.rs +++ /dev/null @@ -1,307 +0,0 @@ -//! A set of traits and utilities that provide a common interface for interacting with the Gadget SDK. -//! -//! Usually, when you need access to the SDK, you will need to pass the Context to your jobs/functions. In your code, you will create a struct that encapsulates all the things that you would need from outside world from your job. -//! for example, if you need to interact with the network, you will need to have a network client in your struct. If you need to interact with the database storage, you will need to have a db client in your struct. 
And so on. -//! -//! This module provides a set of traits that you can implement for your struct to make it a context-aware struct by adding new functionalities to it. -//! -//! # Example -//! -//! ```rust,no_run -//! use gadget_sdk::config::StdGadgetConfiguration; -//! use gadget_sdk::ctx::KeystoreContext; -//! use gadget_sdk::event_listener::tangle::jobs::{ -//! services_post_processor, services_pre_processor, -//! }; -//! use gadget_sdk::event_listener::tangle::TangleEventListener; -//! use gadget_sdk::job; -//! use gadget_sdk::tangle_subxt::tangle_testnet_runtime::api::services::events::JobCalled; -//! -//! // This your struct that encapsulates all the things you need from outside world. -//! // By deriving KeystoreContext, you can now access the keystore client from your struct. -//! #[derive(Clone, Debug, KeystoreContext)] -//! struct MyContext { -//! foo: String, -//! bar: u64, -//! #[config] -//! sdk_config: StdGadgetConfiguration, -//! } -//! -//! #[job( -//! id = 0, -//! params(who), -//! result(_), -//! event_listener( -//! listener = TangleEventListener, -//! pre_processor = services_pre_processor, -//! post_processor = services_post_processor, -//! ) -//! )] -//! async fn my_job(who: String, ctx: MyContext) -> Result { -//! // Access the keystore client from the context. -//! let keystore = ctx.keystore(); -//! // Do something with the keystore client. -//! // ... -//! Ok(format!("Hello, {}!", who)) -//! } -//! ``` - -use crate::keystore::backend::GenericKeyStore; -use alloy_primitives::{Address, Bytes, FixedBytes, U256}; -use tangle_subxt::tangle_testnet_runtime::api::assets::events::accounts_destroyed::AssetId; -use tangle_subxt::tangle_testnet_runtime::api::assets::events::burned::Balance; -use tangle_subxt::tangle_testnet_runtime::api::runtime_types::pallet_multi_asset_delegation::types::delegator::DelegatorMetadata; -use tangle_subxt::tangle_testnet_runtime::api::runtime_types::pallet_multi_asset_delegation::types::operator::OperatorMetadata; -use tangle_subxt::tangle_testnet_runtime::api::runtime_types::tangle_primitives::services::Service; -use tangle_subxt::tangle_testnet_runtime::api::system::storage::types::number::Number; -use core::future::Future; -use eigensdk::types::operator::{Operator, OperatorPubKeys}; -use eigensdk::utils::binding::OperatorStateRetriever; -use eigensdk::utils::binding::StakeRegistry::StakeUpdate; -use eigensdk::{ - client_avsregistry::{reader::AvsRegistryChainReader, writer::AvsRegistryChainWriter}, - services_avsregistry::chaincaller::AvsRegistryServiceChainCaller, - services_blsaggregation::bls_agg::BlsAggregatorService, - services_operatorsinfo::operatorsinfo_inmemory::OperatorInfoServiceInMemory, -}; -pub use num_bigint::BigInt; -use std::collections::HashMap; -// derives -pub use gadget_context_derive::*; -use tangle_subxt::tangle_testnet_runtime::api::runtime_types::{ - sp_arithmetic::per_things::Percent, tangle_primitives::services::ServiceBlueprint, -}; - -/// `KeystoreContext` trait provides access to the generic keystore from the context. -pub trait KeystoreContext { - /// Get the keystore client from the context. - fn keystore(&self) -> Result, crate::config::Error>; -} - -/// `GossipNetworkContext` trait provides access to the network client from the context. -pub trait GossipNetworkContext { - /// Get the Goossip client from the context. - fn gossip_network(&self) -> &crate::network::gossip::GossipHandle; -} - -/// `EVMProviderContext` trait provides access to the EVM provider from the context. 
-pub trait EVMProviderContext { - type Network: alloy_network::Network; - type Transport: alloy_transport::Transport + Clone; - type Provider: alloy_provider::Provider; - /// Get the EVM provider from the context. - fn evm_provider( - &self, - ) -> impl Future>; -} - -/// `TangleClientContext` trait provides access to the Tangle client from the context. -pub trait TangleClientContext { - type Config: subxt::Config; - /// Get the Tangle client from the context. - fn tangle_client( - &self, - ) -> impl Future, subxt::Error>>; -} - -/// `ServicesContext` trait provides access to the current service and current blueprint from the context. -pub trait ServicesContext { - type Config: subxt::Config; - /// Get the current blueprint information from the context. - fn current_blueprint( - &self, - client: &subxt::OnlineClient, - ) -> impl Future>; - - /// Query the current blueprint owner from the context. - fn current_blueprint_owner( - &self, - client: &subxt::OnlineClient, - ) -> impl Future>; - - /// Get the current service operators with their restake exposure from the context. - /// This function will return a list of service operators that are selected to run this service - /// instance. - fn current_service_operators( - &self, - client: &subxt::OnlineClient, - ) -> impl Future, subxt::Error>>; - - #[allow(clippy::type_complexity)] - /// Get metadata for a list of operators from the context. - fn operators_metadata( - &self, - client: &subxt::OnlineClient, - operators: Vec, - ) -> impl Future< - Output = Result< - Vec<( - subxt::utils::AccountId32, - OperatorMetadata, - )>, - subxt::Error, - >, - >; - - /// Get metadata for a single operator from the context. - /// This function will return the metadata for a single operator. - fn operator_metadata( - &self, - client: &subxt::OnlineClient, - operator: subxt::utils::AccountId32, - ) -> impl Future< - Output = Result< - Option>, - subxt::Error, - >, - >; - - /// Get the current service instance from the context. - fn service_instance( - &self, - client: &subxt::OnlineClient, - ) -> impl Future, subxt::Error>>; - - #[allow(clippy::type_complexity)] - /// Get delegations for a list of operators from the context. - fn operator_delegations( - &self, - client: &subxt::OnlineClient, - operators: Vec, - ) -> impl Future< - Output = Result< - Vec<( - subxt::utils::AccountId32, // operator - Option>, - )>, - subxt::Error, - >, - >; - - /// Get delegations for a single operator from the context. - fn operator_delegation( - &self, - client: &subxt::OnlineClient, - operator: subxt::utils::AccountId32, - ) -> impl Future< - Output = Result< - Option>, - subxt::Error, - >, - >; -} - -/// `EigenlayerContext` trait provides access to Eigenlayer utilities -#[async_trait::async_trait] -pub trait EigenlayerContext { - /// Provides a reader for the AVS registry. - async fn avs_registry_reader(&self) -> Result; - - /// Provides a writer for the AVS registry. - async fn avs_registry_writer( - &self, - private_key: String, - ) -> Result; - - /// Provides an operator info service. - async fn operator_info_service_in_memory( - &self, - ) -> Result; - - /// Provides an AVS registry service chain caller. - async fn avs_registry_service_chain_caller_in_memory( - &self, - ) -> Result< - AvsRegistryServiceChainCaller, - std::io::Error, - >; - - /// Provides a BLS aggregation service. 
- async fn bls_aggregation_service_in_memory( - &self, - ) -> Result< - BlsAggregatorService< - AvsRegistryServiceChainCaller, - >, - std::io::Error, - >; - - /// Get Operator stake in Quorums at a given block. - async fn get_operator_stake_in_quorums_at_block( - &self, - block_number: u32, - quorum_numbers: Bytes, - ) -> Result>, std::io::Error>; - - /// Get an Operator's stake in Quorums at current block. - async fn get_operator_stake_in_quorums_at_current_block( - &self, - operator_id: FixedBytes<32>, - ) -> Result, std::io::Error>; - - /// Get an Operator by ID. - async fn get_operator_by_id(&self, operator_id: [u8; 32]) -> Result; - - /// Get an Operator stake history. - async fn get_operator_stake_history( - &self, - operator_id: FixedBytes<32>, - quorum_number: u8, - ) -> Result, std::io::Error>; - - /// Get an Operator stake update at a given index. - async fn get_operator_stake_update_at_index( - &self, - quorum_number: u8, - operator_id: FixedBytes<32>, - index: U256, - ) -> Result; - - /// Get an Operator's stake at a given block number. - async fn get_operator_stake_at_block_number( - &self, - operator_id: FixedBytes<32>, - quorum_number: u8, - block_number: u32, - ) -> Result; - - /// Get an Operator's [`details`](OperatorDetails). - async fn get_operator_details( - &self, - operator_addr: Address, - ) -> Result; - - /// Get an Operator's latest stake update. - async fn get_latest_stake_update( - &self, - operator_id: FixedBytes<32>, - quorum_number: u8, - ) -> Result; - - /// Get an Operator's ID as [`FixedBytes`] from its [`Address`]. - async fn get_operator_id( - &self, - operator_addr: Address, - ) -> Result, std::io::Error>; - - /// Get the total stake at a given block number from a given index. - async fn get_total_stake_at_block_number_from_index( - &self, - quorum_number: u8, - block_number: u32, - index: U256, - ) -> Result; - - /// Get the total stake history length of a given quorum. - async fn get_total_stake_history_length( - &self, - quorum_number: u8, - ) -> Result; - - /// Provides the public keys of existing registered operators within the provided block range. - async fn query_existing_registered_operator_pub_keys( - &self, - start_block: u64, - to_block: u64, - ) -> Result<(Vec
, Vec), std::io::Error>; -} diff --git a/sdk/src/lib.rs b/sdk/src/lib.rs index e406de43..7d687d50 100644 --- a/sdk/src/lib.rs +++ b/sdk/src/lib.rs @@ -63,8 +63,7 @@ pub mod tracer; pub mod tx; /// Gadget Context and context extensions -pub mod ctx; - +pub mod contexts; pub mod docker; pub mod utils; @@ -73,11 +72,15 @@ pub use alloy_rpc_types; pub use async_trait; pub use blueprint_serde::ByteBuf; pub use clap; +pub use color_eyre; pub use error::Error; pub use futures; pub use gadget_blueprint_proc_macro::*; pub use libp2p; pub use parking_lot; +pub use round_based; +pub use serde; +pub use subxt; pub use subxt_core; pub use tangle_subxt; pub use tokio; From f35d0c118753adff16c21c809e81f44760ed5d9d Mon Sep 17 00:00:00 2001 From: drewstone Date: Tue, 26 Nov 2024 15:07:14 -0700 Subject: [PATCH 7/7] chore: fix log message content --- blueprint-test-utils/src/tangle/transactions.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/blueprint-test-utils/src/tangle/transactions.rs b/blueprint-test-utils/src/tangle/transactions.rs index 7d2b4eae..36a57b0b 100644 --- a/blueprint-test-utils/src/tangle/transactions.rs +++ b/blueprint-test-utils/src/tangle/transactions.rs @@ -31,7 +31,7 @@ pub async fn join_operators( client: &TestClient, account_id: &TanglePairSigner, ) -> Result<(), Box> { - info!("Joining delegators ..."); + info!("Joining operators ..."); let call_pre = api::tx() .multi_asset_delegation() .join_operators(1_000_000_000_000_000);
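
For orientation, here is a minimal sketch of how a blueprint job might assemble the pieces this series introduces: a `NetworkMultiplexer` over a `Network` implementation plus the `MPCContext` accessors, producing a round-based delivery wrapper. It mirrors the context-derive test earlier in the series and is not part of the patch itself; the function name `setup_round_based_delivery`, the zeroed `task_hash`, the operator-to-party-index mapping, and the exact import paths (`sp_core::ecdsa`, `gadget_sdk::network`) are illustrative assumptions rather than APIs fixed by this patch.

```rust
use std::collections::BTreeMap;
use std::sync::Arc;

use gadget_sdk::contexts::MPCContext;
use gadget_sdk::network::{Network, NetworkMultiplexer};
use sp_core::ecdsa;

/// Illustrative sketch: build a round-based delivery wrapper from a context.
async fn setup_round_based_delivery<C, N, M>(ctx: &C, network: N)
where
    C: MPCContext,
    N: Network,
    M: Clone
        + Send
        + Unpin
        + 'static
        + serde::Serialize
        + serde::de::DeserializeOwned
        + round_based::ProtocolMessage,
{
    // One multiplexer per underlying network; each protocol run is
    // demultiplexed onto its own sub-stream.
    let mux = Arc::new(NetworkMultiplexer::new(network));

    // The local party index and the operator set both come from the context.
    let (my_index, operators) = ctx
        .get_party_index_and_operators()
        .await
        .expect("failed to query party index and operators");

    // Assumption: assign round-based party indices by the operator map's
    // iteration order (operators are keyed by account id).
    let parties: BTreeMap<u16, ecdsa::Public> = operators
        .values()
        .enumerate()
        .map(|(i, key)| (i as u16, key.clone()))
        .collect();

    // A real job would derive the task hash from the job call (e.g.
    // blueprint id + call id); zeroes keep the sketch minimal.
    let task_hash = [0u8; 32];

    // The returned wrapper is what a round-based protocol would consume as
    // its delivery layer.
    let _delivery = ctx
        .create_network_delivery_wrapper::<M>(mux, my_index as u16, task_hash, parties)
        .expect("failed to create network delivery wrapper");
}
```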