From 247f460a89c429d9bae3df02d4aa1eff1ca14015 Mon Sep 17 00:00:00 2001 From: "Alisher A. Khassanov" Date: Mon, 11 Nov 2024 12:39:02 +0400 Subject: [PATCH] Move aggregator's JSON API structs out of pallet --- .../ddc-verification/src/aggregator_client.rs | 230 +++++++++++- pallets/ddc-verification/src/lib.rs | 349 +++++------------- pallets/ddc-verification/src/tests.rs | 178 ++++----- 3 files changed, 396 insertions(+), 361 deletions(-) diff --git a/pallets/ddc-verification/src/aggregator_client.rs b/pallets/ddc-verification/src/aggregator_client.rs index 3acd0dc38..c7c14b00c 100644 --- a/pallets/ddc-verification/src/aggregator_client.rs +++ b/pallets/ddc-verification/src/aggregator_client.rs @@ -1,6 +1,6 @@ #![allow(dead_code)] -use ddc_primitives::{BucketId, DdcEra}; +use ddc_primitives::{AggregatorInfo, BucketId, DdcEra}; use prost::Message; use sp_io::offchain::timestamp; use sp_runtime::offchain::{http, Duration}; @@ -23,7 +23,7 @@ impl<'a> AggregatorClient<'a> { era_id: DdcEra, limit: Option, prev_token: Option, - ) -> Result, http::Error> { + ) -> Result, http::Error> { let mut url = format!("{}/activity/buckets?eraId={}", self.base_url, era_id); if let Some(limit) = limit { url = format!("{}&limit={}", url, limit); @@ -43,7 +43,7 @@ impl<'a> AggregatorClient<'a> { era_id: DdcEra, limit: Option, prev_token: Option, // node_id hex string - ) -> Result, http::Error> { + ) -> Result, http::Error> { let mut url = format!("{}/activity/nodes?eraId={}", self.base_url, era_id); if let Some(limit) = limit { url = format!("{}&limit={}", url, limit); @@ -102,7 +102,7 @@ impl<'a> AggregatorClient<'a> { Ok(proto_response) } - pub fn eras(&self) -> Result, http::Error> { + pub fn eras(&self) -> Result, http::Error> { let url = format!("{}/activity/eras", self.base_url); let response = self.get(&url, Accept::Any)?; let body = response.body().collect::>(); @@ -118,7 +118,7 @@ impl<'a> AggregatorClient<'a> { node_id: &str, merkle_tree_node_id: u32, levels: u16, - ) -> Result { + ) -> Result { let url = format!( "{}/activity/buckets/{}/traverse?eraId={}&nodeId={}&merkleTreeNodeId={}&levels={}", self.base_url, bucket_id, era_id, node_id, merkle_tree_node_id, levels, @@ -137,7 +137,7 @@ impl<'a> AggregatorClient<'a> { node_id: &str, merkle_tree_node_id: u32, levels: u16, - ) -> Result { + ) -> Result { let url = format!( "{}/activity/nodes/{}/traverse?eraId={}&merkleTreeNodeId={}&levels={}", self.base_url, node_id, era_id, merkle_tree_node_id, levels, @@ -213,3 +213,221 @@ enum Accept { Any, Protobuf, } + +pub(crate) mod json { + use super::*; + + /// Node aggregate response from aggregator. + #[derive( + Debug, Serialize, Deserialize, Clone, Hash, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, + )] + pub struct NodeAggregateResponse { + /// Node id. + pub node_id: String, + /// Total amount of stored bytes. + pub stored_bytes: i64, + /// Total amount of transferred bytes. + pub transferred_bytes: u64, + /// Total number of puts. + pub number_of_puts: u64, + /// Total number of gets. + pub number_of_gets: u64, + } + + /// DDC aggregation era + #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Encode, Decode)] + pub struct AggregationEraResponse { + pub id: DdcEra, + pub status: String, + pub start: i64, + pub end: i64, + pub processing_time: i64, + pub nodes_total: u32, + pub nodes_processed: u32, + pub records_processed: u32, + pub records_applied: u32, + pub records_discarded: u32, + pub attempt: u32, + } + + /// Bucket aggregate response from aggregator. 
+ #[derive( + Debug, Serialize, Deserialize, Clone, Hash, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, + )] + pub struct BucketAggregateResponse { + /// Bucket id + pub bucket_id: BucketId, + /// Total amount of stored bytes. + pub stored_bytes: i64, + /// Total amount of transferred bytes. + pub transferred_bytes: u64, + /// Total number of puts. + pub number_of_puts: u64, + /// Total number of gets. + pub number_of_gets: u64, + /// Bucket sub aggregates. + pub sub_aggregates: Vec, + } + + /// Sub aggregates of a bucket. + #[derive( + Debug, Serialize, Deserialize, Clone, Hash, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, + )] + #[allow(non_snake_case)] + pub struct BucketSubAggregateResponse { + /// Node id. + pub NodeID: String, + /// Total amount of stored bytes. + pub stored_bytes: i64, + /// Total amount of transferred bytes. + pub transferred_bytes: u64, + /// Total number of puts. + pub number_of_puts: u64, + /// Total number of gets. + pub number_of_gets: u64, + } + + /// Bucket activity per a DDC node. + #[derive( + Debug, Serialize, Deserialize, Clone, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, + )] + pub struct BucketSubAggregate { + /// Bucket id + pub bucket_id: BucketId, + /// Node id. + pub node_id: String, + /// Total amount of stored bytes. + pub stored_bytes: i64, + /// Total amount of transferred bytes. + pub transferred_bytes: u64, + /// Total number of puts. + pub number_of_puts: u64, + /// Total number of gets. + pub number_of_gets: u64, + /// Aggregator data. + pub aggregator: AggregatorInfo, + } + + #[derive( + Debug, Serialize, Deserialize, Clone, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, + )] + pub struct NodeAggregate { + /// Node id. + pub node_id: String, + /// Total amount of stored bytes. + pub stored_bytes: i64, + /// Total amount of transferred bytes. + pub transferred_bytes: u64, + /// Total number of puts. + pub number_of_puts: u64, + /// Total number of gets. + pub number_of_gets: u64, + /// Node data. + pub aggregator: AggregatorInfo, + } + + /// Challenge Response + #[derive( + Debug, Serialize, Deserialize, Clone, Hash, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, + )] + pub struct ChallengeAggregateResponse { + /// proofs + pub proofs: Vec, //todo! add optional fields + } + + #[derive( + Debug, Serialize, Deserialize, Clone, Hash, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, + )] + pub struct Proof { + pub merkle_tree_node_id: u32, + pub usage: Usage, + pub path: Vec, //todo! add base64 deserialization + pub leafs: Vec, + } + + #[derive( + Debug, Serialize, Deserialize, Clone, Hash, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, + )] + pub struct Usage { + /// Total amount of stored bytes. + pub stored_bytes: i64, + /// Total amount of transferred bytes. + pub transferred_bytes: u64, + /// Total number of puts. + pub number_of_puts: u64, + /// Total number of gets. + pub number_of_gets: u64, + } + + #[derive( + Debug, Serialize, Deserialize, Clone, Hash, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, + )] + pub struct Leaf { + pub record: Record, + pub transferred_bytes: u64, + pub stored_bytes: i64, + // todo! 
add links if there is no record + } + + #[derive( + Debug, Serialize, Deserialize, Clone, Hash, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, + )] + #[allow(non_snake_case)] + pub struct Record { + pub id: String, + pub upstream: Upstream, + pub downstream: Vec, + pub timestamp: String, + pub signature: Signature, + } + + #[derive( + Debug, Serialize, Deserialize, Clone, Hash, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, + )] + pub struct Upstream { + pub request: Request, + } + + #[derive( + Debug, Serialize, Deserialize, Clone, Hash, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, + )] + pub struct Downstream { + pub request: Request, + } + #[derive( + Debug, Serialize, Deserialize, Clone, Hash, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, + )] + #[allow(non_snake_case)] + pub struct Request { + pub requestId: String, + pub requestType: String, + pub contentType: String, + pub bucketId: String, + pub pieceCid: String, + pub offset: String, + pub size: String, + pub timestamp: String, + pub signature: Signature, + } + + #[derive( + Debug, Serialize, Deserialize, Clone, Hash, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, + )] + pub struct Signature { + pub algorithm: String, + pub signer: String, + pub value: String, + } + + #[derive( + Debug, Serialize, Deserialize, Clone, Hash, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, + )] + pub struct MerkleTreeNodeResponse { + pub merkle_tree_node_id: u32, + pub hash: String, + pub stored_bytes: i64, + pub transferred_bytes: u64, + pub number_of_puts: u64, + pub number_of_gets: u64, + } +} diff --git a/pallets/ddc-verification/src/lib.rs b/pallets/ddc-verification/src/lib.rs index d439a1f14..d6dc6ac5c 100644 --- a/pallets/ddc-verification/src/lib.rs +++ b/pallets/ddc-verification/src/lib.rs @@ -554,8 +554,8 @@ pub mod pallet { pub end: i64, } - impl From for EraActivity { - fn from(era: AggregationEraResponse) -> Self { + impl From for EraActivity { + fn from(era: aggregator_client::json::AggregationEraResponse) -> Self { Self { id: era.id, start: era.start, end: era.end } } } @@ -572,220 +572,6 @@ pub mod pallet { pub(crate) batch_proof: MMRProof, } - /// Node aggregate response from aggregator. - #[derive( - Debug, Serialize, Deserialize, Clone, Hash, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, - )] - pub(crate) struct NodeAggregateResponse { - /// Node id. - pub(crate) node_id: String, - /// Total amount of stored bytes. - pub(crate) stored_bytes: i64, - /// Total amount of transferred bytes. - pub(crate) transferred_bytes: u64, - /// Total number of puts. - pub(crate) number_of_puts: u64, - /// Total number of gets. - pub(crate) number_of_gets: u64, - } - - /// DDC aggregation era - #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Encode, Decode)] - pub(crate) struct AggregationEraResponse { - pub(crate) id: DdcEra, - pub(crate) status: String, - pub(crate) start: i64, - pub(crate) end: i64, - pub(crate) processing_time: i64, - pub(crate) nodes_total: u32, - pub(crate) nodes_processed: u32, - pub(crate) records_processed: u32, - pub(crate) records_applied: u32, - pub(crate) records_discarded: u32, - pub(crate) attempt: u32, - } - - /// Bucket aggregate response from aggregator. - #[derive( - Debug, Serialize, Deserialize, Clone, Hash, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, - )] - pub(crate) struct BucketAggregateResponse { - /// Bucket id - pub(crate) bucket_id: BucketId, - /// Total amount of stored bytes. - pub(crate) stored_bytes: i64, - /// Total amount of transferred bytes. 
- pub(crate) transferred_bytes: u64, - /// Total number of puts. - pub(crate) number_of_puts: u64, - /// Total number of gets. - pub(crate) number_of_gets: u64, - /// Bucket sub aggregates. - pub(crate) sub_aggregates: Vec, - } - - /// Sub aggregates of a bucket. - #[derive( - Debug, Serialize, Deserialize, Clone, Hash, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, - )] - #[allow(non_snake_case)] - pub(crate) struct BucketSubAggregateResponse { - /// Node id. - pub(crate) NodeID: String, - /// Total amount of stored bytes. - pub(crate) stored_bytes: i64, - /// Total amount of transferred bytes. - pub(crate) transferred_bytes: u64, - /// Total number of puts. - pub(crate) number_of_puts: u64, - /// Total number of gets. - pub(crate) number_of_gets: u64, - } - - /// Bucket activity per a DDC node. - #[derive( - Debug, Serialize, Deserialize, Clone, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, - )] - pub(crate) struct BucketSubAggregate { - /// Bucket id - pub(crate) bucket_id: BucketId, - /// Node id. - pub(crate) node_id: String, - /// Total amount of stored bytes. - pub(crate) stored_bytes: i64, - /// Total amount of transferred bytes. - pub(crate) transferred_bytes: u64, - /// Total number of puts. - pub(crate) number_of_puts: u64, - /// Total number of gets. - pub(crate) number_of_gets: u64, - /// Aggregator data. - pub(crate) aggregator: AggregatorInfo, - } - - #[derive( - Debug, Serialize, Deserialize, Clone, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, - )] - pub(crate) struct NodeAggregate { - /// Node id. - pub(crate) node_id: String, - /// Total amount of stored bytes. - pub(crate) stored_bytes: i64, - /// Total amount of transferred bytes. - pub(crate) transferred_bytes: u64, - /// Total number of puts. - pub(crate) number_of_puts: u64, - /// Total number of gets. - pub(crate) number_of_gets: u64, - /// Node data. - pub(crate) aggregator: AggregatorInfo, - } - - /// Challenge Response - #[derive( - Debug, Serialize, Deserialize, Clone, Hash, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, - )] - pub(crate) struct ChallengeAggregateResponse { - /// proofs - pub proofs: Vec, //todo! add optional fields - } - - #[derive( - Debug, Serialize, Deserialize, Clone, Hash, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, - )] - pub(crate) struct Proof { - pub merkle_tree_node_id: u32, - pub usage: Usage, - pub path: Vec, //todo! add base64 deserialization - pub leafs: Vec, - } - - #[derive( - Debug, Serialize, Deserialize, Clone, Hash, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, - )] - pub(crate) struct Usage { - /// Total amount of stored bytes. - pub stored_bytes: i64, - /// Total amount of transferred bytes. - pub transferred_bytes: u64, - /// Total number of puts. - pub number_of_puts: u64, - /// Total number of gets. - pub number_of_gets: u64, - } - - #[derive( - Debug, Serialize, Deserialize, Clone, Hash, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, - )] - pub(crate) struct Leaf { - pub record: Record, - pub transferred_bytes: u64, - pub stored_bytes: i64, - // todo! 
add links if there is no record - } - - #[derive( - Debug, Serialize, Deserialize, Clone, Hash, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, - )] - #[allow(non_snake_case)] - pub(crate) struct Record { - pub id: String, - pub upstream: Upstream, - pub downstream: Vec, - pub timestamp: String, - pub signature: Signature, - } - - #[derive( - Debug, Serialize, Deserialize, Clone, Hash, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, - )] - pub(crate) struct Upstream { - pub request: Request, - } - - #[derive( - Debug, Serialize, Deserialize, Clone, Hash, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, - )] - pub(crate) struct Downstream { - pub request: Request, - } - #[derive( - Debug, Serialize, Deserialize, Clone, Hash, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, - )] - #[allow(non_snake_case)] - pub(crate) struct Request { - pub requestId: String, - pub requestType: String, - pub contentType: String, - pub bucketId: String, - pub pieceCid: String, - pub offset: String, - pub size: String, - pub timestamp: String, - pub signature: Signature, - } - - #[derive( - Debug, Serialize, Deserialize, Clone, Hash, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, - )] - pub(crate) struct Signature { - pub algorithm: String, - pub signer: String, - pub value: String, - } - - #[derive( - Debug, Serialize, Deserialize, Clone, Hash, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, - )] - pub(crate) struct MerkleTreeNodeResponse { - merkle_tree_node_id: u32, - hash: String, - stored_bytes: i64, - transferred_bytes: u64, - number_of_puts: u64, - number_of_gets: u64, - } - /// The `ConsolidatedAggregate` struct represents a merging result of multiple aggregates /// that have reached consensus on the usage criteria. This result should be taken into /// consideration when choosing the intensity of the challenge. 
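The hunks that follow re-target the pallet's `Aggregate` trait at the relocated `aggregator_client::json` types without touching the hashing logic: each aggregate is identified by SCALE-encoding its fields in a fixed order and hashing the concatenated bytes. A minimal standalone sketch of that pattern, using the `NodeAggregate` field set from this patch — note that `blake2_256` here is an assumption standing in for the pallet's configurable `T::ActivityHasher`:

```rust
use codec::Encode; // parity-scale-codec, aliased as `codec`
use sp_io::hashing::blake2_256;

// Sketch of the `Aggregate::hash` pattern in lib.rs: SCALE-encode each usage
// field of a node aggregate and hash the concatenated byte string.
fn node_aggregate_hash(
    node_id: &str,
    stored_bytes: i64,
    transferred_bytes: u64,
    number_of_puts: u64,
    number_of_gets: u64,
) -> [u8; 32] {
    let mut data = node_id.encode();
    data.extend_from_slice(&stored_bytes.encode());
    data.extend_from_slice(&transferred_bytes.encode());
    data.extend_from_slice(&number_of_puts.encode());
    data.extend_from_slice(&number_of_gets.encode());
    blake2_256(&data) // stand-in hasher; the pallet uses T::ActivityHasher
}
```

Because the structs only moved and kept their `Encode` derives and field order, the resulting hashes are unchanged by this patch.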
@@ -833,7 +619,7 @@ pub mod pallet { fn get_aggregator(&self) -> AggregatorInfo; } - impl Aggregate for BucketSubAggregate { + impl Aggregate for aggregator_client::json::BucketSubAggregate { fn hash(&self) -> ActivityHash { let mut data = self.bucket_id.encode(); data.extend_from_slice(&self.node_id.encode()); @@ -857,7 +643,7 @@ pub mod pallet { } } - impl Aggregate for NodeAggregate { + impl Aggregate for aggregator_client::json::NodeAggregate { fn hash(&self) -> ActivityHash { let mut data = self.node_id.encode(); data.extend_from_slice(&self.stored_bytes.encode()); @@ -891,7 +677,7 @@ pub mod pallet { fn leaf_hash(&self) -> ActivityHash; } - impl NodeAggregateLeaf for Leaf { + impl NodeAggregateLeaf for aggregator_client::json::Leaf { fn leaf_hash(&self) -> ActivityHash { let mut data = self.record.id.encode(); data.extend_from_slice(&self.record.upstream.request.requestType.encode()); @@ -901,7 +687,7 @@ pub mod pallet { } } - impl BucketSubAggregateLeaf for Leaf { + impl BucketSubAggregateLeaf for aggregator_client::json::Leaf { fn leaf_hash(&self) -> ActivityHash { let mut data = self.record.upstream.request.bucketId.encode(); data.extend_from_slice(&self.record.encode()); @@ -1930,7 +1716,7 @@ pub mod pallet { } pub(crate) fn _get_hash_from_merkle_path( - challenge_response: ChallengeAggregateResponse, + challenge_response: aggregator_client::json::ChallengeAggregateResponse, cluster_id: &ClusterId, era_id: DdcEra, aggregate_key: AggregateKey, @@ -2046,11 +1832,15 @@ pub mod pallet { pub(crate) fn group_buckets_sub_aggregates_by_consistency( cluster_id: &ClusterId, era_id: DdcEra, - buckets_aggregates_by_aggregator: Vec<(AggregatorInfo, Vec)>, + buckets_aggregates_by_aggregator: Vec<( + AggregatorInfo, + Vec, + )>, redundancy_factor: u16, quorum: Percent, - ) -> ConsistencyGroups { - let mut buckets_sub_aggregates: Vec = Vec::new(); + ) -> ConsistencyGroups { + let mut buckets_sub_aggregates: Vec = + Vec::new(); log::info!( "🏠⏳ Starting fetching bucket sub-aggregates for cluster_id: {:?} for era_id: {:?}", @@ -2062,7 +1852,7 @@ pub mod pallet { { for bucket_aggregate_resp in buckets_aggregates_resp { for bucket_sub_aggregate_resp in bucket_aggregate_resp.sub_aggregates.clone() { - let bucket_sub_aggregate = BucketSubAggregate { + let bucket_sub_aggregate = aggregator_client::json::BucketSubAggregate { bucket_id: bucket_aggregate_resp.bucket_id, node_id: bucket_sub_aggregate_resp.NodeID, stored_bytes: bucket_sub_aggregate_resp.stored_bytes, @@ -2108,9 +1898,11 @@ pub mod pallet { PayoutState::Initialized { if let Some((_, _, customers_activity_batch_roots, _, _, _)) = - Self::fetch_validation_activities::( - cluster_id, era_id, - ) { + Self::fetch_validation_activities::< + aggregator_client::json::BucketSubAggregate, + aggregator_client::json::NodeAggregate, + >(cluster_id, era_id) + { Self::fetch_customer_activity( cluster_id, era_id, @@ -2122,9 +1914,11 @@ pub mod pallet { let _ = Self::process_dac_era(cluster_id, Some(era_activity), batch_size)?; if let Some((_, _, customers_activity_batch_roots, _, _, _)) = - Self::fetch_validation_activities::( - cluster_id, era_id, - ) { + Self::fetch_validation_activities::< + aggregator_client::json::BucketSubAggregate, + aggregator_client::json::NodeAggregate, + >(cluster_id, era_id) + { Self::fetch_customer_activity( cluster_id, era_id, @@ -2176,9 +1970,11 @@ pub mod pallet { _, _, _, - )) = Self::fetch_validation_activities::( - cluster_id, era_id, - ) { + )) = Self::fetch_validation_activities::< + 
aggregator_client::json::BucketSubAggregate, + aggregator_client::json::NodeAggregate, + >(cluster_id, era_id) + { Self::fetch_charging_activities( cluster_id, batch_size, @@ -2198,9 +1994,11 @@ pub mod pallet { _, _, _, - )) = Self::fetch_validation_activities::( - cluster_id, era_id, - ) { + )) = Self::fetch_validation_activities::< + aggregator_client::json::BucketSubAggregate, + aggregator_client::json::NodeAggregate, + >(cluster_id, era_id) + { Self::fetch_charging_activities( cluster_id, batch_size, @@ -2224,7 +2022,7 @@ pub mod pallet { cluster_id: &ClusterId, batch_size: usize, era_id: DdcEra, - customers_total_activity: Vec, + customers_total_activity: Vec, customers_activity_batch_roots: Vec, ) -> Result, Vec> { let batch_index = T::PayoutVisitor::get_next_customer_batch_for_payment( @@ -2330,9 +2128,11 @@ pub mod pallet { PayoutState::CustomersChargedWithFees { if let Some((_, _, _, nodes_total_activity, _, nodes_activity_batch_roots)) = - Self::fetch_validation_activities::( - cluster_id, era_id, - ) { + Self::fetch_validation_activities::< + aggregator_client::json::BucketSubAggregate, + aggregator_client::json::NodeAggregate, + >(cluster_id, era_id) + { Self::fetch_reward_activities( cluster_id, era_id, @@ -2352,9 +2152,11 @@ pub mod pallet { nodes_total_activity, _, nodes_activity_batch_roots, - )) = Self::fetch_validation_activities::( - cluster_id, era_id, - ) { + )) = Self::fetch_validation_activities::< + aggregator_client::json::BucketSubAggregate, + aggregator_client::json::NodeAggregate, + >(cluster_id, era_id) + { Self::fetch_reward_activities( cluster_id, era_id, @@ -2377,7 +2179,7 @@ pub mod pallet { pub(crate) fn fetch_reward_activities( cluster_id: &ClusterId, era_id: DdcEra, - nodes_total_activity: Vec, + nodes_total_activity: Vec, nodes_activity_batch_roots: Vec, current_nodes_total_usage: i64, ) -> Result, Vec> { @@ -2419,9 +2221,11 @@ pub mod pallet { PayoutState::RewardingProviders { if let Some((_, _, _, nodes_total_activity, _, nodes_activity_batch_roots)) = - Self::fetch_validation_activities::( - cluster_id, era_id, - ) { + Self::fetch_validation_activities::< + aggregator_client::json::BucketSubAggregate, + aggregator_client::json::NodeAggregate, + >(cluster_id, era_id) + { Self::fetch_reward_provider_batch( cluster_id, batch_size, @@ -2441,9 +2245,11 @@ pub mod pallet { nodes_total_activity, _, nodes_activity_batch_roots, - )) = Self::fetch_validation_activities::( - cluster_id, era_id, - ) { + )) = Self::fetch_validation_activities::< + aggregator_client::json::BucketSubAggregate, + aggregator_client::json::NodeAggregate, + >(cluster_id, era_id) + { Self::fetch_reward_provider_batch( cluster_id, batch_size, @@ -2467,7 +2273,7 @@ pub mod pallet { cluster_id: &ClusterId, batch_size: usize, era_id: DdcEra, - nodes_total_activity: Vec, + nodes_total_activity: Vec, nodes_activity_batch_roots: Vec, ) -> Result, Vec> { let batch_index = T::PayoutVisitor::get_next_provider_batch_for_payment( @@ -3063,11 +2869,14 @@ pub mod pallet { pub(crate) fn group_nodes_aggregates_by_consistency( cluster_id: &ClusterId, era_id: DdcEra, - nodes_aggregates_by_aggregator: Vec<(AggregatorInfo, Vec)>, + nodes_aggregates_by_aggregator: Vec<( + AggregatorInfo, + Vec, + )>, redundancy_factor: u16, quorum: Percent, - ) -> ConsistencyGroups { - let mut nodes_aggregates: Vec = Vec::new(); + ) -> ConsistencyGroups { + let mut nodes_aggregates: Vec = Vec::new(); log::info!( "🏠⏳ Starting fetching node aggregates for cluster_id: {:?} for era_id: {:?}", @@ -3077,7 +2886,7 @@ pub mod 
pallet { for (aggregator_info, nodes_aggregates_resp) in nodes_aggregates_by_aggregator.clone() { for node_aggregate_resp in nodes_aggregates_resp.clone() { - let node_aggregate = NodeAggregate { + let node_aggregate = aggregator_client::json::NodeAggregate { node_id: node_aggregate_resp.node_id, stored_bytes: node_aggregate_resp.stored_bytes, transferred_bytes: node_aggregate_resp.transferred_bytes, @@ -3160,7 +2969,7 @@ pub mod pallet { aggregate_key: AggregateKey, merkle_node_identifiers: Vec, aggregator: AggregatorInfo, - ) -> Result { + ) -> Result { let response = Self::_fetch_challenge_response( era_id, aggregate_key.clone(), @@ -3213,7 +3022,7 @@ pub mod pallet { aggregate_key: AggregateKey, merkle_node_identifiers: Vec, node_params: &StorageNodeParams, - ) -> Result { + ) -> Result { let scheme = "http"; let host = str::from_utf8(&node_params.host).map_err(|_| http::Error::Unknown)?; @@ -3291,7 +3100,7 @@ pub mod pallet { merkle_tree_node_id: u32, levels: u16, node_params: &StorageNodeParams, - ) -> Result { + ) -> Result { let host = str::from_utf8(&node_params.host).map_err(|_| http::Error::Unknown)?; let base_url = format!("http://{}:{}", host, node_params.http_port); let client = aggregator_client::AggregatorClient::new( @@ -3323,7 +3132,7 @@ pub mod pallet { #[allow(dead_code)] pub(crate) fn fetch_processed_eras( node_params: &StorageNodeParams, - ) -> Result, http::Error> { + ) -> Result, http::Error> { let host = str::from_utf8(&node_params.host).map_err(|_| http::Error::Unknown)?; let base_url = format!("http://{}:{}", host, node_params.http_port); let client = aggregator_client::AggregatorClient::new( @@ -3346,7 +3155,7 @@ pub mod pallet { _cluster_id: &ClusterId, era_id: DdcEra, node_params: &StorageNodeParams, - ) -> Result, http::Error> { + ) -> Result, http::Error> { let host = str::from_utf8(&node_params.host).map_err(|_| http::Error::Unknown)?; let base_url = format!("http://{}:{}", host, node_params.http_port); let client = aggregator_client::AggregatorClient::new( @@ -3388,7 +3197,7 @@ pub mod pallet { _cluster_id: &ClusterId, era_id: DdcEra, node_params: &StorageNodeParams, - ) -> Result, http::Error> { + ) -> Result, http::Error> { let host = str::from_utf8(&node_params.host).map_err(|_| http::Error::Unknown)?; let base_url = format!("http://{}:{}", host, node_params.http_port); let client = aggregator_client::AggregatorClient::new( @@ -3461,7 +3270,10 @@ pub mod pallet { cluster_id: &ClusterId, era_id: DdcEra, dac_nodes: &[(NodePubKey, StorageNodeParams)], - ) -> Result)>, OCWError> { + ) -> Result< + Vec<(AggregatorInfo, Vec)>, + OCWError, + > { let mut nodes_aggregates = Vec::new(); for (node_key, node_params) in dac_nodes { @@ -3501,9 +3313,14 @@ pub mod pallet { cluster_id: &ClusterId, era_id: DdcEra, dac_nodes: &[(NodePubKey, StorageNodeParams)], - ) -> Result)>, OCWError> { - let mut bucket_aggregates: Vec<(AggregatorInfo, Vec)> = - Vec::new(); + ) -> Result< + Vec<(AggregatorInfo, Vec)>, + OCWError, + > { + let mut bucket_aggregates: Vec<( + AggregatorInfo, + Vec, + )> = Vec::new(); for (node_key, node_params) in dac_nodes { let aggregates_res = Self::fetch_bucket_aggregates(cluster_id, era_id, node_params); diff --git a/pallets/ddc-verification/src/tests.rs b/pallets/ddc-verification/src/tests.rs index b8db476ea..9442324cc 100644 --- a/pallets/ddc-verification/src/tests.rs +++ b/pallets/ddc-verification/src/tests.rs @@ -15,7 +15,7 @@ use sp_io::TestExternalities; use sp_keystore::{testing::MemoryKeystore, Keystore, KeystoreExt}; use 
sp_runtime::{offchain::Duration, AccountId32}; -use crate::{mock::*, Error, NodeAggregateResponse, *}; +use crate::{mock::*, Error, *}; #[allow(dead_code)] fn register_validators(validators: Vec) { @@ -42,7 +42,7 @@ fn get_validators() -> Vec { vec![validator1, validator2, validator3, validator4, validator5] } -fn get_node_activities() -> Vec { +fn get_node_activities() -> Vec { let aggregator = AggregatorInfo { node_pub_key: NodePubKey::StoragePubKey(AccountId32::new([1; 32])), node_params: StorageNodeParams { @@ -56,7 +56,7 @@ fn get_node_activities() -> Vec { }, }; - let node1 = NodeAggregate { + let node1 = aggregator_client::json::NodeAggregate { node_id: "0".to_string(), stored_bytes: -100, transferred_bytes: 50, @@ -64,7 +64,7 @@ fn get_node_activities() -> Vec { number_of_gets: 20, aggregator: aggregator.clone(), }; - let node2 = NodeAggregate { + let node2 = aggregator_client::json::NodeAggregate { node_id: "1".to_string(), stored_bytes: -101, transferred_bytes: 51, @@ -72,7 +72,7 @@ fn get_node_activities() -> Vec { number_of_gets: 21, aggregator: aggregator.clone(), }; - let node3 = NodeAggregate { + let node3 = aggregator_client::json::NodeAggregate { node_id: "2".to_string(), stored_bytes: 102, transferred_bytes: 52, @@ -80,7 +80,7 @@ fn get_node_activities() -> Vec { number_of_gets: 22, aggregator: aggregator.clone(), }; - let node4 = NodeAggregate { + let node4 = aggregator_client::json::NodeAggregate { node_id: "3".to_string(), stored_bytes: 103, transferred_bytes: 53, @@ -88,7 +88,7 @@ fn get_node_activities() -> Vec { number_of_gets: 23, aggregator: aggregator.clone(), }; - let node5 = NodeAggregate { + let node5 = aggregator_client::json::NodeAggregate { node_id: "4".to_string(), stored_bytes: 104, transferred_bytes: 54, @@ -116,15 +116,15 @@ fn fetch_node_aggregates_works() { let port = 80; let era_id = 1; - // Create a sample NodeAggregateResponse instance - let node_activity1 = NodeAggregateResponse { + // Create a sample aggregator_client::json::NodeAggregateResponse instance + let node_activity1 = aggregator_client::json::NodeAggregateResponse { node_id: "1".to_string(), stored_bytes: 100, transferred_bytes: 50, number_of_puts: 10, number_of_gets: 20, }; - let node_activity2 = NodeAggregateResponse { + let node_activity2 = aggregator_client::json::NodeAggregateResponse { node_id: "2".to_string(), stored_bytes: 110, transferred_bytes: 510, @@ -195,14 +195,14 @@ fn fetch_bucket_aggregates_works() { let port = 80; let era_id = 1; - // Create a sample NodeAggregateResponse instance - let bucket_aggregate1 = BucketAggregateResponse { + // Create a sample aggregator_client::json::NodeAggregateResponse instance + let bucket_aggregate1 = aggregator_client::json::BucketAggregateResponse { stored_bytes: 100, transferred_bytes: 50, number_of_puts: 10, number_of_gets: 20, bucket_id: 111, - sub_aggregates: vec![BucketSubAggregateResponse { + sub_aggregates: vec![aggregator_client::json::BucketSubAggregateResponse { NodeID: "1".to_string(), stored_bytes: 100, transferred_bytes: 50, @@ -210,13 +210,13 @@ fn fetch_bucket_aggregates_works() { number_of_gets: 20, }], }; - let bucket_aggregate2 = BucketAggregateResponse { + let bucket_aggregate2 = aggregator_client::json::BucketAggregateResponse { stored_bytes: 1000, transferred_bytes: 500, number_of_puts: 100, number_of_gets: 200, bucket_id: 222, - sub_aggregates: vec![BucketSubAggregateResponse { + sub_aggregates: vec![aggregator_client::json::BucketSubAggregateResponse { NodeID: "1".to_string(), stored_bytes: 1000, 
transferred_bytes: 500, @@ -344,13 +344,13 @@ fn buckets_sub_aggregates_in_consensus_merged() { let resp1 = ( aggregator1, - vec![BucketAggregateResponse { + vec![aggregator_client::json::BucketAggregateResponse { stored_bytes: 100, transferred_bytes: 50, number_of_puts: 10, number_of_gets: 20, bucket_id: 1, - sub_aggregates: vec![BucketSubAggregateResponse { + sub_aggregates: vec![aggregator_client::json::BucketSubAggregateResponse { NodeID: "1".to_string(), stored_bytes: 100, transferred_bytes: 50, @@ -362,13 +362,13 @@ fn buckets_sub_aggregates_in_consensus_merged() { let resp2 = ( aggregator2, - vec![BucketAggregateResponse { + vec![aggregator_client::json::BucketAggregateResponse { stored_bytes: 100, transferred_bytes: 50, number_of_puts: 10, number_of_gets: 20, bucket_id: 1, - sub_aggregates: vec![BucketSubAggregateResponse { + sub_aggregates: vec![aggregator_client::json::BucketSubAggregateResponse { NodeID: "1".to_string(), stored_bytes: 100, transferred_bytes: 50, @@ -380,13 +380,13 @@ fn buckets_sub_aggregates_in_consensus_merged() { let resp3 = ( aggregator3, - vec![BucketAggregateResponse { + vec![aggregator_client::json::BucketAggregateResponse { stored_bytes: 100, transferred_bytes: 50, number_of_puts: 10, number_of_gets: 20, bucket_id: 1, - sub_aggregates: vec![BucketSubAggregateResponse { + sub_aggregates: vec![aggregator_client::json::BucketSubAggregateResponse { NodeID: "1".to_string(), stored_bytes: 100, transferred_bytes: 50, @@ -468,13 +468,13 @@ fn buckets_sub_aggregates_in_quorum_merged() { let resp1 = ( aggregator1, - vec![BucketAggregateResponse { + vec![aggregator_client::json::BucketAggregateResponse { stored_bytes: 100, transferred_bytes: 50, number_of_puts: 10, number_of_gets: 20, bucket_id: 1, - sub_aggregates: vec![BucketSubAggregateResponse { + sub_aggregates: vec![aggregator_client::json::BucketSubAggregateResponse { NodeID: "1".to_string(), stored_bytes: 100, transferred_bytes: 50, @@ -486,13 +486,13 @@ fn buckets_sub_aggregates_in_quorum_merged() { let resp2 = ( aggregator2, - vec![BucketAggregateResponse { + vec![aggregator_client::json::BucketAggregateResponse { stored_bytes: 200, transferred_bytes: 50, number_of_puts: 10, number_of_gets: 20, bucket_id: 1, - sub_aggregates: vec![BucketSubAggregateResponse { + sub_aggregates: vec![aggregator_client::json::BucketSubAggregateResponse { NodeID: "1".to_string(), stored_bytes: 200, transferred_bytes: 50, @@ -504,13 +504,13 @@ fn buckets_sub_aggregates_in_quorum_merged() { let resp3 = ( aggregator3, - vec![BucketAggregateResponse { + vec![aggregator_client::json::BucketAggregateResponse { stored_bytes: 100, transferred_bytes: 50, number_of_puts: 10, number_of_gets: 20, bucket_id: 1, - sub_aggregates: vec![BucketSubAggregateResponse { + sub_aggregates: vec![aggregator_client::json::BucketSubAggregateResponse { NodeID: "1".to_string(), stored_bytes: 100, transferred_bytes: 50, @@ -592,13 +592,13 @@ fn buckets_sub_aggregates_in_others_merged() { let resp1 = ( aggregator1, - vec![BucketAggregateResponse { + vec![aggregator_client::json::BucketAggregateResponse { stored_bytes: 100, transferred_bytes: 50, number_of_puts: 10, number_of_gets: 20, bucket_id: 1, - sub_aggregates: vec![BucketSubAggregateResponse { + sub_aggregates: vec![aggregator_client::json::BucketSubAggregateResponse { NodeID: "1".to_string(), stored_bytes: 100, transferred_bytes: 50, @@ -610,13 +610,13 @@ fn buckets_sub_aggregates_in_others_merged() { let resp2 = ( aggregator2, - vec![BucketAggregateResponse { + 
vec![aggregator_client::json::BucketAggregateResponse { stored_bytes: 200, transferred_bytes: 50, number_of_puts: 10, number_of_gets: 20, bucket_id: 1, - sub_aggregates: vec![BucketSubAggregateResponse { + sub_aggregates: vec![aggregator_client::json::BucketSubAggregateResponse { NodeID: "1".to_string(), stored_bytes: 200, transferred_bytes: 50, @@ -628,13 +628,13 @@ fn buckets_sub_aggregates_in_others_merged() { let resp3 = ( aggregator3, - vec![BucketAggregateResponse { + vec![aggregator_client::json::BucketAggregateResponse { stored_bytes: 100, transferred_bytes: 50, number_of_puts: 10, number_of_gets: 20, bucket_id: 1, - sub_aggregates: vec![BucketSubAggregateResponse { + sub_aggregates: vec![aggregator_client::json::BucketSubAggregateResponse { NodeID: "1".to_string(), stored_bytes: 100, transferred_bytes: 50, @@ -717,13 +717,13 @@ fn buckets_sub_aggregates_in_others_merged_2() { let resp1 = ( aggregator1, - vec![BucketAggregateResponse { + vec![aggregator_client::json::BucketAggregateResponse { stored_bytes: 100, transferred_bytes: 50, number_of_puts: 10, number_of_gets: 20, bucket_id: 1, - sub_aggregates: vec![BucketSubAggregateResponse { + sub_aggregates: vec![aggregator_client::json::BucketSubAggregateResponse { NodeID: "1".to_string(), stored_bytes: 100, transferred_bytes: 50, @@ -735,13 +735,13 @@ fn buckets_sub_aggregates_in_others_merged_2() { let resp2 = ( aggregator2, - vec![BucketAggregateResponse { + vec![aggregator_client::json::BucketAggregateResponse { stored_bytes: 200, transferred_bytes: 50, number_of_puts: 10, number_of_gets: 20, bucket_id: 2, - sub_aggregates: vec![BucketSubAggregateResponse { + sub_aggregates: vec![aggregator_client::json::BucketSubAggregateResponse { NodeID: "1".to_string(), stored_bytes: 200, transferred_bytes: 500, @@ -753,13 +753,13 @@ fn buckets_sub_aggregates_in_others_merged_2() { let resp3 = ( aggregator3, - vec![BucketAggregateResponse { + vec![aggregator_client::json::BucketAggregateResponse { stored_bytes: 100, transferred_bytes: 50, number_of_puts: 10, number_of_gets: 20, bucket_id: 1, - sub_aggregates: vec![BucketSubAggregateResponse { + sub_aggregates: vec![aggregator_client::json::BucketSubAggregateResponse { NodeID: "1".to_string(), stored_bytes: 100, transferred_bytes: 50, @@ -848,7 +848,7 @@ fn nodes_aggregates_in_consensus_merged() { let resp1 = ( aggregator1, - vec![NodeAggregateResponse { + vec![aggregator_client::json::NodeAggregateResponse { node_id: "1".to_string(), stored_bytes: 100, transferred_bytes: 50, @@ -859,7 +859,7 @@ fn nodes_aggregates_in_consensus_merged() { let resp2 = ( aggregator2, - vec![NodeAggregateResponse { + vec![aggregator_client::json::NodeAggregateResponse { node_id: "1".to_string(), stored_bytes: 100, transferred_bytes: 50, @@ -870,7 +870,7 @@ fn nodes_aggregates_in_consensus_merged() { let resp3 = ( aggregator3, - vec![NodeAggregateResponse { + vec![aggregator_client::json::NodeAggregateResponse { node_id: "1".to_string(), stored_bytes: 100, transferred_bytes: 50, @@ -951,7 +951,7 @@ fn nodes_aggregates_in_quorum_merged() { let resp1 = ( aggregator1, - vec![NodeAggregateResponse { + vec![aggregator_client::json::NodeAggregateResponse { node_id: "1".to_string(), stored_bytes: 100, transferred_bytes: 50, @@ -962,7 +962,7 @@ fn nodes_aggregates_in_quorum_merged() { let resp2 = ( aggregator2, - vec![NodeAggregateResponse { + vec![aggregator_client::json::NodeAggregateResponse { node_id: "1".to_string(), stored_bytes: 200, transferred_bytes: 50, @@ -973,7 +973,7 @@ fn nodes_aggregates_in_quorum_merged() 
{ let resp3 = ( aggregator3, - vec![NodeAggregateResponse { + vec![aggregator_client::json::NodeAggregateResponse { node_id: "1".to_string(), stored_bytes: 100, transferred_bytes: 50, @@ -1054,7 +1054,7 @@ fn nodes_aggregates_in_others_merged() { let resp1 = ( aggregator1, - vec![NodeAggregateResponse { + vec![aggregator_client::json::NodeAggregateResponse { node_id: "1".to_string(), stored_bytes: 100, transferred_bytes: 50, @@ -1065,7 +1065,7 @@ fn nodes_aggregates_in_others_merged() { let resp2 = ( aggregator2, - vec![NodeAggregateResponse { + vec![aggregator_client::json::NodeAggregateResponse { node_id: "1".to_string(), stored_bytes: 200, transferred_bytes: 50, @@ -1076,7 +1076,7 @@ fn nodes_aggregates_in_others_merged() { let resp3 = ( aggregator3, - vec![NodeAggregateResponse { + vec![aggregator_client::json::NodeAggregateResponse { node_id: "1".to_string(), stored_bytes: 100, transferred_bytes: 50, @@ -1158,7 +1158,7 @@ fn nodes_aggregates_in_others_merged_2() { let resp1 = ( aggregator1, - vec![NodeAggregateResponse { + vec![aggregator_client::json::NodeAggregateResponse { node_id: "2".to_string(), stored_bytes: 1000, transferred_bytes: 500, @@ -1169,7 +1169,7 @@ fn nodes_aggregates_in_others_merged_2() { let resp2 = ( aggregator2, - vec![NodeAggregateResponse { + vec![aggregator_client::json::NodeAggregateResponse { node_id: "1".to_string(), stored_bytes: 200, transferred_bytes: 50, @@ -1180,7 +1180,7 @@ fn nodes_aggregates_in_others_merged_2() { let resp3 = ( aggregator3, - vec![NodeAggregateResponse { + vec![aggregator_client::json::NodeAggregateResponse { node_id: "1".to_string(), stored_bytes: 100, transferred_bytes: 50, @@ -1241,7 +1241,7 @@ fn buckets_sub_aggregates_grouped_by_consistency() { }; let buckets_sub_aggregates = vec![ - BucketSubAggregate { + aggregator_client::json::BucketSubAggregate { bucket_id: 1, node_id: "1".to_string(), stored_bytes: 100, @@ -1250,7 +1250,7 @@ fn buckets_sub_aggregates_grouped_by_consistency() { number_of_gets: 20, aggregator: aggregator.clone(), }, - BucketSubAggregate { + aggregator_client::json::BucketSubAggregate { bucket_id: 1, node_id: "1".to_string(), stored_bytes: 100, @@ -1259,7 +1259,7 @@ fn buckets_sub_aggregates_grouped_by_consistency() { number_of_gets: 20, aggregator: aggregator.clone(), }, - BucketSubAggregate { + aggregator_client::json::BucketSubAggregate { bucket_id: 1, node_id: "1".to_string(), stored_bytes: 100, @@ -1308,7 +1308,7 @@ fn buckets_sub_aggregates_grouped_by_consistency_2() { }; let buckets_sub_aggregates = vec![ - BucketSubAggregate { + aggregator_client::json::BucketSubAggregate { bucket_id: 1, node_id: "1".to_string(), stored_bytes: 100, @@ -1317,7 +1317,7 @@ fn buckets_sub_aggregates_grouped_by_consistency_2() { number_of_gets: 20, aggregator: aggregator.clone(), }, - BucketSubAggregate { + aggregator_client::json::BucketSubAggregate { bucket_id: 1, node_id: "1".to_string(), stored_bytes: 100, @@ -1326,7 +1326,7 @@ fn buckets_sub_aggregates_grouped_by_consistency_2() { number_of_gets: 20, aggregator: aggregator.clone(), }, - BucketSubAggregate { + aggregator_client::json::BucketSubAggregate { bucket_id: 1, node_id: "1".to_string(), stored_bytes: 100, @@ -1335,7 +1335,7 @@ fn buckets_sub_aggregates_grouped_by_consistency_2() { number_of_gets: 20, aggregator: aggregator.clone(), }, - BucketSubAggregate { + aggregator_client::json::BucketSubAggregate { bucket_id: 2, node_id: "2".to_string(), stored_bytes: 110, @@ -1344,7 +1344,7 @@ fn buckets_sub_aggregates_grouped_by_consistency_2() { number_of_gets: 20, 
aggregator: aggregator.clone(), }, - BucketSubAggregate { + aggregator_client::json::BucketSubAggregate { bucket_id: 2, node_id: "2".to_string(), stored_bytes: 110, @@ -1353,7 +1353,7 @@ fn buckets_sub_aggregates_grouped_by_consistency_2() { number_of_gets: 20, aggregator: aggregator.clone(), }, - BucketSubAggregate { + aggregator_client::json::BucketSubAggregate { bucket_id: 2, node_id: "2".to_string(), stored_bytes: 110, @@ -1412,7 +1412,7 @@ fn nodes_aggregates_grouped_by_consistency() { }; let nodes_aggregates = vec![ - NodeAggregate { + aggregator_client::json::NodeAggregate { node_id: "0".to_string(), stored_bytes: 100, transferred_bytes: 50, @@ -1420,7 +1420,7 @@ fn nodes_aggregates_grouped_by_consistency() { number_of_gets: 20, aggregator: aggregator.clone(), }, - NodeAggregate { + aggregator_client::json::NodeAggregate { node_id: "0".to_string(), stored_bytes: 100, transferred_bytes: 50, @@ -1428,7 +1428,7 @@ fn nodes_aggregates_grouped_by_consistency() { number_of_gets: 20, aggregator: aggregator.clone(), }, - NodeAggregate { + aggregator_client::json::NodeAggregate { node_id: "0".to_string(), stored_bytes: 100, transferred_bytes: 50, @@ -1475,7 +1475,7 @@ fn nodes_aggregates_grouped_by_consistency_2() { }; let nodes_aggregates = vec![ - NodeAggregate { + aggregator_client::json::NodeAggregate { node_id: "1".to_string(), stored_bytes: 100, transferred_bytes: 50, @@ -1483,7 +1483,7 @@ fn nodes_aggregates_grouped_by_consistency_2() { number_of_gets: 20, aggregator: aggregator.clone(), }, - NodeAggregate { + aggregator_client::json::NodeAggregate { node_id: "1".to_string(), stored_bytes: 100, transferred_bytes: 50, @@ -1491,7 +1491,7 @@ fn nodes_aggregates_grouped_by_consistency_2() { number_of_gets: 20, aggregator: aggregator.clone(), }, - NodeAggregate { + aggregator_client::json::NodeAggregate { node_id: "1".to_string(), stored_bytes: 100, transferred_bytes: 50, @@ -1499,7 +1499,7 @@ fn nodes_aggregates_grouped_by_consistency_2() { number_of_gets: 20, aggregator: aggregator.clone(), }, - NodeAggregate { + aggregator_client::json::NodeAggregate { node_id: "2".to_string(), stored_bytes: 110, transferred_bytes: 50, @@ -1507,7 +1507,7 @@ fn nodes_aggregates_grouped_by_consistency_2() { number_of_gets: 20, aggregator: aggregator.clone(), }, - NodeAggregate { + aggregator_client::json::NodeAggregate { node_id: "2".to_string(), stored_bytes: 110, transferred_bytes: 50, @@ -1515,7 +1515,7 @@ fn nodes_aggregates_grouped_by_consistency_2() { number_of_gets: 20, aggregator: aggregator.clone(), }, - NodeAggregate { + aggregator_client::json::NodeAggregate { node_id: "2".to_string(), stored_bytes: 110, transferred_bytes: 50, @@ -1556,7 +1556,7 @@ fn empty_bucket_sub_aggregates() { let redundancy_factor = 3; let quorum = Percent::from_percent(67); - let empty = Vec::::new(); + let empty = Vec::::new(); let groups = DdcVerification::group_by_consistency(empty, redundancy_factor, quorum); assert_eq!(groups.consensus.len(), 0); @@ -1726,7 +1726,7 @@ fn bucket_sub_aggregates_are_fetched_and_grouped() { // Sub aggregates which are in consensus - let bucket_sub_aggregate_in_consensus = BucketSubAggregate { + let bucket_sub_aggregate_in_consensus = aggregator_client::json::BucketSubAggregate { bucket_id: 90235, node_id: "0xb6186f80dce7190294665ab53860de2841383bb202c562bb8b81a624351fa318" .to_string(), @@ -1757,7 +1757,7 @@ fn bucket_sub_aggregates_are_fetched_and_grouped() { ); // Sub aggregates which are in quorum - let bucket_sub_aggregate_in_quorum = BucketSubAggregate { + let 
bucket_sub_aggregate_in_quorum = aggregator_client::json::BucketSubAggregate { bucket_id: 90235, node_id: "0xb6186f80dce7190294665ab53860de2841383bb202c562bb8b81a624351fa319" .to_string(), @@ -1785,7 +1785,7 @@ fn bucket_sub_aggregates_are_fetched_and_grouped() { ); // Others sub aggregates - let bucket_sub_aggregate1_in_others = BucketSubAggregate { + let bucket_sub_aggregate1_in_others = aggregator_client::json::BucketSubAggregate { bucket_id: 90235, node_id: "0xb6186f80dce7190294665ab53860de2841383bb202c562bb8b81a624351fa319" .to_string(), @@ -1799,7 +1799,7 @@ fn bucket_sub_aggregates_are_fetched_and_grouped() { }, }; - let bucket_sub_aggregate2_in_others = BucketSubAggregate { + let bucket_sub_aggregate2_in_others = aggregator_client::json::BucketSubAggregate { bucket_id: 90235, node_id: "0xb6186f80dce7190294665ab53860de2841383bb202c562bb8b81a624351fa320" .to_string(), @@ -1988,7 +1988,7 @@ fn node_aggregates_are_fetched_and_grouped() { let groups = DdcVerification::group_nodes_aggregates_by_consistency(&cluster_id, era_id, aggregates_by_aggregator, redundancy_factor, aggregators_quorum); // Node aggregates which are in consensus - let node_aggregate_in_consensus = NodeAggregate { + let node_aggregate_in_consensus = aggregator_client::json::NodeAggregate { node_id: "0x48594f1fd4f05135914c42b03e63b61f6a3e4c537ccee3dbac555ef6df371b7e" .to_string(), stored_bytes: 675613289, @@ -2010,7 +2010,7 @@ fn node_aggregates_are_fetched_and_grouped() { ); // Node aggregates which are in quorum - let node_aggregate_in_quorum = NodeAggregate { + let node_aggregate_in_quorum = aggregator_client::json::NodeAggregate { node_id: "0x9ef98ad9c3626ba725e78d76cfcfc4b4d07e84f0388465bc7eb992e3e117234a" .to_string(), stored_bytes: 0, @@ -2031,7 +2031,7 @@ fn node_aggregates_are_fetched_and_grouped() { ); // Others nodes aggregates - let node_aggregate1_in_others = NodeAggregate { + let node_aggregate1_in_others = aggregator_client::json::NodeAggregate { node_id: "0x9ef98ad9c3626ba725e78d76cfcfc4b4d07e84f0388465bc7eb992e3e117234a" .to_string(), stored_bytes: 0, @@ -2044,7 +2044,7 @@ fn node_aggregates_are_fetched_and_grouped() { }, }; - let node_aggregate2_in_others = NodeAggregate { + let node_aggregate2_in_others = aggregator_client::json::NodeAggregate { node_id: "0xfc28d5f5bb10212077a8654f62c4f8f0b5ab985fc322a51f5a3c75943b29194b" .to_string(), stored_bytes: 675613289, @@ -2109,7 +2109,7 @@ fn test_convert_to_batch_merkle_roots_empty() { let result_roots = DdcVerification::convert_to_batch_merkle_roots( &cluster_id, era_id_1, - Vec::>::new(), + Vec::>::new(), ) .unwrap(); let expected_roots: Vec = Vec::::new(); @@ -2119,9 +2119,9 @@ fn test_convert_to_batch_merkle_roots_empty() { #[test] fn test_split_to_batches_empty_activities() { - let activities: Vec = vec![]; + let activities: Vec = vec![]; let result = DdcVerification::split_to_batches(&activities, 3); - assert_eq!(result, Vec::>::new()); + assert_eq!(result, Vec::>::new()); } #[test] @@ -2171,7 +2171,7 @@ fn test_split_to_batches_non_exact_batches() { ]; sorted_activities.sort(); let result = DdcVerification::split_to_batches(&activities, 2); - let mut expected: Vec> = Vec::new(); + let mut expected: Vec> = Vec::new(); expected.push(vec![sorted_activities[0].clone(), sorted_activities[1].clone()]); expected.push(vec![sorted_activities[2].clone(), sorted_activities[3].clone()]); expected.push(vec![sorted_activities[4].clone()]); @@ -2863,7 +2863,7 @@ fn fetch_reward_activities_works() { &cluster_id, era_id, vec![ - NodeAggregate { + 
aggregator_client::json::NodeAggregate { node_id: "0".to_string(), stored_bytes: -100, transferred_bytes: 50, @@ -2874,7 +2874,7 @@ fn fetch_reward_activities_works() { node_params: node_params.clone(), }, }, - NodeAggregate { + aggregator_client::json::NodeAggregate { node_id: "1".to_string(), stored_bytes: -101, transferred_bytes: 51, @@ -2885,7 +2885,7 @@ fn fetch_reward_activities_works() { node_params: node_params.clone(), }, }, - NodeAggregate { + aggregator_client::json::NodeAggregate { node_id: "2".to_string(), stored_bytes: 102, transferred_bytes: 52, @@ -2896,7 +2896,7 @@ fn fetch_reward_activities_works() { node_params: node_params.clone(), }, }, - NodeAggregate { + aggregator_client::json::NodeAggregate { node_id: "3".to_string(), stored_bytes: 103, transferred_bytes: 53, @@ -2907,7 +2907,7 @@ fn fetch_reward_activities_works() { node_params: node_params.clone(), }, }, - NodeAggregate { + aggregator_client::json::NodeAggregate { node_id: "4".to_string(), stored_bytes: 104, transferred_bytes: 54, @@ -2967,7 +2967,7 @@ fn test_find_random_merkle_node_ids() { }; ext.execute_with(|| { - let deffective_bucket_sub_aggregate = BucketSubAggregate { + let deffective_bucket_sub_aggregate = aggregator_client::json::BucketSubAggregate { bucket_id: 90235, node_id: "0xb6186f80dce7190294665ab53860de2841383bb202c562bb8b81a624351fa319" .to_string(), @@ -3070,7 +3070,7 @@ fn challenge_bucket_sub_aggregate_works() { domain: b"example2.com".to_vec(), }; - let deffective_bucket_sub_aggregate = BucketSubAggregate { + let deffective_bucket_sub_aggregate = aggregator_client::json::BucketSubAggregate { bucket_id: 123229, node_id: "0x1f50f1455f60f5774564233d321a116ca45ae3188b2200999445706d04839d72" .to_string(),
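To close the loop on the relocated DTOs, here is a hedged, self-contained sketch of how an aggregator JSON body maps onto the `NodeAggregateResponse` shape that `AggregatorClient::nodes_aggregates` parses from `GET {base_url}/activity/nodes?eraId=...`. The struct is re-declared locally so the snippet compiles outside the pallet, and the payload is an invented sample, not captured aggregator output:

```rust
use serde::Deserialize;

// Local mirror of aggregator_client::json::NodeAggregateResponse, reduced to
// the serde-relevant parts so this sketch is self-contained.
#[derive(Debug, Deserialize)]
struct NodeAggregateResponse {
    node_id: String,
    stored_bytes: i64,
    transferred_bytes: u64,
    number_of_puts: u64,
    number_of_gets: u64,
}

fn main() {
    // Invented sample body; field names match the Rust struct because this
    // DTO uses default serde naming (unlike BucketSubAggregateResponse,
    // whose `NodeID` field needs #[allow(non_snake_case)]).
    let body = r#"[
        {"node_id": "1", "stored_bytes": 100, "transferred_bytes": 50,
         "number_of_puts": 10, "number_of_gets": 20}
    ]"#;

    let aggregates: Vec<NodeAggregateResponse> =
        serde_json::from_str(body).expect("sample parses");
    assert_eq!(aggregates[0].node_id, "1");
    assert_eq!(aggregates[0].stored_bytes, 100);
}
```

Since the move keeps every serde attribute and field type intact, wire compatibility with the aggregator API is preserved; only the Rust paths (`aggregator_client::json::*`) change for callers.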