diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 07675f22f..01936e674 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -36,6 +36,8 @@ Please select the branch type you are merging and fill in the relevant template. ## Checklist for Hotfix +- [ ] Change has been deployed to Testnet. +- [ ] Change has been tested in Testnet. - [ ] Changelog has been updated. - [ ] Crate version has been updated. - [ ] `spec_version` has been incremented. diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index b3c5a9b39..82de34749 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -54,10 +54,10 @@ jobs: - name: Check Build run: | cargo build --release --features try-runtime - - name: Check Try-Runtime - run: | - try-runtime --runtime ./target/release/wbuild/cere-dev-runtime/cere_dev_runtime.compact.compressed.wasm \ - on-runtime-upgrade --disable-idempotency-checks live --uri wss://rpc.devnet.cere.network:443 +# - name: Check Try-Runtime +# run: | +# try-runtime --runtime ./target/release/wbuild/cere-dev-runtime/cere_dev_runtime.compact.compressed.wasm \ +# on-runtime-upgrade --disable-idempotency-checks live --uri wss://rpc.devnet.cere.network:443 - name: Run dev chain run: | timeout --preserve-status 30s ./target/release/cere --dev @@ -65,6 +65,9 @@ jobs: run: > pushd node && cargo check --features=runtime-benchmarks --release + - name: Check Build for Try-Runtime + run: | + cargo check --features=try-runtime --release clippy: name: Run Clippy diff --git a/.github/workflows/docker-image-berlin.yml b/.github/workflows/docker-image-berlin.yml index 4c8c9a716..d05758520 100644 --- a/.github/workflows/docker-image-berlin.yml +++ b/.github/workflows/docker-image-berlin.yml @@ -47,4 +47,4 @@ jobs: file: Dockerfile.tests push: true build-args: | - "ECR_REGISTRY=${{ steps.login-ecr-org.outputs.registry }}-berlin" \ No newline at end of file + "ECR_REGISTRY=${{ 
steps.login-ecr-org.outputs.registry }}-berlin" diff --git a/CHANGELOG.md b/CHANGELOG.md index b32593f6d..df784a084 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,9 +9,16 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - [C] Changes is `Cere` Runtime - [D] Changes is `Cere Dev` Runtime +## [5.4.1] + +### Changed + +- [C,D] `pallet-ddc-verification`: Introduction of the Verification pallet to ensure the secure posting and retrieval of verification keys to and from the blockchain. ## [5.4.0] +### Changed + - [C,D] Introduce new events to the DDC Payouts Pallet - [C,D] `pallet-ddc-clusters-gov`: Introduction of the Cluster Governance pallet for managing clusters protocol parameters. - [C,D] `WhitelistOrigin` is set to the Technical Committee Collective Body @@ -19,6 +26,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [5.3.0] +### Changed + - [C,D] Updated Substrate to polkadot-v1.1.0 - [C,D] Introduction of the OpenGov - [C,D] `pallet-ddc-clusters`: Added Erasure coding and Replication in cluster params @@ -35,6 +44,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [5.2.0] +- DAC ddc node mode + ### Added - [C,D] Missing storage migrations to Staking pallet diff --git a/Cargo.lock b/Cargo.lock index afccbe899..ab61dff01 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -198,7 +198,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d1da02abba9f9063d786eab1509833ebb2fac0f966862ca59439c76b9c566760" dependencies = [ "include_dir", - "itertools", + "itertools 0.10.5", "proc-macro-error", "proc-macro2", "quote", @@ -229,7 +229,7 @@ dependencies = [ "ark-std", "derivative", "hashbrown 0.13.2", - "itertools", + "itertools 0.10.5", "num-traits", "zeroize", ] @@ -258,7 +258,7 @@ dependencies = [ "ark-std", "derivative", "digest 0.10.7", - "itertools", + "itertools 0.10.5", "num-bigint", "num-traits", "paste", @@ -1002,6 +1002,7 @@ 
dependencies = [ "pallet-ddc-nodes", "pallet-ddc-payouts", "pallet-ddc-staking", + "pallet-ddc-verification", "pallet-democracy", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", @@ -1126,6 +1127,7 @@ dependencies = [ "pallet-ddc-nodes", "pallet-ddc-payouts", "pallet-ddc-staking", + "pallet-ddc-verification", "pallet-democracy", "pallet-election-provider-multi-phase", "pallet-election-provider-support-benchmarking", @@ -1216,6 +1218,7 @@ dependencies = [ "cere-rpc", "cere-runtime", "cere-runtime-common", + "ddc-primitives", "futures", "jsonrpsee", "node-primitives", @@ -1631,7 +1634,7 @@ dependencies = [ "cranelift-codegen", "cranelift-entity", "cranelift-frontend", - "itertools", + "itertools 0.10.5", "log", "smallvec", "wasmparser", @@ -1858,11 +1861,14 @@ dependencies = [ name = "ddc-primitives" version = "5.4.0" dependencies = [ + "blake2", "frame-support", "frame-system", "parity-scale-codec", + "polkadot-ckb-merkle-mountain-range", "scale-info", "serde", + "sp-application-crypto", "sp-core", "sp-runtime", "sp-std", @@ -2513,7 +2519,7 @@ dependencies = [ "frame-system", "gethostname", "handlebars", - "itertools", + "itertools 0.10.5", "lazy_static", "linked-hash-map", "log", @@ -2676,7 +2682,7 @@ dependencies = [ "derive-syn-parse", "expander", "frame-support-procedural-tools", - "itertools", + "itertools 0.10.5", "macro_magic", "proc-macro-warning", "proc-macro2", @@ -3540,6 +3546,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.11" @@ -5335,6 +5350,37 @@ dependencies = [ "substrate-test-utils", ] +[[package]] +name = "pallet-ddc-verification" +version = "5.4.0" +dependencies = [ + "array-bytes", + "ddc-primitives", + "frame-benchmarking", + 
"frame-election-provider-support", + "frame-support", + "frame-system", + "itertools 0.13.0", + "log", + "pallet-balances", + "pallet-session", + "pallet-staking", + "pallet-staking-reward-curve", + "pallet-timestamp", + "parity-scale-codec", + "polkadot-ckb-merkle-mountain-range", + "scale-info", + "serde", + "serde_json", + "sp-application-crypto", + "sp-core", + "sp-io", + "sp-keystore", + "sp-runtime", + "sp-staking", + "sp-std", +] + [[package]] name = "pallet-democracy" version = "4.0.0-dev" @@ -6291,6 +6337,16 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "db23d408679286588f4d4644f965003d056e3dd5abcaaa938116871d7ce2fee7" +[[package]] +name = "polkadot-ckb-merkle-mountain-range" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4b44320e5f7ce2c18227537a3032ae5b2c476a7e8eddba45333e1011fc31b92" +dependencies = [ + "cfg-if", + "itertools 0.10.5", +] + [[package]] name = "polling" version = "3.6.0" @@ -6355,7 +6411,7 @@ checksum = "59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd" dependencies = [ "difflib", "float-cmp", - "itertools", + "itertools 0.10.5", "normalize-line-endings", "predicates-core", "regex", @@ -6539,7 +6595,7 @@ checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ "bytes", "heck 0.4.1", - "itertools", + "itertools 0.10.5", "lazy_static", "log", "multimap", @@ -6560,7 +6616,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", - "itertools", + "itertools 0.10.5", "proc-macro2", "quote", "syn 1.0.109", diff --git a/Cargo.toml b/Cargo.toml index dda53ab89..4f4948cad 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,6 +21,7 @@ members = [ "pallets/erc721", "pallets/ddc-clusters-gov", "pallets/origins", + "pallets/ddc-verification", "primitives", "runtime/cere", 
"runtime/cere-dev", @@ -29,6 +30,7 @@ resolver = "2" [workspace.dependencies] # 3rd-party dependencies +blake2 = { version = "0.10.4", default-features = false } byte-unit = { version = "4.0.19", default-features = false, features = ["u128"] } chrono = { version = "0.4.31", default-features = false } clap = { version = "4.2.5", features = ["derive"] } @@ -39,13 +41,16 @@ jsonrpsee = { version = "0.16.2", default-features = false, features = ["server" lazy_static = { version = "1.4.0", default-features = false } log = { version = "0.4.17", default-features = false } parking_lot = { version = "0.12.1", default-features = false } +polkadot-ckb-merkle-mountain-range = { version = "0.7.0", default-features = false } rand = { version = "0.8", default-features = false } rand_chacha = { version = "0.2", default-features = false } scale-info = { version = "2.1.2", default-features = false, features = ["derive"] } serde = { version = "1.0.136", default-features = false, features = ["derive"] } +serde_json = { version = "1.0", default-features = false } static_assertions = { version = "1.1.0" } url = { version = "2.4.1" } - +array-bytes = { version = "6.1" } +itertools = { version = "0.13.0", default-features = false, features = ["use_alloc"] } # Substrate Dependencies # Please keey format such that: # dependency-name = { git = "X", tag = "Y", default-features = false } @@ -132,6 +137,7 @@ sc-transaction-pool = { git = "https://github.com/paritytech/polkadot-sdk.git", sc-transaction-pool-api = { git = "https://github.com/paritytech/polkadot-sdk", tag = "polkadot-v1.1.0", default-features = false } sc-offchain = { git = "https://github.com/paritytech/polkadot-sdk", tag = "polkadot-v1.1.0", default-features = false } sp-api = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.1.0", default-features = false } +sp-application-crypto = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.1.0", default-features = false } 
sp-arithmetic = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.1.0", default-features = false } sp-authority-discovery = { git = "https://github.com/paritytech/polkadot-sdk.git", tag = "polkadot-v1.1.0", default-features = false } sp-block-builder = { git = "https://github.com/paritytech/polkadot-sdk", tag = "polkadot-v1.1.0", default-features = false } @@ -182,6 +188,7 @@ pallet-ddc-customers = { path = "pallets/ddc-customers", default-features = fals pallet-ddc-nodes = { path = "pallets/ddc-nodes", default-features = false } pallet-ddc-payouts = { path = "pallets/ddc-payouts", default-features = false } pallet-ddc-staking = { path = "pallets/ddc-staking", default-features = false } +pallet-ddc-verification = { path = "pallets/ddc-verification", default-features = false } pallet-erc20 = { path = "pallets/erc20", default-features = false } pallet-erc721 = { path = "pallets/erc721", default-features = false } pallet-origins = { path = "pallets/origins", default-features = false } diff --git a/Dockerfile.tests b/Dockerfile.tests index 265a9e333..2d50e54b4 100644 --- a/Dockerfile.tests +++ b/Dockerfile.tests @@ -32,3 +32,4 @@ RUN curl https://sh.rustup.rs -sSf | sh -s -- -y && \ export PATH=$PATH:$HOME/.cargo/bin && \ scripts/init.sh && \ TRYBUILD=overwrite cargo test --workspace --locked --release --verbose --features runtime-benchmarks --manifest-path node/cli/Cargo.toml + diff --git a/node/service/Cargo.toml b/node/service/Cargo.toml index 5b26c016c..e2e3f30c4 100644 --- a/node/service/Cargo.toml +++ b/node/service/Cargo.toml @@ -55,6 +55,7 @@ cere-runtime-common = { workspace = true, optional = true } cere-dev-runtime = { workspace = true, optional = true } cere-runtime = { workspace = true, optional = true } +ddc-primitives = { workspace = true } [features] default = ["cere-native"] diff --git a/node/service/chain-specs/example.json b/node/service/chain-specs/example.json index 008ca543f..e36f3d835 100644 --- 
a/node/service/chain-specs/example.json +++ b/node/service/chain-specs/example.json @@ -95,6 +95,36 @@ ] ] }, + "democracy": { + "phantom": null + }, + "council": { + "phantom": null, + "members": [] + }, + "technicalCommittee": { + "phantom": null, + "members": [ + "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty" + ] + }, + "elections": { + "members": [ + [ + "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + 10000000000 + ], + [ + "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty", + 10000000000 + ] + ] + }, + "technicalMembership": { + "members": [], + "phantom": null + }, "grandpa": { "authorities": [] }, @@ -108,6 +138,14 @@ "authorityDiscovery": { "keys": [] }, + "society": { + "pot": 0, + "members": [ + "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + "5FHneW46xGXgs5mUiveU4sbTyGBzmstUspZC92UhjJM694ty" + ], + "maxMembers": 999 + }, "vesting": { "vesting": [] }, diff --git a/node/service/src/chain_spec.rs b/node/service/src/chain_spec.rs index 4050b00cf..3b3d9efe8 100644 --- a/node/service/src/chain_spec.rs +++ b/node/service/src/chain_spec.rs @@ -4,6 +4,7 @@ use cere_dev_runtime as cere_dev; use cere_runtime as cere; #[cfg(feature = "cere-dev-native")] use cere_runtime_common::constants::currency::DOLLARS as TEST_UNITS; +use ddc_primitives::sr25519::AuthorityId as DdcVerificationId; use jsonrpsee::core::__reexports::serde_json; pub use node_primitives::{AccountId, Balance, Block, Signature}; use pallet_im_online::sr25519::AuthorityId as ImOnlineId; @@ -74,7 +75,8 @@ where // Helper function to generate stash, controller and session key from seed pub fn authority_keys_from_seed( seed: &str, -) -> (AccountId, AccountId, GrandpaId, BabeId, ImOnlineId, AuthorityDiscoveryId) { +) -> (AccountId, AccountId, GrandpaId, BabeId, ImOnlineId, AuthorityDiscoveryId, DdcVerificationId) +{ ( get_account_id_from_seed::(&format!("{}//stash", seed)), get_account_id_from_seed::(seed), @@ -82,6 +84,7 @@ pub fn 
authority_keys_from_seed( get_from_seed::(seed), get_from_seed::(seed), get_from_seed::(seed), + get_from_seed::(seed), ) } @@ -91,8 +94,9 @@ fn cere_dev_session_keys( babe: BabeId, im_online: ImOnlineId, authority_discovery: AuthorityDiscoveryId, + ddc_verification: DdcVerificationId, ) -> cere_dev::SessionKeys { - cere_dev::SessionKeys { grandpa, babe, im_online, authority_discovery } + cere_dev::SessionKeys { grandpa, babe, im_online, authority_discovery, ddc_verification } } /// Helper function to create Cere Dev `RuntimeGenesisConfig` for testing @@ -106,6 +110,7 @@ pub fn cere_dev_genesis( BabeId, ImOnlineId, AuthorityDiscoveryId, + DdcVerificationId, )>, initial_nominators: Vec, root_key: AccountId, @@ -178,7 +183,13 @@ pub fn cere_dev_genesis( ( x.0.clone(), x.0.clone(), - cere_dev_session_keys(x.2.clone(), x.3.clone(), x.4.clone(), x.5.clone()), + cere_dev_session_keys( + x.2.clone(), + x.3.clone(), + x.4.clone(), + x.5.clone(), + x.6.clone(), + ), ) }) .collect::>(), @@ -279,7 +290,11 @@ fn cere_dev_local_testnet_genesis(wasm_binary: &[u8]) -> cere_dev::RuntimeGenesi cere_dev_genesis( wasm_binary, // Initial authorities - vec![authority_keys_from_seed("Alice"), authority_keys_from_seed("Bob")], + vec![ + authority_keys_from_seed("Alice"), + authority_keys_from_seed("Bob"), + authority_keys_from_seed("Charlie"), + ], // Initial nominators vec![], // Sudo account diff --git a/pallets/ddc-clusters-gov/src/weights.rs b/pallets/ddc-clusters-gov/src/weights.rs index 98da73725..5bddb6ffc 100644 --- a/pallets/ddc-clusters-gov/src/weights.rs +++ b/pallets/ddc-clusters-gov/src/weights.rs @@ -362,4 +362,4 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } -} \ No newline at end of file +} diff --git a/pallets/ddc-clusters/src/lib.rs b/pallets/ddc-clusters/src/lib.rs index e9c1cd123..16f3eb7e1 100644 --- a/pallets/ddc-clusters/src/lib.rs +++ b/pallets/ddc-clusters/src/lib.rs @@ -135,6 
+135,7 @@ pub mod pallet { ClusterProtocolParamsNotSet, ArithmeticOverflow, NodeIsNotAssignedToCluster, + ControllerDoesNotExist, } #[pallet::storage] @@ -821,6 +822,17 @@ pub mod pallet { Ok(cluster.manager_id) } + fn get_nodes(cluster_id: &ClusterId) -> Result, DispatchError> { + let mut nodes = Vec::new(); + + // Iterate through all nodes associated with the cluster_id + for (node_pubkey, _) in ClustersNodes::::iter_prefix(cluster_id) { + nodes.push(node_pubkey); + } + + Ok(nodes) + } + fn contains_node( cluster_id: &ClusterId, node_pub_key: &NodePubKey, @@ -903,6 +915,7 @@ pub mod pallet { match error { StakingVisitorError::NodeStakeDoesNotExist => Error::::NodeHasNoActivatedStake, StakingVisitorError::NodeStakeIsInBadState => Error::::NodeStakeIsInvalid, + StakingVisitorError::ControllerDoesNotExist => Error::::ControllerDoesNotExist, } } } diff --git a/pallets/ddc-clusters/src/mock.rs b/pallets/ddc-clusters/src/mock.rs index cf738191f..8a7aafbed 100644 --- a/pallets/ddc-clusters/src/mock.rs +++ b/pallets/ddc-clusters/src/mock.rs @@ -226,6 +226,9 @@ impl StakingVisitor for TestStakingVisitor { fn has_chilling_attempt(_node_pub_key: &NodePubKey) -> Result { Ok(false) } + fn stash_by_ctrl(_controller: &T::AccountId) -> Result { + todo!() + } } impl StakerCreator> for TestStaker { diff --git a/pallets/ddc-clusters/src/weights.rs b/pallets/ddc-clusters/src/weights.rs index 6746bf18a..df5c5957e 100644 --- a/pallets/ddc-clusters/src/weights.rs +++ b/pallets/ddc-clusters/src/weights.rs @@ -204,4 +204,4 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(3_u64)) .saturating_add(RocksDbWeight::get().writes(2_u64)) } -} \ No newline at end of file +} diff --git a/pallets/ddc-customers/src/benchmarking.rs b/pallets/ddc-customers/src/benchmarking.rs index 59585d476..04b1b1525 100644 --- a/pallets/ddc-customers/src/benchmarking.rs +++ b/pallets/ddc-customers/src/benchmarking.rs @@ -162,6 +162,7 @@ benchmarks! 
{ cluster_id, is_public: false, is_removed: false, + total_customers_usage: None, }; >::set(bucket_id); @@ -190,6 +191,7 @@ benchmarks! { cluster_id, is_public: false, is_removed: false, + total_customers_usage: None, }; >::set(bucket_id); diff --git a/pallets/ddc-customers/src/lib.rs b/pallets/ddc-customers/src/lib.rs index 35fbfb4e5..2d6a24877 100644 --- a/pallets/ddc-customers/src/lib.rs +++ b/pallets/ddc-customers/src/lib.rs @@ -15,10 +15,11 @@ mod tests; use codec::{Decode, Encode}; use ddc_primitives::{ traits::{ + bucket::{BucketManager, BucketVisitor}, cluster::{ClusterCreator, ClusterProtocol, ClusterQuery}, customer::{CustomerCharger, CustomerDepositor}, }, - BucketId, ClusterId, + BucketId, BucketVisitorError, ClusterId, CustomerUsage, }; use frame_support::{ parameter_types, @@ -66,6 +67,7 @@ pub struct Bucket { cluster_id: ClusterId, is_public: bool, is_removed: bool, + total_customers_usage: Option, } #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] @@ -135,7 +137,7 @@ pub mod pallet { /// The current storage version. 
const STORAGE_VERSION: frame_support::traits::StorageVersion = - frame_support::traits::StorageVersion::new(1); + frame_support::traits::StorageVersion::new(2); #[pallet::pallet] #[pallet::storage_version(STORAGE_VERSION)] @@ -194,9 +196,27 @@ pub mod pallet { /// The account has been charged for the usage Charged { owner_id: T::AccountId, charged: BalanceOf, expected_to_charge: BalanceOf }, /// Bucket with specific id created - BucketCreated { bucket_id: BucketId }, + BucketCreated { cluster_id: ClusterId, bucket_id: BucketId }, /// Bucket with specific id updated - BucketUpdated { bucket_id: BucketId }, + BucketUpdated { cluster_id: ClusterId, bucket_id: BucketId }, + /// Bucket nodes usage with specific id updated + BucketTotalNodesUsageUpdated { + cluster_id: ClusterId, + bucket_id: BucketId, + transferred_bytes: u64, + stored_bytes: u64, + number_of_puts: u64, + number_of_gets: u64, + }, + /// Bucket customers usage with specific id updated + BucketTotalCustomersUsageUpdated { + cluster_id: ClusterId, + bucket_id: BucketId, + transferred_bytes: u64, + stored_bytes: u64, + number_of_puts: u64, + number_of_gets: u64, + }, /// Bucket with specific id marked as removed BucketRemoved { bucket_id: BucketId }, } @@ -316,12 +336,13 @@ pub mod pallet { cluster_id, is_public: bucket_params.is_public, is_removed: false, + total_customers_usage: None, }; >::set(cur_bucket_id); >::insert(cur_bucket_id, bucket); - Self::deposit_event(Event::::BucketCreated { bucket_id: cur_bucket_id }); + Self::deposit_event(Event::::BucketCreated { cluster_id, bucket_id: cur_bucket_id }); Ok(()) } @@ -511,8 +532,9 @@ pub mod pallet { ensure!(bucket.owner_id == owner, Error::::NotBucketOwner); bucket.is_public = bucket_params.is_public; + let cluster_id = bucket.cluster_id; >::insert(bucket_id, bucket); - Self::deposit_event(Event::::BucketUpdated { bucket_id }); + Self::deposit_event(Event::::BucketUpdated { cluster_id, bucket_id }); Ok(()) } @@ -523,6 +545,7 @@ pub mod pallet { 
#[pallet::call_index(6)] #[pallet::weight(T::WeightInfo::remove_bucket())] pub fn remove_bucket(origin: OriginFor, bucket_id: BucketId) -> DispatchResult { + // todo! can we set total_usage to None and save bytes let owner = ensure_signed(origin)?; >::try_mutate(bucket_id, |maybe_bucket| -> DispatchResult { @@ -624,10 +647,68 @@ pub mod pallet { } } + impl BucketVisitor for Pallet { + fn get_total_customer_usage( + cluster_id: &ClusterId, + bucket_id: BucketId, + content_owner: &T::AccountId, + ) -> Result, BucketVisitorError> { + let bucket = Self::buckets(bucket_id).ok_or(BucketVisitorError::NoBucketWithId)?; + ensure!(bucket.owner_id == *content_owner, BucketVisitorError::NotBucketOwner); + ensure!(bucket.cluster_id == *cluster_id, BucketVisitorError::NoBucketWithId); + + Ok(bucket.total_customers_usage) + } + } + + impl BucketManager for Pallet { + fn inc_total_customer_usage( + cluster_id: &ClusterId, + bucket_id: BucketId, + content_owner: T::AccountId, + customer_usage: &CustomerUsage, + ) -> DispatchResult { + let mut bucket = Self::buckets(bucket_id).ok_or(Error::::NoBucketWithId)?; + ensure!(bucket.owner_id == content_owner, Error::::NotBucketOwner); + + // Update or initialize total_customers_usage + match &mut bucket.total_customers_usage { + Some(total_customers_usage) => { + total_customers_usage.transferred_bytes += customer_usage.transferred_bytes; + total_customers_usage.stored_bytes += customer_usage.stored_bytes; + total_customers_usage.number_of_puts += customer_usage.number_of_puts; + total_customers_usage.number_of_gets += customer_usage.number_of_gets; + }, + None => { + bucket.total_customers_usage = Some(CustomerUsage { + transferred_bytes: customer_usage.transferred_bytes, + stored_bytes: customer_usage.stored_bytes, + number_of_puts: customer_usage.number_of_puts, + number_of_gets: customer_usage.number_of_gets, + }); + }, + } + + Self::deposit_event(Event::::BucketTotalCustomersUsageUpdated { + cluster_id: *cluster_id, + bucket_id, + 
transferred_bytes: customer_usage.transferred_bytes, + stored_bytes: customer_usage.stored_bytes, + number_of_puts: customer_usage.number_of_puts, + number_of_gets: customer_usage.number_of_gets, + }); + + Ok(()) + } + } + impl CustomerCharger for Pallet { fn charge_content_owner( + cluster_id: &ClusterId, + bucket_id: BucketId, content_owner: T::AccountId, billing_vault: T::AccountId, + customer_usage: &CustomerUsage, amount: u128, ) -> Result { let actually_charged: BalanceOf; @@ -662,6 +743,13 @@ pub mod pallet { actually_charged.checked_add(&charged).ok_or(Error::::ArithmeticUnderflow)?; } + Self::inc_total_customer_usage( + cluster_id, + bucket_id, + content_owner.clone(), + customer_usage, + )?; + ::Currency::transfer( &Self::account_id(), &billing_vault, diff --git a/pallets/ddc-customers/src/migration.rs b/pallets/ddc-customers/src/migration.rs index b95225a3e..ee03c912d 100644 --- a/pallets/ddc-customers/src/migration.rs +++ b/pallets/ddc-customers/src/migration.rs @@ -39,6 +39,33 @@ pub mod v0 { >; } +pub mod v1 { + use frame_support::pallet_prelude::*; + + use super::*; + + #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] + pub struct Bucket { + pub bucket_id: BucketId, + pub owner_id: AccountId, + pub cluster_id: ClusterId, + pub is_public: bool, + pub is_removed: bool, + } + + #[storage_alias] + pub(super) type BucketsCount = StorageValue, BucketId, ValueQuery>; + + #[storage_alias] + pub(super) type Buckets = StorageMap< + crate::Pallet, + Twox64Concat, + BucketId, + Bucket<::AccountId>, + OptionQuery, + >; +} + // Migrate to removable buckets pub fn migrate_to_v1() -> Weight { let on_chain_version = Pallet::::on_chain_storage_version(); @@ -49,11 +76,11 @@ pub fn migrate_to_v1() -> Weight { " >>> Updating DDC Customers storage. 
Migrating {} buckets...", count ); - Buckets::::translate::, _>( + v1::Buckets::::translate::, _>( |bucket_id: BucketId, bucket: v0::Bucket| { info!(target: LOG_TARGET, " Migrating bucket for bucket ID {:?}...", bucket_id); - Some(Bucket { + Some(v1::Bucket { bucket_id: bucket.bucket_id, owner_id: bucket.owner_id, cluster_id: bucket.cluster_id, @@ -128,3 +155,94 @@ impl OnRuntimeUpgrade for MigrateToV1 { Ok(()) } } + +// New migration to add total_customers_usage field +pub fn migrate_to_v2() -> Weight { + let on_chain_version = Pallet::::on_chain_storage_version(); + if on_chain_version == 1 { + let count = v1::BucketsCount::::get(); + info!( + target: LOG_TARGET, + " >>> Updating DDC Customers storage to v2. Migrating {} buckets...", count + ); + + Buckets::::translate::, _>( + |bucket_id: BucketId, bucket: v1::Bucket| { + info!(target: LOG_TARGET, " Migrating bucket for bucket ID {:?}...", bucket_id); + + Some(Bucket { + bucket_id: bucket.bucket_id, + owner_id: bucket.owner_id, + cluster_id: bucket.cluster_id, + is_public: bucket.is_public, + is_removed: bucket.is_removed, + total_customers_usage: None, + }) + }, + ); + + // Update storage version. + StorageVersion::new(2).put::>(); + let count = v1::BucketsCount::::get(); + info!( + target: LOG_TARGET, + " <<< DDC Customers storage updated to v2! 
Migrated {} buckets ✅", count + ); + + T::DbWeight::get().reads_writes(count + 2, count + 1) + } else { + info!(target: LOG_TARGET, " >>> Unused migration to v2!"); + T::DbWeight::get().reads(1) + } +} + +pub struct MigrateToV2(sp_std::marker::PhantomData); +impl OnRuntimeUpgrade for MigrateToV2 { + fn on_runtime_upgrade() -> Weight { + migrate_to_v2::() + } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result, DispatchError> { + let prev_bucket_id = v1::BucketsCount::::get(); + let prev_count = v1::Buckets::::iter().count(); + + Ok((prev_bucket_id, prev_count as u64).encode()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade(prev_state: Vec) -> Result<(), DispatchError> { + let (prev_bucket_id, prev_count): (u64, u64) = + Decode::decode(&mut &prev_state[..]).expect("pre_upgrade provides a valid state; qed"); + + let post_bucket_id = Pallet::::buckets_count(); + ensure!( + prev_bucket_id == post_bucket_id, + "the last bucket ID before and after the migration should be the same" + ); + + let post_count = Buckets::::iter().count() as u64; + ensure!( + prev_count == post_count, + "the bucket count before and after the migration should be the same" + ); + + let current_version = Pallet::::current_storage_version(); + let on_chain_version = Pallet::::on_chain_storage_version(); + + frame_support::ensure!(current_version == 2, "must_upgrade"); + ensure!( + current_version == on_chain_version, + "after migration, the current_version and on_chain_version should be the same" + ); + + Buckets::::iter().try_for_each(|(_id, bucket)| -> Result<(), &'static str> { + ensure!( + bucket.total_customers_usage.is_none(), + "At this point all the bucket should have total_customers_usage set to None" + ); + Ok(()) + })?; + Ok(()) + } +} diff --git a/pallets/ddc-customers/src/mock.rs b/pallets/ddc-customers/src/mock.rs index d301e366a..ca95f5626 100644 --- a/pallets/ddc-customers/src/mock.rs +++ b/pallets/ddc-customers/src/mock.rs @@ -234,6 +234,10 @@ impl 
ClusterManager for TestClusterManager { unimplemented!() } + fn get_nodes(_cluster_id: &ClusterId) -> Result, DispatchError> { + unimplemented!() + } + fn contains_node( _cluster_id: &ClusterId, _node_pub_key: &NodePubKey, diff --git a/pallets/ddc-customers/src/tests.rs b/pallets/ddc-customers/src/tests.rs index 86d1e0bf9..9f31edcf5 100644 --- a/pallets/ddc-customers/src/tests.rs +++ b/pallets/ddc-customers/src/tests.rs @@ -30,12 +30,13 @@ fn create_bucket_works() { cluster_id, is_public: bucket_params.is_public, is_removed: false, + total_customers_usage: None, }) ); // Checking that event was emitted assert_eq!(System::events().len(), 1); - System::assert_last_event(Event::BucketCreated { bucket_id: 1u64 }.into()) + System::assert_last_event(Event::BucketCreated { cluster_id, bucket_id: 1u64 }.into()) }) } @@ -56,14 +57,14 @@ fn create_two_buckets_works() { bucket_1_params.clone() )); assert_eq!(System::events().len(), 1); - System::assert_last_event(Event::BucketCreated { bucket_id: 1u64 }.into()); + System::assert_last_event(Event::BucketCreated { cluster_id, bucket_id: 1u64 }.into()); assert_ok!(DdcCustomers::create_bucket( RuntimeOrigin::signed(account_1), cluster_id, bucket_2_params.clone() )); assert_eq!(System::events().len(), 2); - System::assert_last_event(Event::BucketCreated { bucket_id: 2u64 }.into()); + System::assert_last_event(Event::BucketCreated { cluster_id, bucket_id: 2u64 }.into()); // Check storage assert_eq!(DdcCustomers::buckets_count(), 2); @@ -75,6 +76,7 @@ fn create_two_buckets_works() { cluster_id, is_public: bucket_1_params.is_public, is_removed: false, + total_customers_usage: None, }) ); assert_eq!( @@ -85,6 +87,7 @@ fn create_two_buckets_works() { cluster_id, is_public: bucket_2_params.is_public, is_removed: false, + total_customers_usage: None, }) ); }) @@ -175,10 +178,27 @@ fn charge_content_owner_works() { ExtBuilder.build_and_execute(|| { System::set_block_number(1); + let bucket_id1: BucketId = 1; + let bucket_id2: BucketId = 
2; + let cluster_id = ClusterId::from([1; 20]); + let bucket_1_params = BucketParams { is_public: false }; + let customer_usage = CustomerUsage { + transferred_bytes: 1, + stored_bytes: 2, + number_of_puts: 3, + number_of_gets: 4, + }; + let account_2: u128 = 2; let account_3: u128 = 3; let vault: u128 = 4; let deposit = 100_u128; + assert_ok!(DdcCustomers::create_bucket( + RuntimeOrigin::signed(account_3), + cluster_id, + bucket_1_params.clone() + )); + let balance_before_deposit = Balances::free_balance(account_3); // Deposited assert_ok!(DdcCustomers::deposit(RuntimeOrigin::signed(account_3), deposit)); @@ -204,9 +224,29 @@ fn charge_content_owner_works() { // successful transfer let charge1 = 10; - let charged = DdcCustomers::charge_content_owner(account_3, vault, charge1).unwrap(); + let charged = DdcCustomers::charge_content_owner( + &cluster_id, + bucket_id1, + account_3, + vault, + &customer_usage, + charge1, + ) + .unwrap(); assert_eq!(charge1, charged); + System::assert_has_event( + Event::BucketTotalCustomersUsageUpdated { + cluster_id, + bucket_id: bucket_id1, + transferred_bytes: customer_usage.transferred_bytes, + stored_bytes: customer_usage.stored_bytes, + number_of_puts: customer_usage.number_of_puts, + number_of_gets: customer_usage.number_of_gets, + } + .into(), + ); + let vault_balance = Balances::free_balance(vault); assert_eq!(charged, vault_balance); @@ -234,7 +274,15 @@ fn charge_content_owner_works() { // failed transfer let charge2 = 100u128; - let charge_result = DdcCustomers::charge_content_owner(account_3, vault, charge2).unwrap(); + let charge_result = DdcCustomers::charge_content_owner( + &cluster_id, + bucket_id1, + account_3, + vault, + &customer_usage, + charge2, + ) + .unwrap(); assert_eq!( DdcCustomers::ledger(account_3), Some(AccountsLedger { @@ -276,6 +324,31 @@ fn charge_content_owner_works() { deposit, Balances::free_balance(DdcCustomers::account_id()) - Balances::minimum_balance() ); + + 
assert_ok!(DdcCustomers::deposit(RuntimeOrigin::signed(account_2), 50_u128)); + assert_noop!( + DdcCustomers::charge_content_owner( + &cluster_id, + bucket_id1, + account_2, + vault, + &customer_usage, + charge1, + ), + Error::::NotBucketOwner + ); + + assert_noop!( + DdcCustomers::charge_content_owner( + &cluster_id, + bucket_id2, + account_3, + vault, + &customer_usage, + charge1, + ), + Error::::NoBucketWithId + ); }) } @@ -363,7 +436,7 @@ fn set_bucket_params_works() { // Checking that event was emitted assert_eq!(System::events().len(), 1); - System::assert_last_event(Event::BucketCreated { bucket_id: 1u64 }.into()); + System::assert_last_event(Event::BucketCreated { cluster_id, bucket_id: 1u64 }.into()); let bucket_id = 1; let update_bucket_params = BucketParams { is_public: true }; @@ -382,12 +455,13 @@ fn set_bucket_params_works() { cluster_id, is_public: update_bucket_params.is_public, is_removed: false, + total_customers_usage: None, }) ); // Checking that event was emitted assert_eq!(System::events().len(), 2); - System::assert_last_event(Event::BucketUpdated { bucket_id }.into()); + System::assert_last_event(Event::BucketUpdated { cluster_id, bucket_id }.into()); }) } @@ -409,7 +483,7 @@ fn set_bucket_params_checks_work() { // Checking that event was emitted assert_eq!(System::events().len(), 1); - System::assert_last_event(Event::BucketCreated { bucket_id: 1u64 }.into()); + System::assert_last_event(Event::BucketCreated { cluster_id, bucket_id: 1u64 }.into()); let bucket_id = 1; let non_existent_bucket_id = 2; @@ -475,6 +549,7 @@ fn remove_bucket_works() { cluster_id, is_public: bucket_params.is_public, is_removed: false, + total_customers_usage: None, }) ); @@ -491,6 +566,7 @@ fn remove_bucket_works() { cluster_id, is_public: bucket_params.is_public, is_removed: true, + total_customers_usage: None, }) ); @@ -565,6 +641,7 @@ fn remove_bucket_checks_with_multiple_buckets_works() { cluster_id, is_public: private_bucket_params.is_public, is_removed: true, 
+ total_customers_usage: None, }) ); @@ -576,6 +653,7 @@ fn remove_bucket_checks_with_multiple_buckets_works() { cluster_id, is_public: public_bucket_params.is_public, is_removed: false, + total_customers_usage: None, }) ); diff --git a/pallets/ddc-customers/src/weights.rs b/pallets/ddc-customers/src/weights.rs index da33741d9..893b116d8 100644 --- a/pallets/ddc-customers/src/weights.rs +++ b/pallets/ddc-customers/src/weights.rs @@ -181,4 +181,4 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } -} \ No newline at end of file +} diff --git a/pallets/ddc-nodes/src/lib.rs b/pallets/ddc-nodes/src/lib.rs index 83e7ee60b..54af6aa03 100644 --- a/pallets/ddc-nodes/src/lib.rs +++ b/pallets/ddc-nodes/src/lib.rs @@ -32,7 +32,7 @@ use ddc_primitives::{ node::{NodeCreator, NodeVisitor}, staking::StakingVisitor, }, - ClusterId, NodeParams, NodePubKey, StorageNodePubKey, + ClusterId, NodeParams, NodePubKey, StorageNodeParams, StorageNodePubKey, }; use frame_support::pallet_prelude::*; use frame_system::pallet_prelude::*; @@ -48,6 +48,7 @@ pub use crate::{ #[frame_support::pallet] pub mod pallet { + use self::node::NodeProps; use super::*; /// The current storage version. 
@@ -238,6 +239,26 @@ pub mod pallet { let node = Self::get(node_pub_key.clone()).map_err(|_| Error::::NodeDoesNotExist)?; Ok(node.get_provider_id().clone()) } + + fn get_node_params(node_pub_key: &NodePubKey) -> Result { + let node = Self::get(node_pub_key.clone()).map_err(|_| Error::::NodeDoesNotExist)?; + let node_props = node.get_props().clone(); + + match node_pub_key { + NodePubKey::StoragePubKey(_) => match node_props { + NodeProps::StorageProps(node_props) => + Ok(ddc_primitives::NodeParams::StorageParams(StorageNodeParams { + mode: node_props.mode, + host: node_props.host.into(), + domain: node_props.domain.into(), + ssl: node_props.ssl, + http_port: node_props.http_port, + grpc_port: node_props.grpc_port, + p2p_port: node_props.p2p_port, + })), + }, + } + } } impl NodeCreator for Pallet { diff --git a/pallets/ddc-nodes/src/mock.rs b/pallets/ddc-nodes/src/mock.rs index ab5fb34f7..1e8933b8d 100644 --- a/pallets/ddc-nodes/src/mock.rs +++ b/pallets/ddc-nodes/src/mock.rs @@ -110,6 +110,10 @@ impl StakingVisitor for TestStakingVisitor { fn has_chilling_attempt(_node_pub_key: &NodePubKey) -> Result { Ok(false) } + + fn stash_by_ctrl(_controller: &T::AccountId) -> Result { + todo!() + } } pub(crate) type TestRuntimeCall = ::RuntimeCall; diff --git a/pallets/ddc-nodes/src/weights.rs b/pallets/ddc-nodes/src/weights.rs index a046a9eb0..917b5c671 100644 --- a/pallets/ddc-nodes/src/weights.rs +++ b/pallets/ddc-nodes/src/weights.rs @@ -86,4 +86,4 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().reads(1_u64)) .saturating_add(RocksDbWeight::get().writes(1_u64)) } -} \ No newline at end of file +} diff --git a/pallets/ddc-payouts/src/benchmarking.rs b/pallets/ddc-payouts/src/benchmarking.rs index 8b7877fc7..750d3c52b 100644 --- a/pallets/ddc-payouts/src/benchmarking.rs +++ b/pallets/ddc-payouts/src/benchmarking.rs @@ -1,6 +1,6 @@ //! DdcPayouts pallet benchmarking. 
-use ddc_primitives::{ClusterId, ClusterParams, ClusterProtocolParams}; +use ddc_primitives::{traits::ValidatorVisitor, ClusterId, ClusterParams, ClusterProtocolParams}; pub use frame_benchmarking::{account, benchmarks, whitelist_account}; use frame_system::RawOrigin; use sp_runtime::Perquintill; @@ -14,6 +14,7 @@ const CERE: u128 = 10000000000; fn create_dac_account() -> T::AccountId { let dac_account = create_account::("dac_account", 0, 0); authorize_account::(dac_account.clone()); + T::ValidatorVisitor::setup_validators(vec![dac_account.clone()]); dac_account } @@ -90,7 +91,7 @@ fn create_default_cluster(cluster_id: ClusterId) { struct BillingReportParams { cluster_id: ClusterId, era: DdcEra, - state: State, + state: PayoutState, total_customer_charge: CustomerCharge, total_distributed_reward: u128, total_node_usage: NodeUsage, @@ -148,13 +149,13 @@ benchmarks! { verify { assert!(ActiveBillingReports::::contains_key(cluster_id, era)); let billing_report = ActiveBillingReports::::get(cluster_id, era).unwrap(); - assert_eq!(billing_report.state, State::Initialized); + assert_eq!(billing_report.state, PayoutState::Initialized); } begin_charging_customers { let cluster_id = ClusterId::from([1; 20]); let era : DdcEra = 1; - let state = State::Initialized; + let state = PayoutState::Initialized; let total_customer_charge = CustomerCharge::default(); let total_distributed_reward : u128= 0; let total_node_usage = NodeUsage::default(); @@ -186,7 +187,7 @@ benchmarks! { verify { assert!(ActiveBillingReports::::contains_key(cluster_id, era)); let billing_report = ActiveBillingReports::::get(cluster_id, era).unwrap(); - assert_eq!(billing_report.state, State::ChargingCustomers); + assert_eq!(billing_report.state, PayoutState::ChargingCustomers); assert_eq!(billing_report.charging_max_batch_index, max_batch_index); } @@ -195,7 +196,7 @@ benchmarks! 
{ let cluster_id = ClusterId::from([1; 20]); let era : DdcEra = 1; - let state = State::ChargingCustomers; + let state = PayoutState::ChargingCustomers; let total_customer_charge = CustomerCharge::default(); let total_distributed_reward : u128 = 0; let total_node_usage = NodeUsage::default(); @@ -222,7 +223,7 @@ benchmarks! { }); let batch_index: BatchIndex = 0; - let payers: Vec<(T::AccountId, CustomerUsage)> = (0..b).map(|i| { + let payers: Vec<(T::AccountId, BucketId, CustomerUsage)> = (0..b).map(|i| { let customer = create_account::("customer", i, i); if b % 2 == 0 { @@ -239,22 +240,23 @@ benchmarks! { number_of_gets: 10, // 10 gets number_of_puts: 5, // 5 puts }; + let bucket_id: BucketId = 1; - (customer, customer_usage) + (customer, bucket_id, customer_usage) }).collect(); - }: _(RawOrigin::Signed(dac_account.clone()), cluster_id, era, batch_index, payers) + }: _(RawOrigin::Signed(dac_account.clone()), cluster_id, era, batch_index, payers, MMRProof::default()) verify { assert!(ActiveBillingReports::::contains_key(cluster_id, era)); let billing_report = ActiveBillingReports::::get(cluster_id, era).unwrap(); - assert_eq!(billing_report.state, State::ChargingCustomers); + assert_eq!(billing_report.state, PayoutState::ChargingCustomers); assert!(billing_report.charging_processed_batches.contains(&batch_index)); } end_charging_customers { let cluster_id = ClusterId::from([1; 20]); let era : DdcEra = 1; - let state = State::ChargingCustomers; + let state = PayoutState::ChargingCustomers; let total_customer_charge = CustomerCharge { transfer: 200 * CERE, // price for 200 mb storage: 100 * CERE, // price for 100 mb @@ -293,14 +295,14 @@ benchmarks! 
{ }: _(RawOrigin::Signed(dac_account.clone()), cluster_id, era) verify { let billing_report = ActiveBillingReports::::get(cluster_id, era).unwrap(); - assert_eq!(billing_report.state, State::CustomersChargedWithFees); + assert_eq!(billing_report.state, PayoutState::CustomersChargedWithFees); assert!(billing_report.charging_processed_batches.contains(&charging_max_batch_index)); } begin_rewarding_providers { let cluster_id = ClusterId::from([1; 20]); let era : DdcEra = 1; - let state = State::CustomersChargedWithFees; + let state = PayoutState::CustomersChargedWithFees; let total_customer_charge = CustomerCharge { transfer: 200 * CERE, // price for 200 mb storage: 100 * CERE, // price for 100 mb @@ -343,7 +345,7 @@ benchmarks! { }: _(RawOrigin::Signed(dac_account.clone()), cluster_id, era, max_batch_index, total_node_usage) verify { let billing_report = ActiveBillingReports::::get(cluster_id, era).unwrap(); - assert_eq!(billing_report.state, State::RewardingProviders); + assert_eq!(billing_report.state, PayoutState::RewardingProviders); assert_eq!(billing_report.rewarding_max_batch_index, max_batch_index); } @@ -352,7 +354,7 @@ benchmarks! { let cluster_id = ClusterId::from([1; 20]); let era : DdcEra = 1; - let state = State::RewardingProviders; + let state = PayoutState::RewardingProviders; let total_customer_charge = CustomerCharge { transfer: (200 * CERE).saturating_mul(b.into()), // price for 200 mb per customer storage: (100 * CERE).saturating_mul(b.into()), // price for 100 mb per customer @@ -406,18 +408,18 @@ benchmarks! 
{ (provider, node_usage) }).collect(); - }: _(RawOrigin::Signed(dac_account.clone()), cluster_id, era, batch_index, payees) + }: _(RawOrigin::Signed(dac_account.clone()), cluster_id, era, batch_index, payees, MMRProof::default()) verify { assert!(ActiveBillingReports::::contains_key(cluster_id, era)); let billing_report = ActiveBillingReports::::get(cluster_id, era).unwrap(); - assert_eq!(billing_report.state, State::RewardingProviders); + assert_eq!(billing_report.state, PayoutState::RewardingProviders); assert!(billing_report.rewarding_processed_batches.contains(&batch_index)); } end_rewarding_providers { let cluster_id = ClusterId::from([1; 20]); let era : DdcEra = 1; - let state = State::RewardingProviders; + let state = PayoutState::RewardingProviders; let total_customer_charge = CustomerCharge { transfer: 200 * CERE, // price for 200 mb storage: 100 * CERE, // price for 100 mb @@ -459,13 +461,13 @@ benchmarks! { verify { assert!(ActiveBillingReports::::contains_key(cluster_id, era)); let billing_report = ActiveBillingReports::::get(cluster_id, era).unwrap(); - assert_eq!(billing_report.state, State::ProvidersRewarded); + assert_eq!(billing_report.state, PayoutState::ProvidersRewarded); } end_billing_report { let cluster_id = ClusterId::from([1; 20]); let era : DdcEra = 1; - let state = State::ProvidersRewarded; + let state = PayoutState::ProvidersRewarded; let total_customer_charge = CustomerCharge { transfer: 200 * CERE, // price for 200 mb storage: 100 * CERE, // price for 100 mb @@ -507,7 +509,7 @@ benchmarks! 
{ verify { assert!(ActiveBillingReports::::contains_key(cluster_id, era)); let billing_report = ActiveBillingReports::::get(cluster_id, era).unwrap(); - assert_eq!(billing_report.state, State::Finalized); + assert_eq!(billing_report.state, PayoutState::Finalized); } } diff --git a/pallets/ddc-payouts/src/lib.rs b/pallets/ddc-payouts/src/lib.rs index f543db269..68b17795c 100644 --- a/pallets/ddc-payouts/src/lib.rs +++ b/pallets/ddc-payouts/src/lib.rs @@ -15,6 +15,7 @@ #![recursion_limit = "256"] pub mod weights; + use crate::weights::WeightInfo; #[cfg(feature = "runtime-benchmarks")] @@ -27,13 +28,16 @@ mod tests; use ddc_primitives::{ traits::{ + bucket::BucketVisitor as BucketVisitorType, cluster::{ClusterCreator as ClusterCreatorType, ClusterProtocol as ClusterProtocolType}, customer::{ CustomerCharger as CustomerChargerType, CustomerDepositor as CustomerDepositorType, }, pallet::PalletVisitor as PalletVisitorType, + payout::PayoutVisitor, }, - ClusterId, DdcEra, MILLICENTS, + BatchIndex, BucketId, ClusterId, CustomerUsage, DdcEra, MMRProof, NodeUsage, PayoutError, + PayoutState, MAX_PAYOUT_BATCH_COUNT, MAX_PAYOUT_BATCH_SIZE, MILLICENTS, }; use frame_election_provider_support::SortedListProvider; use frame_support::{ @@ -48,26 +52,6 @@ pub use pallet::*; use sp_runtime::{traits::Convert, PerThing, Perquintill}; use sp_std::prelude::*; -type BatchIndex = u16; - -/// Stores usage of customers -#[derive(PartialEq, Encode, Decode, RuntimeDebug, TypeInfo, Default, Clone)] -pub struct CustomerUsage { - pub transferred_bytes: u64, - pub stored_bytes: u64, - pub number_of_puts: u64, - pub number_of_gets: u64, -} - -/// Stores usage of node provider -#[derive(PartialEq, Encode, Decode, RuntimeDebug, TypeInfo, Default, Clone)] -pub struct NodeUsage { - pub transferred_bytes: u64, - pub stored_bytes: u64, - pub number_of_puts: u64, - pub number_of_gets: u64, -} - /// Stores reward in tokens(units) of node provider as per NodeUsage #[derive(PartialEq, Encode, Decode, 
RuntimeDebug, TypeInfo, Default, Clone)] pub struct NodeReward { @@ -104,13 +88,14 @@ pub type VoteScoreOf = >>::Score; parameter_types! { - pub MaxBatchesCount: u16 = 1000; + pub MaxBatchesCount: u16 = MAX_PAYOUT_BATCH_COUNT; pub MaxDust: u128 = MILLICENTS; - pub MaxBatchSize: u16 = 1000; + pub MaxBatchSize: u16 = MAX_PAYOUT_BATCH_SIZE; } #[frame_support::pallet] pub mod pallet { + use ddc_primitives::traits::ValidatorVisitor; use frame_support::PalletId; use sp_io::hashing::blake2_128; use sp_runtime::traits::{AccountIdConversion, Zero}; @@ -133,6 +118,7 @@ pub mod pallet { type PalletId: Get; type Currency: LockableCurrency>; type CustomerCharger: CustomerChargerType; + type BucketVisitor: BucketVisitorType; type CustomerDepositor: CustomerDepositorType; type TreasuryVisitor: PalletVisitorType; type ClusterProtocol: ClusterProtocolType>; @@ -140,6 +126,7 @@ pub mod pallet { type ClusterCreator: ClusterCreatorType>; type WeightInfo: WeightInfo; type VoteScoreToU64: Convert, u64>; + type ValidatorVisitor: ValidatorVisitor; } #[pallet::event] @@ -158,6 +145,7 @@ pub mod pallet { era: DdcEra, batch_index: BatchIndex, customer_id: T::AccountId, + bucket_id: BucketId, amount: u128, }, ChargeFailed { @@ -165,6 +153,7 @@ pub mod pallet { era: DdcEra, batch_index: BatchIndex, customer_id: T::AccountId, + bucket_id: BucketId, charged: u128, expected_to_charge: u128, }, @@ -173,6 +162,7 @@ pub mod pallet { era: DdcEra, batch_index: BatchIndex, customer_id: T::AccountId, + bucket_id: BucketId, amount: u128, }, ChargingFinished { @@ -198,14 +188,10 @@ pub mod pallet { cluster_id: ClusterId, era: DdcEra, }, - ProviderRewarded { + Rewarded { cluster_id: ClusterId, era: DdcEra, batch_index: BatchIndex, - stored_bytes: u64, - transferred_bytes: u64, - number_of_puts: u64, - number_of_gets: u64, node_provider_id: T::AccountId, rewarded: u128, expected_to_reward: u128, @@ -264,9 +250,11 @@ pub mod pallet { BoundedVecOverflow, ArithmeticOverflow, NotExpectedClusterState, + 
NotExpectedBucketState, BatchSizeIsOutOfBounds, ScoreRetrievalError, BadRequest, + BatchValidationFailed, } #[pallet::storage] @@ -297,7 +285,7 @@ pub mod pallet { #[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo, PartialEq)] #[scale_info(skip_type_params(T))] pub struct BillingReport { - pub state: State, + pub state: PayoutState, pub vault: T::AccountId, pub start_era: i64, pub end_era: i64, @@ -315,7 +303,7 @@ pub mod pallet { impl Default for BillingReport { fn default() -> Self { Self { - state: State::default(), + state: PayoutState::default(), vault: T::PalletId::get().into_account_truncating(), start_era: Zero::zero(), end_era: Zero::zero(), @@ -346,6 +334,8 @@ pub mod pallet { #[pallet::call] impl Pallet { + // todo! remove extrensics from payout pallet and factor the extrensics implementation into + // PayoutProcessor trait #[pallet::call_index(0)] #[pallet::weight(T::WeightInfo::set_authorised_caller())] pub fn set_authorised_caller( @@ -371,7 +361,9 @@ pub mod pallet { end_era: i64, ) -> DispatchResult { let caller = ensure_signed(origin)?; - ensure!(Self::authorised_caller() == Some(caller), Error::::Unauthorised); + ensure!(Self::authorised_caller() == Some(caller.clone()), Error::::Unauthorised); + ensure!(T::ValidatorVisitor::is_ocw_validator(caller), Error::::Unauthorised); // + // todo! need to refactor this ensure!( ActiveBillingReports::::try_get(cluster_id, era).is_err(), @@ -382,7 +374,7 @@ pub mod pallet { let billing_report = BillingReport:: { vault: Self::account_id(), - state: State::Initialized, + state: PayoutState::Initialized, start_era, end_era, ..Default::default() @@ -394,6 +386,8 @@ pub mod pallet { Ok(()) } + // todo! 
remove extrensics from payout pallet and factor the extrensics implementation into + // PayoutProcessor trait #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::begin_charging_customers())] pub fn begin_charging_customers( @@ -403,17 +397,19 @@ pub mod pallet { max_batch_index: BatchIndex, ) -> DispatchResult { let caller = ensure_signed(origin)?; - ensure!(Self::authorised_caller() == Some(caller), Error::::Unauthorised); + ensure!(Self::authorised_caller() == Some(caller.clone()), Error::::Unauthorised); + ensure!(T::ValidatorVisitor::is_ocw_validator(caller), Error::::Unauthorised); // + // todo! need to refactor this ensure!(max_batch_index < MaxBatchesCount::get(), Error::::BatchIndexOverflow); let mut billing_report = ActiveBillingReports::::try_get(cluster_id, era) .map_err(|_| Error::::BillingReportDoesNotExist)?; - ensure!(billing_report.state == State::Initialized, Error::::NotExpectedState); + ensure!(billing_report.state == PayoutState::Initialized, Error::::NotExpectedState); billing_report.charging_max_batch_index = max_batch_index; - billing_report.state = State::ChargingCustomers; + billing_report.state = PayoutState::ChargingCustomers; ActiveBillingReports::::insert(cluster_id, era, billing_report); Self::deposit_event(Event::::ChargingStarted { cluster_id, era }); @@ -421,6 +417,8 @@ pub mod pallet { Ok(()) } + // todo! 
remove extrensics from payout pallet and factor the extrensics implementation into + // + pass values by reference PayoutProcessor trait #[pallet::call_index(3)] #[pallet::weight(T::WeightInfo::send_charging_customers_batch(payers.len().saturated_into()))] pub fn send_charging_customers_batch( @@ -428,10 +426,12 @@ pub mod pallet { cluster_id: ClusterId, era: DdcEra, batch_index: BatchIndex, - payers: Vec<(T::AccountId, CustomerUsage)>, + payers: Vec<(T::AccountId, BucketId, CustomerUsage)>, + batch_proof: MMRProof, ) -> DispatchResult { let caller = ensure_signed(origin)?; - ensure!(Self::authorised_caller() == Some(caller), Error::::Unauthorised); + ensure!(Self::authorised_caller() == Some(caller.clone()), Error::::Unauthorised); + ensure!(T::ValidatorVisitor::is_ocw_validator(caller), Error::::Unauthorised); ensure!( !payers.is_empty() && payers.len() <= MaxBatchSize::get() as usize, @@ -441,7 +441,10 @@ pub mod pallet { let billing_report = ActiveBillingReports::::try_get(cluster_id, era) .map_err(|_| Error::::BillingReportDoesNotExist)?; - ensure!(billing_report.state == State::ChargingCustomers, Error::::NotExpectedState); + ensure!( + billing_report.state == PayoutState::ChargingCustomers, + Error::::NotExpectedState + ); ensure!( billing_report.charging_max_batch_index >= batch_index, Error::::BatchIndexIsOutOfRange @@ -451,11 +454,24 @@ pub mod pallet { Error::::BatchIndexAlreadyProcessed ); + ensure!( + T::ValidatorVisitor::is_customers_batch_valid( + cluster_id, + era, + batch_index, + &payers, + &batch_proof + ), + Error::::BatchValidationFailed + ); + let mut updated_billing_report = billing_report; - for payer in payers { + for (customer_id, bucket_id, customer_usage) in payers { let mut customer_charge = get_customer_charge::( - cluster_id, - &payer.1, + &cluster_id, + &customer_usage, + bucket_id, + &customer_id, updated_billing_report.start_era, updated_billing_report.end_era, )?; @@ -468,10 +484,12 @@ pub mod pallet { })() 
.ok_or(Error::::ArithmeticOverflow)?; - let customer_id = payer.0.clone(); let amount_actually_charged = match T::CustomerCharger::charge_content_owner( + &cluster_id, + bucket_id, customer_id.clone(), updated_billing_report.vault.clone(), + &customer_usage, total_customer_charge, ) { Ok(actually_charged) => actually_charged, @@ -508,6 +526,7 @@ pub mod pallet { era, batch_index, customer_id: customer_id.clone(), + bucket_id, amount: debt, }); @@ -516,6 +535,7 @@ pub mod pallet { era, batch_index, customer_id, + bucket_id, charged: amount_actually_charged, expected_to_charge: total_customer_charge, }); @@ -535,6 +555,7 @@ pub mod pallet { era, batch_index, customer_id, + bucket_id, amount: total_customer_charge, }); } @@ -574,6 +595,8 @@ pub mod pallet { Ok(()) } + // todo! remove extrensics from payout pallet and factor the extrensics implementation into + // PayoutProcessor trait #[pallet::call_index(4)] #[pallet::weight(T::WeightInfo::end_charging_customers())] pub fn end_charging_customers( @@ -582,13 +605,17 @@ pub mod pallet { era: DdcEra, ) -> DispatchResult { let caller = ensure_signed(origin)?; - ensure!(Self::authorised_caller() == Some(caller), Error::::Unauthorised); + ensure!(Self::authorised_caller() == Some(caller.clone()), Error::::Unauthorised); + ensure!(T::ValidatorVisitor::is_ocw_validator(caller), Error::::Unauthorised); let mut billing_report = ActiveBillingReports::::try_get(cluster_id, era) .map_err(|_| Error::::BillingReportDoesNotExist)?; - ensure!(billing_report.state == State::ChargingCustomers, Error::::NotExpectedState); - validate_batches::( + ensure!( + billing_report.state == PayoutState::ChargingCustomers, + Error::::NotExpectedState + ); + Self::validate_batches( &billing_report.charging_processed_batches, &billing_report.charging_max_batch_index, )?; @@ -667,12 +694,14 @@ pub mod pallet { total_left_from_one * billing_report.total_customer_charge.gets; } - billing_report.state = State::CustomersChargedWithFees; + 
billing_report.state = PayoutState::CustomersChargedWithFees; ActiveBillingReports::::insert(cluster_id, era, billing_report); Ok(()) } + // todo! remove extrensics from payout pallet and factor the extrensics implementation into + // PayoutProcessor trait #[pallet::call_index(5)] #[pallet::weight(T::WeightInfo::begin_rewarding_providers())] pub fn begin_rewarding_providers( @@ -683,7 +712,8 @@ pub mod pallet { total_node_usage: NodeUsage, ) -> DispatchResult { let caller = ensure_signed(origin)?; - ensure!(Self::authorised_caller() == Some(caller), Error::::Unauthorised); + ensure!(Self::authorised_caller() == Some(caller.clone()), Error::::Unauthorised); + ensure!(T::ValidatorVisitor::is_ocw_validator(caller), Error::::Unauthorised); ensure!(max_batch_index < MaxBatchesCount::get(), Error::::BatchIndexOverflow); @@ -691,13 +721,13 @@ pub mod pallet { .map_err(|_| Error::::BillingReportDoesNotExist)?; ensure!( - billing_report.state == State::CustomersChargedWithFees, + billing_report.state == PayoutState::CustomersChargedWithFees, Error::::NotExpectedState ); billing_report.total_node_usage = total_node_usage; billing_report.rewarding_max_batch_index = max_batch_index; - billing_report.state = State::RewardingProviders; + billing_report.state = PayoutState::RewardingProviders; ActiveBillingReports::::insert(cluster_id, era, billing_report); Self::deposit_event(Event::::RewardingStarted { cluster_id, era }); @@ -705,6 +735,8 @@ pub mod pallet { Ok(()) } + // todo! 
remove extrensics from payout pallet and factor the extrensics implementation into + // + pass values by reference PayoutProcessor trait #[pallet::call_index(6)] #[pallet::weight(T::WeightInfo::send_rewarding_providers_batch(payees.len().saturated_into()))] pub fn send_rewarding_providers_batch( @@ -713,9 +745,11 @@ pub mod pallet { era: DdcEra, batch_index: BatchIndex, payees: Vec<(T::AccountId, NodeUsage)>, + batch_proof: MMRProof, ) -> DispatchResult { let caller = ensure_signed(origin)?; - ensure!(Self::authorised_caller() == Some(caller), Error::::Unauthorised); + ensure!(Self::authorised_caller() == Some(caller.clone()), Error::::Unauthorised); + ensure!(T::ValidatorVisitor::is_ocw_validator(caller), Error::::Unauthorised); ensure!( !payees.is_empty() && payees.len() <= MaxBatchSize::get() as usize, @@ -726,7 +760,7 @@ pub mod pallet { .map_err(|_| Error::::BillingReportDoesNotExist)?; ensure!( - billing_report.state == State::RewardingProviders, + billing_report.state == PayoutState::RewardingProviders, Error::::NotExpectedState ); ensure!( @@ -738,11 +772,22 @@ pub mod pallet { Error::::BatchIndexAlreadyProcessed ); + ensure!( + T::ValidatorVisitor::is_providers_batch_valid( + cluster_id, + era, + batch_index, + &payees, + &batch_proof + ), + Error::::BatchValidationFailed + ); + let max_dust = MaxDust::get().saturated_into::>(); let mut updated_billing_report = billing_report.clone(); - for payee in payees { + for (node_provider_id, node_usage) in payees { let node_reward = get_node_reward( - &payee.1, + &node_usage, &billing_report.total_node_usage, &billing_report.total_customer_charge, ) @@ -756,7 +801,6 @@ pub mod pallet { })() .ok_or(Error::::ArithmeticOverflow)?; - let node_provider_id = payee.0; let mut reward_ = amount_to_reward; let mut reward: BalanceOf = amount_to_reward.saturated_into::>(); if amount_to_reward > 0 { @@ -795,14 +839,10 @@ pub mod pallet { .ok_or(Error::::ArithmeticOverflow)?; } - Self::deposit_event(Event::::ProviderRewarded { + 
Self::deposit_event(Event::::Rewarded { cluster_id, era, batch_index, - stored_bytes: payee.1.stored_bytes, - transferred_bytes: payee.1.transferred_bytes, - number_of_puts: payee.1.number_of_puts, - number_of_gets: payee.1.number_of_gets, node_provider_id, rewarded: reward_, expected_to_reward: amount_to_reward, @@ -819,6 +859,8 @@ pub mod pallet { Ok(()) } + // todo! remove extrensics from payout pallet and factor the extrensics implementation into + // PayoutProcessor trait #[pallet::call_index(7)] #[pallet::weight(T::WeightInfo::end_rewarding_providers())] pub fn end_rewarding_providers( @@ -827,17 +869,18 @@ pub mod pallet { era: DdcEra, ) -> DispatchResult { let caller = ensure_signed(origin)?; - ensure!(Self::authorised_caller() == Some(caller), Error::::Unauthorised); + ensure!(Self::authorised_caller() == Some(caller.clone()), Error::::Unauthorised); + ensure!(T::ValidatorVisitor::is_ocw_validator(caller), Error::::Unauthorised); let mut billing_report = ActiveBillingReports::::try_get(cluster_id, era) .map_err(|_| Error::::BillingReportDoesNotExist)?; ensure!( - billing_report.state == State::RewardingProviders, + billing_report.state == PayoutState::RewardingProviders, Error::::NotExpectedState ); - validate_batches::( + Self::validate_batches( &billing_report.rewarding_processed_batches, &billing_report.rewarding_max_batch_index, )?; @@ -862,7 +905,7 @@ pub mod pallet { }); } - billing_report.state = State::ProvidersRewarded; + billing_report.state = PayoutState::ProvidersRewarded; ActiveBillingReports::::insert(cluster_id, era, billing_report); Self::deposit_event(Event::::RewardingFinished { cluster_id, era }); @@ -870,6 +913,8 @@ pub mod pallet { Ok(()) } + // todo! 
remove extrinsics from payout pallet and factor the extrinsics implementation into + // PayoutProcessor trait #[pallet::call_index(8)] #[pallet::weight(T::WeightInfo::end_billing_report())] pub fn end_billing_report( @@ -878,16 +923,20 @@ pub mod pallet { era: DdcEra, ) -> DispatchResult { let caller = ensure_signed(origin)?; - ensure!(Self::authorised_caller() == Some(caller), Error::::Unauthorised); + ensure!(Self::authorised_caller() == Some(caller.clone()), Error::::Unauthorised); + ensure!(T::ValidatorVisitor::is_ocw_validator(caller), Error::::Unauthorised); let mut billing_report = ActiveBillingReports::::try_get(cluster_id, era) .map_err(|_| Error::::BillingReportDoesNotExist)?; - ensure!(billing_report.state == State::ProvidersRewarded, Error::::NotExpectedState); + ensure!( + billing_report.state == PayoutState::ProvidersRewarded, + Error::::NotExpectedState + ); billing_report.charging_processed_batches.clear(); billing_report.rewarding_processed_batches.clear(); - billing_report.state = State::Finalized; + billing_report.state = PayoutState::Finalized; ActiveBillingReports::::insert(cluster_id, era, billing_report); Self::deposit_event(Event::::BillingReportFinalized { cluster_id, era }); @@ -1008,14 +1057,16 @@ pub mod pallet { } fn get_customer_charge( - cluster_id: ClusterId, + cluster_id: &ClusterId, usage: &CustomerUsage, + bucket_id: BucketId, + customer_id: &T::AccountId, start_era: i64, end_era: i64, ) -> Result> { let mut total = CustomerCharge::default(); - let pricing = T::ClusterProtocol::get_pricing_params(&cluster_id) + let pricing = T::ClusterProtocol::get_pricing_params(cluster_id) .map_err(|_| Error::::NotExpectedClusterState)?; total.transfer = (|| -> Option { @@ -1031,9 +1082,15 @@ pub mod pallet { let fraction_of_month = Perquintill::from_rational(duration_seconds as u64, seconds_in_month as u64); + let mut total_stored_bytes = + T::BucketVisitor::get_total_customer_usage(cluster_id, bucket_id, customer_id) + .map_err(|_| 
Error::::NotExpectedBucketState)? + .map_or(0, |customer_usage| customer_usage.stored_bytes); + total_stored_bytes += usage.stored_bytes; + total.storage = fraction_of_month * (|| -> Option { - (usage.stored_bytes as u128) + (total_stored_bytes as u128) .checked_mul(pricing.unit_per_mb_stored)? .checked_div(byte_unit::MEBIBYTE) })() @@ -1050,22 +1107,6 @@ pub mod pallet { Ok(total) } - fn validate_batches( - batches: &BoundedBTreeSet, - max_batch_index: &BatchIndex, - ) -> DispatchResult { - // Check if the Vec contains all integers between 1 and rewarding_max_batch_index - ensure!(!batches.is_empty(), Error::::BatchesMissed); - - ensure!((*max_batch_index + 1) as usize == batches.len(), Error::::BatchesMissed); - - for index in 0..*max_batch_index + 1 { - ensure!(batches.contains(&index), Error::::BatchesMissed); - } - - Ok(()) - } - #[pallet::genesis_config] pub struct GenesisConfig { pub feeder_account: Option, @@ -1109,6 +1150,180 @@ pub mod pallet { } } + impl PayoutVisitor for Pallet { + fn begin_billing_report( + origin: T::AccountId, + cluster_id: ClusterId, + era_id: DdcEra, + start_era: i64, + end_era: i64, + ) -> DispatchResult { + let origin = frame_system::RawOrigin::Signed(origin).into(); + Self::begin_billing_report(origin, cluster_id, era_id, start_era, end_era) + } + + fn begin_charging_customers( + origin: T::AccountId, + cluster_id: ClusterId, + era_id: DdcEra, + max_batch_index: BatchIndex, + ) -> DispatchResult { + let origin = frame_system::RawOrigin::Signed(origin).into(); + Self::begin_charging_customers(origin, cluster_id, era_id, max_batch_index) + } + + fn send_charging_customers_batch( + origin: T::AccountId, + cluster_id: ClusterId, + era_id: DdcEra, + batch_index: BatchIndex, + payers: &[(T::AccountId, BucketId, CustomerUsage)], + batch_proof: MMRProof, + ) -> DispatchResult { + let origin = frame_system::RawOrigin::Signed(origin).into(); + Self::send_charging_customers_batch( + origin, + cluster_id, + era_id, + batch_index, + 
(*payers).to_vec(), + batch_proof, + ) + } + + fn end_charging_customers( + origin: T::AccountId, + cluster_id: ClusterId, + era_id: DdcEra, + ) -> DispatchResult { + let origin = frame_system::RawOrigin::Signed(origin).into(); + Self::end_charging_customers(origin, cluster_id, era_id) + } + + fn begin_rewarding_providers( + origin: T::AccountId, + cluster_id: ClusterId, + era_id: DdcEra, + max_batch_index: BatchIndex, + total_node_usage: NodeUsage, + ) -> DispatchResult { + let origin = frame_system::RawOrigin::Signed(origin).into(); + Self::begin_rewarding_providers( + origin, + cluster_id, + era_id, + max_batch_index, + total_node_usage, + ) + } + + fn send_rewarding_providers_batch( + origin: T::AccountId, + cluster_id: ClusterId, + era_id: DdcEra, + batch_index: BatchIndex, + payees: &[(T::AccountId, NodeUsage)], + batch_proof: MMRProof, + ) -> DispatchResult { + let origin = frame_system::RawOrigin::Signed(origin).into(); + Self::send_rewarding_providers_batch( + origin, + cluster_id, + era_id, + batch_index, + (*payees).to_vec(), + batch_proof, + ) + } + + fn end_rewarding_providers( + origin: T::AccountId, + cluster_id: ClusterId, + era_id: DdcEra, + ) -> DispatchResult { + let origin = frame_system::RawOrigin::Signed(origin).into(); + Self::end_rewarding_providers(origin, cluster_id, era_id) + } + + fn end_billing_report( + origin: T::AccountId, + cluster_id: ClusterId, + era_id: DdcEra, + ) -> DispatchResult { + let origin = frame_system::RawOrigin::Signed(origin).into(); + Self::end_billing_report(origin, cluster_id, era_id) + } + + fn get_billing_report_status(cluster_id: &ClusterId, era: DdcEra) -> PayoutState { + let billing_report = ActiveBillingReports::::get(cluster_id, era); + + match billing_report { + Some(report) => report.state, + None => PayoutState::NotInitialized, // Return NotInitialized if entry doesn't exist + } + } + + fn all_customer_batches_processed(cluster_id: &ClusterId, era_id: DdcEra) -> bool { + let billing_report = match 
ActiveBillingReports::::try_get(cluster_id, era_id) { + Ok(report) => report, + Err(_) => return false, /* Return false if there's any error (e.g., + * BillingReportDoesNotExist) */ + }; + + Self::validate_batches( + &billing_report.charging_processed_batches, + &billing_report.charging_max_batch_index, + ) + .is_ok() + } + + fn all_provider_batches_processed(cluster_id: &ClusterId, era_id: DdcEra) -> bool { + let billing_report = match ActiveBillingReports::::try_get(cluster_id, era_id) { + Ok(report) => report, + Err(_) => return false, /* Return false if there's any error (e.g., + * BillingReportDoesNotExist) */ + }; + + Self::validate_batches( + &billing_report.rewarding_processed_batches, + &billing_report.rewarding_max_batch_index, + ) + .is_ok() + } + + fn get_next_customer_batch_for_payment( + cluster_id: &ClusterId, + era_id: DdcEra, + ) -> Result, PayoutError> { + let billing_report = ActiveBillingReports::::try_get(cluster_id, era_id) + .map_err(|_| PayoutError::BillingReportDoesNotExist)?; + + for batch_index in 0..=billing_report.charging_max_batch_index { + if !billing_report.charging_processed_batches.contains(&batch_index) { + return Ok(Some(batch_index)); + } + } + + Ok(None) + } + + fn get_next_provider_batch_for_payment( + cluster_id: &ClusterId, + era_id: DdcEra, + ) -> Result, PayoutError> { + let billing_report = ActiveBillingReports::::try_get(cluster_id, era_id) + .map_err(|_| PayoutError::BillingReportDoesNotExist)?; + + for batch_index in 0..=billing_report.rewarding_max_batch_index { + if !billing_report.rewarding_processed_batches.contains(&batch_index) { + return Ok(Some(batch_index)); + } + } + + Ok(None) + } + } + impl Pallet { pub fn account_id() -> T::AccountId { T::PalletId::get().into_account_truncating() @@ -1125,5 +1340,21 @@ pub mod pallet { // be fulfilled with trailing zeros. 
T::PalletId::get().into_sub_account_truncating(hash) } + + pub(crate) fn validate_batches( + batches: &BoundedBTreeSet, + max_batch_index: &BatchIndex, + ) -> DispatchResult { + // Check if the Vec contains all integers between 1 and rewarding_max_batch_index + ensure!(!batches.is_empty(), Error::::BatchesMissed); + + ensure!((*max_batch_index + 1) as usize == batches.len(), Error::::BatchesMissed); + + for index in 0..*max_batch_index + 1 { + ensure!(batches.contains(&index), Error::::BatchesMissed); + } + + Ok(()) + } } } diff --git a/pallets/ddc-payouts/src/mock.rs b/pallets/ddc-payouts/src/mock.rs index 8154dff95..32f828e8b 100644 --- a/pallets/ddc-payouts/src/mock.rs +++ b/pallets/ddc-payouts/src/mock.rs @@ -4,13 +4,14 @@ use ddc_primitives::{ traits::{ + bucket::BucketVisitor, cluster::{ClusterCreator, ClusterProtocol}, customer::{CustomerCharger, CustomerDepositor}, pallet::PalletVisitor, - ClusterQuery, + ClusterQuery, ValidatorVisitor, }, - ClusterBondingParams, ClusterFeesParams, ClusterParams, ClusterPricingParams, - ClusterProtocolParams, ClusterStatus, NodeType, DOLLARS, + BucketVisitorError, ClusterBondingParams, ClusterFeesParams, ClusterParams, + ClusterPricingParams, ClusterProtocolParams, ClusterStatus, NodeType, DOLLARS, }; use frame_election_provider_support::SortedListProvider; use frame_support::{ @@ -124,6 +125,7 @@ impl crate::pallet::Config for Test { type PalletId = PayoutsPalletId; type Currency = Balances; type CustomerCharger = TestCustomerCharger; + type BucketVisitor = TestBucketVisitor; type CustomerDepositor = TestCustomerDepositor; type ClusterProtocol = TestClusterProtocol; type TreasuryVisitor = TestTreasuryVisitor; @@ -132,13 +134,62 @@ impl crate::pallet::Config for Test { type VoteScoreToU64 = Identity; type WeightInfo = (); + type ValidatorVisitor = MockValidatorVisitor; +} + +pub struct MockValidatorVisitor; + +impl ValidatorVisitor for MockValidatorVisitor +where + ::AccountId: From, +{ + fn setup_validators(_validators: 
Vec) { + unimplemented!() + } + fn is_ocw_validator(caller: T::AccountId) -> bool { + let validators = [DAC_ACCOUNT_ID.into(), 123u128.into()]; + validators.contains(&caller) + } + fn is_customers_batch_valid( + _cluster_id: ClusterId, + _era: DdcEra, + _batch_index: BatchIndex, + _payers: &[(T::AccountId, BucketId, CustomerUsage)], + _batch_proof: &MMRProof, + ) -> bool { + true + } + + fn is_providers_batch_valid( + _cluster_id: ClusterId, + _era: DdcEra, + _batch_index: BatchIndex, + _payees: &[(T::AccountId, NodeUsage)], + _batch_proof: &MMRProof, + ) -> bool { + true + } +} + +pub struct TestBucketVisitor; +impl BucketVisitor for TestBucketVisitor { + fn get_total_customer_usage( + _cluster_id: &ClusterId, + _bucket_id: BucketId, + _content_owner: &T::AccountId, + ) -> Result, BucketVisitorError> { + Ok(None) + } } pub struct TestCustomerCharger; impl CustomerCharger for TestCustomerCharger { fn charge_content_owner( + _cluster_id: &ClusterId, + _bucket_id: BucketId, content_owner: T::AccountId, billing_vault: T::AccountId, + _customer_usage: &CustomerUsage, amount: u128, ) -> Result { let mut amount_to_charge = amount; @@ -212,6 +263,7 @@ impl CustomerDepositor for TestCustomerDepositor { } } +pub const DAC_ACCOUNT_ID: AccountId = 2; pub const RESERVE_ACCOUNT_ID: AccountId = 999; pub const TREASURY_ACCOUNT_ID: AccountId = 888; pub const VALIDATOR1_ACCOUNT_ID: AccountId = 111; diff --git a/pallets/ddc-payouts/src/tests.rs b/pallets/ddc-payouts/src/tests.rs index 9d50196ef..57dcd7267 100644 --- a/pallets/ddc-payouts/src/tests.rs +++ b/pallets/ddc-payouts/src/tests.rs @@ -99,7 +99,7 @@ fn begin_billing_report_works() { System::assert_last_event(Event::BillingReportInitialized { cluster_id, era }.into()); let report = DdcPayouts::active_billing_reports(cluster_id, era).unwrap(); - assert_eq!(report.state, State::Initialized); + assert_eq!(report.state, PayoutState::Initialized); assert_eq!(report.start_era, start_era); assert_eq!(report.end_era, end_era); }) @@ 
-108,7 +108,7 @@ fn begin_billing_report_works() { #[test] fn begin_charging_customers_fails_uninitialised() { ExtBuilder.build_and_execute(|| { - let dac_account = 2u128; + let dac_account = 3u128; let cluster_id = ClusterId::from([12; 20]); let era = 100; let max_batch_index = 2; @@ -133,11 +133,11 @@ fn begin_charging_customers_fails_uninitialised() { BadOrigin ); - assert_ok!(DdcPayouts::set_authorised_caller(RuntimeOrigin::root(), dac_account)); + assert_ok!(DdcPayouts::set_authorised_caller(RuntimeOrigin::root(), DAC_ACCOUNT_ID)); assert_noop!( DdcPayouts::begin_charging_customers( - RuntimeOrigin::signed(dac_account), + RuntimeOrigin::signed(DAC_ACCOUNT_ID), cluster_id, era, max_batch_index, @@ -183,7 +183,7 @@ fn begin_charging_customers_works() { System::assert_last_event(Event::ChargingStarted { cluster_id, era }.into()); let report = DdcPayouts::active_billing_reports(cluster_id, era).unwrap(); - assert_eq!(report.state, State::ChargingCustomers); + assert_eq!(report.state, PayoutState::ChargingCustomers); assert_eq!(report.charging_max_batch_index, max_batch_index); }) } @@ -199,8 +199,10 @@ fn send_charging_customers_batch_fails_uninitialised() { let era = 100; let max_batch_index = 2; let batch_index = 1; - let payers1 = vec![(user1, CustomerUsage::default())]; - let payers2 = vec![(user2, CustomerUsage::default())]; + let bucket_id1: BucketId = 1; + let bucket_id2: BucketId = 2; + let payers1 = vec![(user1, bucket_id1, CustomerUsage::default())]; + let payers2 = vec![(user2, bucket_id2, CustomerUsage::default())]; let start_date = NaiveDate::from_ymd_opt(2023, 4, 1).unwrap(); // April 1st let time = NaiveTime::from_hms_opt(0, 0, 0).unwrap(); // Midnight @@ -215,6 +217,7 @@ fn send_charging_customers_batch_fails_uninitialised() { era, batch_index, payers1.clone(), + MMRProof::default(), ), Error::::Unauthorised ); @@ -226,6 +229,7 @@ fn send_charging_customers_batch_fails_uninitialised() { era, batch_index, payers1.clone(), + MMRProof::default(), ), 
BadOrigin ); @@ -239,6 +243,7 @@ fn send_charging_customers_batch_fails_uninitialised() { era, batch_index, payers1.clone(), + MMRProof::default(), ), Error::::BillingReportDoesNotExist ); @@ -258,6 +263,7 @@ fn send_charging_customers_batch_fails_uninitialised() { era, batch_index, payers1.clone(), + MMRProof::default(), ), Error::::NotExpectedState ); @@ -275,6 +281,7 @@ fn send_charging_customers_batch_fails_uninitialised() { era, batch_index, payers1.clone(), + MMRProof::default(), )); assert_noop!( @@ -284,6 +291,7 @@ fn send_charging_customers_batch_fails_uninitialised() { era, batch_index, payers1, + MMRProof::default(), ), Error::::BatchIndexAlreadyProcessed ); @@ -295,6 +303,7 @@ fn send_charging_customers_batch_fails_uninitialised() { era, batch_index, payers2, + MMRProof::default(), ), Error::::BatchIndexAlreadyProcessed ); @@ -401,6 +410,11 @@ fn send_charging_customers_batch_works() { let era = 100; let max_batch_index = 3; let mut batch_index = 0; + let bucket_id1: BucketId = 1; + let bucket_id2: BucketId = 2; + let bucket_id3: BucketId = 3; + let bucket_id4: BucketId = 4; + let usage1 = CustomerUsage { // should pass without debt transferred_bytes: 23452345, @@ -429,9 +443,10 @@ fn send_charging_customers_batch_works() { number_of_puts: 3456345, number_of_gets: 242334563456423, }; - let payers1 = vec![(user2_debtor, usage2.clone()), (user4, usage4.clone())]; - let payers2 = vec![(user1, usage1.clone())]; - let payers3 = vec![(user3_debtor, usage3.clone())]; + let payers1 = + vec![(user2_debtor, bucket_id2, usage2.clone()), (user4, bucket_id4, usage4.clone())]; + let payers2 = vec![(user1, bucket_id1, usage1.clone())]; + let payers3 = vec![(user3_debtor, bucket_id3, usage3.clone())]; let start_date = NaiveDate::from_ymd_opt(2023, 4, 1).unwrap(); // April 1st let time = NaiveTime::from_hms_opt(0, 0, 0).unwrap(); // Midnight let start_era: i64 = @@ -462,6 +477,7 @@ fn send_charging_customers_batch_works() { era, batch_index, payers1, + 
MMRProof::default(), )); let usage4_charge = calculate_charge_for_month(cluster_id, usage4.clone()); @@ -489,6 +505,7 @@ fn send_charging_customers_batch_works() { cluster_id, era, customer_id: user2_debtor, + bucket_id: bucket_id2, batch_index, charged: USER2_BALANCE, expected_to_charge: expected_charge2, @@ -501,6 +518,7 @@ fn send_charging_customers_batch_works() { cluster_id, era, customer_id: user2_debtor, + bucket_id: bucket_id2, batch_index, amount: debt, } @@ -511,6 +529,7 @@ fn send_charging_customers_batch_works() { cluster_id, era, customer_id: user4, + bucket_id: bucket_id4, batch_index, amount: usage4_charge, } @@ -528,6 +547,7 @@ fn send_charging_customers_batch_works() { era, batch_index, payers2, + MMRProof::default(), )); System::assert_last_event( @@ -535,6 +555,7 @@ fn send_charging_customers_batch_works() { cluster_id, era, batch_index, + bucket_id: bucket_id1, customer_id: user1, amount: calculate_charge_for_month(cluster_id, usage1.clone()), } @@ -560,7 +581,7 @@ fn send_charging_customers_batch_works() { report.total_customer_charge.transfer ); - assert_eq!(report.state, State::ChargingCustomers); + assert_eq!(report.state, PayoutState::ChargingCustomers); let user1_debt = DdcPayouts::debtor_customers(cluster_id, user1); assert_eq!(user1_debt, None); @@ -575,6 +596,7 @@ fn send_charging_customers_batch_works() { era, batch_index, payers3, + MMRProof::default(), )); let user3_charge = calculate_charge_for_month(cluster_id, usage3.clone()); @@ -610,6 +632,7 @@ fn send_charging_customers_batch_works() { cluster_id, era, customer_id: user3_debtor, + bucket_id: bucket_id3, batch_index, amount: user3_debt, } @@ -622,6 +645,7 @@ fn send_charging_customers_batch_works() { era, batch_index, customer_id: user3_debtor, + bucket_id: bucket_id3, charged: PARTIAL_CHARGE, expected_to_charge: user3_charge, } @@ -642,6 +666,9 @@ fn end_charging_customers_works_small_usage_1_hour() { let era = 100; let max_batch_index = 0; let batch_index = 0; + let 
bucket_id6: BucketId = 6; + let bucket_id7: BucketId = 7; + let usage6 = CustomerUsage { transferred_bytes: 0, stored_bytes: 474_957, @@ -654,7 +681,8 @@ fn end_charging_customers_works_small_usage_1_hour() { number_of_puts: 0, number_of_gets: 0, }; - let payers1 = vec![(user6, usage6.clone()), (user7, usage7.clone())]; + let payers1 = + vec![(user6, bucket_id6, usage6.clone()), (user7, bucket_id7, usage7.clone())]; let start_date = NaiveDate::from_ymd_opt(2023, 4, 1).unwrap(); // April 1st let time = NaiveTime::from_hms_opt(0, 0, 0).unwrap(); // Midnight let start_era: i64 = @@ -685,6 +713,7 @@ fn end_charging_customers_works_small_usage_1_hour() { era, batch_index, payers1, + MMRProof::default(), )); let report_before = DdcPayouts::active_billing_reports(cluster_id, era).unwrap(); @@ -705,6 +734,7 @@ fn end_charging_customers_works_small_usage_1_hour() { cluster_id, era, customer_id: user6, + bucket_id: bucket_id6, batch_index, amount: usage6_charge, } @@ -716,6 +746,7 @@ fn end_charging_customers_works_small_usage_1_hour() { cluster_id, era, customer_id: user7, + bucket_id: bucket_id7, batch_index, amount: usage7_charge, } @@ -749,7 +780,7 @@ fn end_charging_customers_works_small_usage_1_hour() { System::assert_has_event(Event::ChargingFinished { cluster_id, era }.into()); let report_after = DdcPayouts::active_billing_reports(cluster_id, era).unwrap(); - assert_eq!(report_after.state, State::CustomersChargedWithFees); + assert_eq!(report_after.state, PayoutState::CustomersChargedWithFees); let fees = get_fees(&cluster_id); let total_left_from_one = @@ -834,6 +865,11 @@ fn send_charging_customers_batch_works_for_day() { let era = 100; let max_batch_index = 3; let mut batch_index = 0; + let bucket_id1: BucketId = 1; + let bucket_id2: BucketId = 2; + let bucket_id3: BucketId = 3; + let bucket_id4: BucketId = 4; + let usage1 = CustomerUsage { // should pass without debt transferred_bytes: 23452345, @@ -862,9 +898,10 @@ fn 
send_charging_customers_batch_works_for_day() { number_of_puts: 3456345, number_of_gets: 242334563456423, }; - let payers1 = vec![(user2_debtor, usage2.clone()), (user4, usage4.clone())]; - let payers2 = vec![(user1, usage1.clone())]; - let payers3 = vec![(user3_debtor, usage3.clone())]; + let payers1 = + vec![(user2_debtor, bucket_id2, usage2.clone()), (user4, bucket_id4, usage4.clone())]; + let payers2 = vec![(user1, bucket_id1, usage1.clone())]; + let payers3 = vec![(user3_debtor, bucket_id3, usage3.clone())]; let start_date = NaiveDate::from_ymd_opt(2023, 4, 1).unwrap(); // April 1st let time = NaiveTime::from_hms_opt(0, 0, 0).unwrap(); // Midnight let start_era: i64 = @@ -895,6 +932,7 @@ fn send_charging_customers_batch_works_for_day() { era, batch_index, payers1, + MMRProof::default(), )); let usage4_charge = calculate_charge_for_day(cluster_id, usage4.clone()); @@ -922,6 +960,7 @@ fn send_charging_customers_batch_works_for_day() { cluster_id, era, customer_id: user2_debtor, + bucket_id: bucket_id2, batch_index, charged: USER2_BALANCE, expected_to_charge: expected_charge2, @@ -934,6 +973,7 @@ fn send_charging_customers_batch_works_for_day() { cluster_id, era, customer_id: user2_debtor, + bucket_id: bucket_id2, batch_index, amount: debt, } @@ -944,6 +984,7 @@ fn send_charging_customers_batch_works_for_day() { cluster_id, era, customer_id: user4, + bucket_id: bucket_id4, batch_index, amount: usage4_charge, } @@ -961,6 +1002,7 @@ fn send_charging_customers_batch_works_for_day() { era, batch_index, payers2, + MMRProof::default(), )); System::assert_last_event( @@ -969,6 +1011,7 @@ fn send_charging_customers_batch_works_for_day() { era, batch_index, customer_id: user1, + bucket_id: bucket_id1, amount: calculate_charge_for_day(cluster_id, usage1.clone()), } .into(), @@ -993,7 +1036,7 @@ fn send_charging_customers_batch_works_for_day() { report.total_customer_charge.transfer ); - assert_eq!(report.state, State::ChargingCustomers); + assert_eq!(report.state, 
PayoutState::ChargingCustomers); let user1_debt = DdcPayouts::debtor_customers(cluster_id, user1); assert_eq!(user1_debt, None); @@ -1008,6 +1051,7 @@ fn send_charging_customers_batch_works_for_day() { era, batch_index, payers3, + MMRProof::default(), )); let user3_charge = calculate_charge_for_day(cluster_id, usage3.clone()); @@ -1043,6 +1087,7 @@ fn send_charging_customers_batch_works_for_day() { cluster_id, era, customer_id: user3_debtor, + bucket_id: bucket_id3, batch_index, amount: user3_debt, } @@ -1055,6 +1100,7 @@ fn send_charging_customers_batch_works_for_day() { era, batch_index, customer_id: user3_debtor, + bucket_id: bucket_id3, charged: PARTIAL_CHARGE, expected_to_charge: user3_charge, } @@ -1077,6 +1123,11 @@ fn send_charging_customers_batch_works_for_day_free_storage() { let era = 100; let max_batch_index = 3; let mut batch_index = 0; + let bucket_id1: BucketId = 1; + let bucket_id2: BucketId = 2; + let bucket_id3: BucketId = 3; + let bucket_id4: BucketId = 4; + let usage1 = CustomerUsage { // should pass without debt transferred_bytes: 23452345, @@ -1105,9 +1156,10 @@ fn send_charging_customers_batch_works_for_day_free_storage() { number_of_puts: 3456345, number_of_gets: 242334563456423, }; - let payers1 = vec![(user2_debtor, usage2.clone()), (user4, usage4.clone())]; - let payers2 = vec![(user1, usage1.clone())]; - let payers3 = vec![(user3_debtor, usage3.clone())]; + let payers1 = + vec![(user2_debtor, bucket_id2, usage2.clone()), (user4, bucket_id4, usage4.clone())]; + let payers2 = vec![(user1, bucket_id1, usage1.clone())]; + let payers3 = vec![(user3_debtor, bucket_id3, usage3.clone())]; let start_date = NaiveDate::from_ymd_opt(2023, 4, 1).unwrap(); // April 1st let time = NaiveTime::from_hms_opt(0, 0, 0).unwrap(); // Midnight let start_era: i64 = @@ -1138,6 +1190,7 @@ fn send_charging_customers_batch_works_for_day_free_storage() { era, batch_index, payers1, + MMRProof::default(), )); let usage4_charge = calculate_charge_for_day(cluster_id, 
usage4.clone()); @@ -1165,6 +1218,7 @@ fn send_charging_customers_batch_works_for_day_free_storage() { cluster_id, era, customer_id: user2_debtor, + bucket_id: bucket_id2, batch_index, charged: USER2_BALANCE, expected_to_charge: expected_charge2, @@ -1177,6 +1231,7 @@ fn send_charging_customers_batch_works_for_day_free_storage() { cluster_id, era, customer_id: user2_debtor, + bucket_id: bucket_id2, batch_index, amount: debt, } @@ -1187,6 +1242,7 @@ fn send_charging_customers_batch_works_for_day_free_storage() { cluster_id, era, customer_id: user4, + bucket_id: bucket_id4, batch_index, amount: usage4_charge, } @@ -1204,6 +1260,7 @@ fn send_charging_customers_batch_works_for_day_free_storage() { era, batch_index, payers2, + MMRProof::default(), )); System::assert_last_event( @@ -1212,6 +1269,7 @@ fn send_charging_customers_batch_works_for_day_free_storage() { era, batch_index, customer_id: user1, + bucket_id: bucket_id1, amount: calculate_charge_for_day(cluster_id, usage1.clone()), } .into(), @@ -1236,7 +1294,7 @@ fn send_charging_customers_batch_works_for_day_free_storage() { report.total_customer_charge.transfer ); - assert_eq!(report.state, State::ChargingCustomers); + assert_eq!(report.state, PayoutState::ChargingCustomers); let user1_debt = DdcPayouts::debtor_customers(cluster_id, user1); assert_eq!(user1_debt, None); @@ -1251,6 +1309,7 @@ fn send_charging_customers_batch_works_for_day_free_storage() { era, batch_index, payers3, + MMRProof::default(), )); let user3_charge = calculate_charge_for_day(cluster_id, usage3.clone()); @@ -1286,6 +1345,7 @@ fn send_charging_customers_batch_works_for_day_free_storage() { cluster_id, era, customer_id: user3_debtor, + bucket_id: bucket_id3, batch_index, amount: user3_debt, } @@ -1298,6 +1358,7 @@ fn send_charging_customers_batch_works_for_day_free_storage() { era, batch_index, customer_id: user3_debtor, + bucket_id: bucket_id3, charged: PARTIAL_CHARGE, expected_to_charge: user3_charge, } @@ -1320,6 +1381,11 @@ fn 
send_charging_customers_batch_works_for_day_free_stream() { let era = 100; let max_batch_index = 3; let mut batch_index = 0; + let bucket_id1: BucketId = 1; + let bucket_id2: BucketId = 2; + let bucket_id3: BucketId = 3; + let bucket_id4: BucketId = 4; + let usage1 = CustomerUsage { // should pass without debt transferred_bytes: 23452345, @@ -1348,9 +1414,10 @@ fn send_charging_customers_batch_works_for_day_free_stream() { number_of_puts: 3456345, number_of_gets: 242334563456423, }; - let payers1 = vec![(user2_debtor, usage2.clone()), (user4, usage4.clone())]; - let payers2 = vec![(user1, usage1.clone())]; - let payers3 = vec![(user3_debtor, usage3.clone())]; + let payers1 = + vec![(user2_debtor, bucket_id2, usage2.clone()), (user4, bucket_id4, usage4.clone())]; + let payers2 = vec![(user1, bucket_id1, usage1.clone())]; + let payers3 = vec![(user3_debtor, bucket_id3, usage3.clone())]; let start_date = NaiveDate::from_ymd_opt(2023, 4, 1).unwrap(); // April 1st let time = NaiveTime::from_hms_opt(0, 0, 0).unwrap(); // Midnight let start_era: i64 = @@ -1381,6 +1448,7 @@ fn send_charging_customers_batch_works_for_day_free_stream() { era, batch_index, payers1, + MMRProof::default(), )); let usage4_charge = calculate_charge_for_day(cluster_id, usage4.clone()); @@ -1408,6 +1476,7 @@ fn send_charging_customers_batch_works_for_day_free_stream() { cluster_id, era, customer_id: user2_debtor, + bucket_id: bucket_id2, batch_index, charged: USER2_BALANCE, expected_to_charge: expected_charge2, @@ -1420,6 +1489,7 @@ fn send_charging_customers_batch_works_for_day_free_stream() { cluster_id, era, customer_id: user2_debtor, + bucket_id: bucket_id2, batch_index, amount: debt, } @@ -1430,6 +1500,7 @@ fn send_charging_customers_batch_works_for_day_free_stream() { cluster_id, era, customer_id: user4, + bucket_id: bucket_id4, batch_index, amount: usage4_charge, } @@ -1447,6 +1518,7 @@ fn send_charging_customers_batch_works_for_day_free_stream() { era, batch_index, payers2, + 
MMRProof::default(), )); System::assert_last_event( @@ -1455,6 +1527,7 @@ fn send_charging_customers_batch_works_for_day_free_stream() { era, batch_index, customer_id: user1, + bucket_id: bucket_id1, amount: calculate_charge_for_day(cluster_id, usage1.clone()), } .into(), @@ -1479,7 +1552,7 @@ fn send_charging_customers_batch_works_for_day_free_stream() { report.total_customer_charge.transfer ); - assert_eq!(report.state, State::ChargingCustomers); + assert_eq!(report.state, PayoutState::ChargingCustomers); let user1_debt = DdcPayouts::debtor_customers(cluster_id, user1); assert_eq!(user1_debt, None); @@ -1494,6 +1567,7 @@ fn send_charging_customers_batch_works_for_day_free_stream() { era, batch_index, payers3, + MMRProof::default(), )); let user3_charge = calculate_charge_for_day(cluster_id, usage3.clone()); @@ -1529,6 +1603,7 @@ fn send_charging_customers_batch_works_for_day_free_stream() { cluster_id, era, customer_id: user3_debtor, + bucket_id: bucket_id3, batch_index, amount: user3_debt, } @@ -1541,6 +1616,7 @@ fn send_charging_customers_batch_works_for_day_free_stream() { era, batch_index, customer_id: user3_debtor, + bucket_id: bucket_id3, charged: PARTIAL_CHARGE, expected_to_charge: user3_charge, } @@ -1563,6 +1639,11 @@ fn send_charging_customers_batch_works_for_day_free_get() { let era = 100; let max_batch_index = 3; let mut batch_index = 0; + let bucket_id1: BucketId = 1; + let bucket_id2: BucketId = 2; + let bucket_id3: BucketId = 3; + let bucket_id4: BucketId = 4; + let usage1 = CustomerUsage { // should pass without debt transferred_bytes: 23452345, @@ -1591,9 +1672,10 @@ fn send_charging_customers_batch_works_for_day_free_get() { number_of_puts: 3456345, number_of_gets: 242334563456423, }; - let payers1 = vec![(user2_debtor, usage2.clone()), (user4, usage4.clone())]; - let payers2 = vec![(user1, usage1.clone())]; - let payers3 = vec![(user3_debtor, usage3.clone())]; + let payers1 = + vec![(user2_debtor, bucket_id2, usage2.clone()), (user4, 
bucket_id4, usage4.clone())]; + let payers2 = vec![(user1, bucket_id1, usage1.clone())]; + let payers3 = vec![(user3_debtor, bucket_id3, usage3.clone())]; let start_date = NaiveDate::from_ymd_opt(2023, 4, 1).unwrap(); // April 1st let time = NaiveTime::from_hms_opt(0, 0, 0).unwrap(); // Midnight let start_era: i64 = @@ -1624,6 +1706,7 @@ fn send_charging_customers_batch_works_for_day_free_get() { era, batch_index, payers1, + MMRProof::default(), )); let usage4_charge = calculate_charge_for_day(cluster_id, usage4.clone()); @@ -1651,6 +1734,7 @@ fn send_charging_customers_batch_works_for_day_free_get() { cluster_id, era, customer_id: user2_debtor, + bucket_id: bucket_id2, batch_index, charged: USER2_BALANCE, expected_to_charge: expected_charge2, @@ -1663,6 +1747,7 @@ fn send_charging_customers_batch_works_for_day_free_get() { cluster_id, era, customer_id: user2_debtor, + bucket_id: bucket_id2, batch_index, amount: debt, } @@ -1673,6 +1758,7 @@ fn send_charging_customers_batch_works_for_day_free_get() { cluster_id, era, customer_id: user4, + bucket_id: bucket_id4, batch_index, amount: usage4_charge, } @@ -1690,6 +1776,7 @@ fn send_charging_customers_batch_works_for_day_free_get() { era, batch_index, payers2, + MMRProof::default(), )); System::assert_last_event( @@ -1698,6 +1785,7 @@ fn send_charging_customers_batch_works_for_day_free_get() { era, batch_index, customer_id: user1, + bucket_id: bucket_id1, amount: calculate_charge_for_day(cluster_id, usage1.clone()), } .into(), @@ -1722,7 +1810,7 @@ fn send_charging_customers_batch_works_for_day_free_get() { report.total_customer_charge.transfer ); - assert_eq!(report.state, State::ChargingCustomers); + assert_eq!(report.state, PayoutState::ChargingCustomers); let user1_debt = DdcPayouts::debtor_customers(cluster_id, user1); assert_eq!(user1_debt, None); @@ -1737,6 +1825,7 @@ fn send_charging_customers_batch_works_for_day_free_get() { era, batch_index, payers3, + MMRProof::default(), )); let user3_charge = 
calculate_charge_for_day(cluster_id, usage3.clone()); @@ -1772,6 +1861,7 @@ fn send_charging_customers_batch_works_for_day_free_get() { cluster_id, era, customer_id: user3_debtor, + bucket_id: bucket_id3, batch_index, amount: user3_debt, } @@ -1784,6 +1874,7 @@ fn send_charging_customers_batch_works_for_day_free_get() { era, batch_index, customer_id: user3_debtor, + bucket_id: bucket_id3, charged: PARTIAL_CHARGE, expected_to_charge: user3_charge, } @@ -1806,6 +1897,11 @@ fn send_charging_customers_batch_works_for_day_free_put() { let era = 100; let max_batch_index = 3; let mut batch_index = 0; + let bucket_id1: BucketId = 1; + let bucket_id2: BucketId = 2; + let bucket_id3: BucketId = 3; + let bucket_id4: BucketId = 4; + let usage1 = CustomerUsage { // should pass without debt transferred_bytes: 23452345, @@ -1834,9 +1930,10 @@ fn send_charging_customers_batch_works_for_day_free_put() { number_of_puts: 3456345, number_of_gets: 242334563456423, }; - let payers1 = vec![(user2_debtor, usage2.clone()), (user4, usage4.clone())]; - let payers2 = vec![(user1, usage1.clone())]; - let payers3 = vec![(user3_debtor, usage3.clone())]; + let payers1 = + vec![(user2_debtor, bucket_id2, usage2.clone()), (user4, bucket_id4, usage4.clone())]; + let payers2 = vec![(user1, bucket_id1, usage1.clone())]; + let payers3 = vec![(user3_debtor, bucket_id3, usage3.clone())]; let start_date = NaiveDate::from_ymd_opt(2023, 4, 1).unwrap(); // April 1st let time = NaiveTime::from_hms_opt(0, 0, 0).unwrap(); // Midnight let start_era: i64 = @@ -1867,6 +1964,7 @@ fn send_charging_customers_batch_works_for_day_free_put() { era, batch_index, payers1, + MMRProof::default(), )); let usage4_charge = calculate_charge_for_day(cluster_id, usage4.clone()); @@ -1894,6 +1992,7 @@ fn send_charging_customers_batch_works_for_day_free_put() { cluster_id, era, customer_id: user2_debtor, + bucket_id: bucket_id2, batch_index, charged: USER2_BALANCE, expected_to_charge: expected_charge2, @@ -1906,6 +2005,7 @@ fn 
send_charging_customers_batch_works_for_day_free_put() { cluster_id, era, customer_id: user2_debtor, + bucket_id: bucket_id2, batch_index, amount: debt, } @@ -1916,6 +2016,7 @@ fn send_charging_customers_batch_works_for_day_free_put() { cluster_id, era, customer_id: user4, + bucket_id: bucket_id4, batch_index, amount: usage4_charge, } @@ -1933,6 +2034,7 @@ fn send_charging_customers_batch_works_for_day_free_put() { era, batch_index, payers2, + MMRProof::default(), )); System::assert_last_event( @@ -1940,6 +2042,7 @@ fn send_charging_customers_batch_works_for_day_free_put() { cluster_id, era, batch_index, + bucket_id: bucket_id1, customer_id: user1, amount: calculate_charge_for_day(cluster_id, usage1.clone()), } @@ -1965,7 +2068,7 @@ fn send_charging_customers_batch_works_for_day_free_put() { report.total_customer_charge.transfer ); - assert_eq!(report.state, State::ChargingCustomers); + assert_eq!(report.state, PayoutState::ChargingCustomers); let user1_debt = DdcPayouts::debtor_customers(cluster_id, user1); assert_eq!(user1_debt, None); @@ -1980,6 +2083,7 @@ fn send_charging_customers_batch_works_for_day_free_put() { era, batch_index, payers3, + MMRProof::default(), )); let user3_charge = calculate_charge_for_day(cluster_id, usage3.clone()); @@ -2015,6 +2119,7 @@ fn send_charging_customers_batch_works_for_day_free_put() { cluster_id, era, customer_id: user3_debtor, + bucket_id: bucket_id3, batch_index, amount: user3_debt, } @@ -2027,6 +2132,7 @@ fn send_charging_customers_batch_works_for_day_free_put() { era, batch_index, customer_id: user3_debtor, + bucket_id: bucket_id3, charged: PARTIAL_CHARGE, expected_to_charge: user3_charge, } @@ -2049,6 +2155,11 @@ fn send_charging_customers_batch_works_for_day_free_storage_stream() { let era = 100; let max_batch_index = 3; let mut batch_index = 0; + let bucket_id1: BucketId = 1; + let bucket_id2: BucketId = 2; + let bucket_id3: BucketId = 3; + let bucket_id4: BucketId = 4; + let usage1 = CustomerUsage { // should pass 
without debt transferred_bytes: 23452345, @@ -2077,9 +2188,10 @@ fn send_charging_customers_batch_works_for_day_free_storage_stream() { number_of_puts: 3456345, number_of_gets: 242334563456423, }; - let payers1 = vec![(user2_debtor, usage2.clone()), (user4, usage4.clone())]; - let payers2 = vec![(user1, usage1.clone())]; - let payers3 = vec![(user3_debtor, usage3.clone())]; + let payers1 = + vec![(user2_debtor, bucket_id2, usage2.clone()), (user4, bucket_id4, usage4.clone())]; + let payers2 = vec![(user1, bucket_id1, usage1.clone())]; + let payers3 = vec![(user3_debtor, bucket_id3, usage3.clone())]; let start_date = NaiveDate::from_ymd_opt(2023, 4, 1).unwrap(); // April 1st let time = NaiveTime::from_hms_opt(0, 0, 0).unwrap(); // Midnight let start_era: i64 = @@ -2110,6 +2222,7 @@ fn send_charging_customers_batch_works_for_day_free_storage_stream() { era, batch_index, payers1, + MMRProof::default(), )); let usage4_charge = calculate_charge_for_day(cluster_id, usage4.clone()); @@ -2137,6 +2250,7 @@ fn send_charging_customers_batch_works_for_day_free_storage_stream() { cluster_id, era, customer_id: user2_debtor, + bucket_id: bucket_id2, batch_index, charged: USER2_BALANCE, expected_to_charge: expected_charge2, @@ -2149,6 +2263,7 @@ fn send_charging_customers_batch_works_for_day_free_storage_stream() { cluster_id, era, customer_id: user2_debtor, + bucket_id: bucket_id2, batch_index, amount: debt, } @@ -2159,6 +2274,7 @@ fn send_charging_customers_batch_works_for_day_free_storage_stream() { cluster_id, era, customer_id: user4, + bucket_id: bucket_id4, batch_index, amount: usage4_charge, } @@ -2176,6 +2292,7 @@ fn send_charging_customers_batch_works_for_day_free_storage_stream() { era, batch_index, payers2, + MMRProof::default(), )); System::assert_last_event( @@ -2183,6 +2300,7 @@ fn send_charging_customers_batch_works_for_day_free_storage_stream() { cluster_id, era, batch_index, + bucket_id: bucket_id1, customer_id: user1, amount: calculate_charge_for_day(cluster_id, 
usage1.clone()), } @@ -2208,7 +2326,7 @@ fn send_charging_customers_batch_works_for_day_free_storage_stream() { report.total_customer_charge.transfer ); - assert_eq!(report.state, State::ChargingCustomers); + assert_eq!(report.state, PayoutState::ChargingCustomers); let user1_debt = DdcPayouts::debtor_customers(cluster_id, user1); assert_eq!(user1_debt, None); @@ -2223,6 +2341,7 @@ fn send_charging_customers_batch_works_for_day_free_storage_stream() { era, batch_index, payers3, + MMRProof::default(), )); let user3_charge = calculate_charge_for_day(cluster_id, usage3.clone()); @@ -2258,6 +2377,7 @@ fn send_charging_customers_batch_works_for_day_free_storage_stream() { cluster_id, era, customer_id: user3_debtor, + bucket_id: bucket_id3, batch_index, amount: user3_debt, } @@ -2269,6 +2389,7 @@ fn send_charging_customers_batch_works_for_day_free_storage_stream() { cluster_id, era, batch_index, + bucket_id: bucket_id3, customer_id: user3_debtor, charged: PARTIAL_CHARGE, expected_to_charge: user3_charge, @@ -2289,6 +2410,7 @@ fn send_charging_customers_batch_works_zero_fees() { let era = 100; let max_batch_index = 0; let batch_index = 0; + let bucket_id5: BucketId = 5; let usage5 = CustomerUsage { // should pass without debt transferred_bytes: 1024, @@ -2296,7 +2418,7 @@ fn send_charging_customers_batch_works_zero_fees() { number_of_puts: 1, number_of_gets: 1, }; - let payers5 = vec![(user5, usage5.clone())]; + let payers5 = vec![(user5, bucket_id5, usage5.clone())]; let start_date = NaiveDate::from_ymd_opt(2023, 4, 1).unwrap(); // April 1st let time = NaiveTime::from_hms_opt(0, 0, 0).unwrap(); // Midnight let start_era: i64 = @@ -2330,6 +2452,7 @@ fn send_charging_customers_batch_works_zero_fees() { era, batch_index, payers5, + MMRProof::default(), )); let usage5_charge = calculate_charge_for_month(cluster_id, usage5.clone()); @@ -2366,7 +2489,8 @@ fn end_charging_customers_fails_uninitialised() { let era = 100; let max_batch_index = 2; let batch_index = 1; - let payers 
= vec![(user1, CustomerUsage::default())]; + let bucket_id1: BucketId = 1; + let payers = vec![(user1, bucket_id1, CustomerUsage::default())]; let start_date = NaiveDate::from_ymd_opt(2023, 4, 1).unwrap(); // April 1st let time = NaiveTime::from_hms_opt(0, 0, 0).unwrap(); // Midnight let start_era: i64 = @@ -2425,6 +2549,7 @@ fn end_charging_customers_fails_uninitialised() { era, batch_index, payers, + MMRProof::default(), )); assert_noop!( @@ -2445,13 +2570,14 @@ fn end_charging_customers_works() { let era = 100; let max_batch_index = 0; let batch_index = 0; + let bucket_id1: BucketId = 1; let usage1 = CustomerUsage { transferred_bytes: 23452345, stored_bytes: 3345234523, number_of_puts: 4456456345234523, number_of_gets: 523423, }; - let payers = vec![(user1, usage1.clone())]; + let payers = vec![(user1, bucket_id1, usage1.clone())]; let start_date = NaiveDate::from_ymd_opt(2023, 4, 1).unwrap(); // April 1st let time = NaiveTime::from_hms_opt(0, 0, 0).unwrap(); // Midnight @@ -2482,13 +2608,21 @@ fn end_charging_customers_works() { era, batch_index, payers, + MMRProof::default(), )); let report_before = DdcPayouts::active_billing_reports(cluster_id, era).unwrap(); let charge = calculate_charge_for_month(cluster_id, usage1); System::assert_last_event( - Event::Charged { cluster_id, era, batch_index, customer_id: user1, amount: charge } - .into(), + Event::Charged { + cluster_id, + era, + batch_index, + customer_id: user1, + amount: charge, + bucket_id: bucket_id1, + } + .into(), ); let mut balance = Balances::free_balance(DdcPayouts::account_id()); @@ -2519,11 +2653,11 @@ fn end_charging_customers_works() { Event::ValidatorFeesCollected { cluster_id, era, amount: validator_fee }.into(), ); - let transfers = 3 + 3 + 3 + 3 * 3; // for Currency::transfer - assert_eq!(System::events().len(), 5 + 1 + 3 + transfers); + let transfers = 3 + 3 + 3 * 3; // for Currency::transfer + assert_eq!(System::events().len(), 8 + 1 + 3 + transfers); let report_after = 
DdcPayouts::active_billing_reports(cluster_id, era).unwrap(); - assert_eq!(report_after.state, State::CustomersChargedWithFees); + assert_eq!(report_after.state, PayoutState::CustomersChargedWithFees); let total_left_from_one = (get_fees(&cluster_id).treasury_share + get_fees(&cluster_id).validators_share + @@ -2619,13 +2753,14 @@ fn end_charging_customers_works_zero_fees() { let era = 100; let max_batch_index = 0; let batch_index = 0; + let bucket_id1: BucketId = 1; let usage1 = CustomerUsage { transferred_bytes: 23452345, stored_bytes: 3345234523, number_of_puts: 1, number_of_gets: 1, }; - let payers = vec![(user1, usage1.clone())]; + let payers = vec![(user1, bucket_id1, usage1.clone())]; let start_date = NaiveDate::from_ymd_opt(2023, 4, 1).unwrap(); // April 1st let time = NaiveTime::from_hms_opt(0, 0, 0).unwrap(); // Midnight @@ -2656,13 +2791,21 @@ fn end_charging_customers_works_zero_fees() { era, batch_index, payers, + MMRProof::default(), )); let report_before = DdcPayouts::active_billing_reports(cluster_id, era).unwrap(); let charge = calculate_charge_for_month(cluster_id, usage1); System::assert_last_event( - Event::Charged { cluster_id, era, customer_id: user1, batch_index, amount: charge } - .into(), + Event::Charged { + cluster_id, + era, + customer_id: user1, + bucket_id: bucket_id1, + batch_index, + amount: charge, + } + .into(), ); let mut balance = Balances::free_balance(DdcPayouts::account_id()); @@ -2679,7 +2822,7 @@ fn end_charging_customers_works_zero_fees() { assert_eq!(System::events().len(), 5 + 1); let report_after = DdcPayouts::active_billing_reports(cluster_id, era).unwrap(); - assert_eq!(report_after.state, State::CustomersChargedWithFees); + assert_eq!(report_after.state, PayoutState::CustomersChargedWithFees); let fees = get_fees(&cluster_id); @@ -2737,7 +2880,8 @@ fn begin_rewarding_providers_fails_uninitialised() { let era = 100; let max_batch_index = 2; let batch_index = 1; - let payers = vec![(user1, CustomerUsage::default())]; + 
let bucket_id1: BucketId = 1; + let payers = vec![(user1, bucket_id1, CustomerUsage::default())]; let node_usage = NodeUsage::default(); let start_date = NaiveDate::from_ymd_opt(2023, 4, 1).unwrap(); // April 1st @@ -2824,6 +2968,7 @@ fn begin_rewarding_providers_fails_uninitialised() { era, batch_index, payers.clone(), + MMRProof::default(), )); assert_noop!( @@ -2843,6 +2988,7 @@ fn begin_rewarding_providers_fails_uninitialised() { era, batch_index + 1, payers, + MMRProof::default(), )); assert_noop!( @@ -2869,8 +3015,9 @@ fn begin_rewarding_providers_works() { let era = 100; let max_batch_index = 0; let batch_index = 0; + let bucket_id1: BucketId = 1; let total_node_usage = NodeUsage::default(); - let payers = vec![(user1, CustomerUsage::default())]; + let payers = vec![(user1, bucket_id1, CustomerUsage::default())]; let start_date = NaiveDate::from_ymd_opt(2023, 4, 1).unwrap(); // April 1st let time = NaiveTime::from_hms_opt(0, 0, 0).unwrap(); // Midnight @@ -2889,7 +3036,7 @@ fn begin_rewarding_providers_works() { )); let mut report = DdcPayouts::active_billing_reports(cluster_id, era).unwrap(); - assert_eq!(report.state, State::Initialized); + assert_eq!(report.state, PayoutState::Initialized); assert_ok!(DdcPayouts::begin_charging_customers( RuntimeOrigin::signed(dac_account), @@ -2904,6 +3051,7 @@ fn begin_rewarding_providers_works() { era, batch_index, payers, + MMRProof::default(), )); assert_ok!(DdcPayouts::end_charging_customers( @@ -2923,7 +3071,7 @@ fn begin_rewarding_providers_works() { System::assert_last_event(Event::RewardingStarted { cluster_id, era }.into()); report = DdcPayouts::active_billing_reports(cluster_id, era).unwrap(); - assert_eq!(report.state, State::RewardingProviders); + assert_eq!(report.state, PayoutState::RewardingProviders); }) } @@ -2939,8 +3087,10 @@ fn send_rewarding_providers_batch_fails_uninitialised() { let era = 100; let max_batch_index = 1; let batch_index = 0; - let payers1 = vec![(user1, CustomerUsage::default())]; - 
let payers2 = vec![(user2, CustomerUsage::default())]; + let bucket_id1: BucketId = 1; + let bucket_id2: BucketId = 2; + let payers1 = vec![(user1, bucket_id1, CustomerUsage::default())]; + let payers2 = vec![(user2, bucket_id2, CustomerUsage::default())]; let payees = vec![(node1, NodeUsage::default())]; let start_date = NaiveDate::from_ymd_opt(2023, 4, 1).unwrap(); // April 1st @@ -2956,6 +3106,7 @@ fn send_rewarding_providers_batch_fails_uninitialised() { era, batch_index, payees.clone(), + MMRProof::default(), ), Error::::Unauthorised ); @@ -2967,6 +3118,7 @@ fn send_rewarding_providers_batch_fails_uninitialised() { era, batch_index, payees.clone(), + MMRProof::default(), ), BadOrigin ); @@ -2980,6 +3132,7 @@ fn send_rewarding_providers_batch_fails_uninitialised() { era, batch_index, payees.clone(), + MMRProof::default(), ), Error::::BillingReportDoesNotExist ); @@ -2999,6 +3152,7 @@ fn send_rewarding_providers_batch_fails_uninitialised() { era, batch_index, payees.clone(), + MMRProof::default(), ), Error::::NotExpectedState ); @@ -3017,6 +3171,7 @@ fn send_rewarding_providers_batch_fails_uninitialised() { era, batch_index, payees.clone(), + MMRProof::default(), ), Error::::NotExpectedState ); @@ -3027,6 +3182,7 @@ fn send_rewarding_providers_batch_fails_uninitialised() { era, batch_index, payers1, + MMRProof::default(), )); assert_noop!( @@ -3036,6 +3192,7 @@ fn send_rewarding_providers_batch_fails_uninitialised() { era, batch_index, payees.clone(), + MMRProof::default(), ), Error::::NotExpectedState ); @@ -3046,6 +3203,7 @@ fn send_rewarding_providers_batch_fails_uninitialised() { era, batch_index + 1, payers2, + MMRProof::default(), )); assert_noop!( @@ -3055,6 +3213,7 @@ fn send_rewarding_providers_batch_fails_uninitialised() { era, batch_index, payees.clone(), + MMRProof::default(), ), Error::::NotExpectedState ); @@ -3072,6 +3231,7 @@ fn send_rewarding_providers_batch_fails_uninitialised() { era, batch_index, payees, + MMRProof::default(), ), 
Error::::NotExpectedState ); @@ -3094,6 +3254,7 @@ fn send_rewarding_providers_batch_works() { let max_node_batch_index = 1; let batch_index = 0; let batch_node_index = 0; + let bucket_id1: BucketId = 1; let usage1 = CustomerUsage { transferred_bytes: 23452345, stored_bytes: 3345234523, @@ -3140,7 +3301,7 @@ fn send_rewarding_providers_batch_works() { node_usage3.number_of_gets, }; - let payers = vec![(user1, usage1)]; + let payers = vec![(user1, bucket_id1, usage1)]; let payees1 = vec![(node1, node_usage1.clone()), (node2, node_usage2.clone())]; let payees2 = vec![(node3, node_usage3.clone())]; let start_date = NaiveDate::from_ymd_opt(2023, 4, 1).unwrap(); // April 1st @@ -3173,6 +3334,7 @@ fn send_rewarding_providers_batch_works() { era, batch_index, payers, + MMRProof::default(), )); let report_before = DdcPayouts::active_billing_reports(cluster_id, era).unwrap(); @@ -3219,6 +3381,7 @@ fn send_rewarding_providers_batch_works() { era, batch_node_index, payees1, + MMRProof::default(), )); let ratio1_transfer = Perquintill::from_rational( @@ -3248,15 +3411,11 @@ fn send_rewarding_providers_batch_works() { let mut report_reward = DdcPayouts::active_billing_reports(cluster_id, era).unwrap(); System::assert_has_event( - Event::ProviderRewarded { + Event::Rewarded { cluster_id, era, node_provider_id: node1, batch_index: batch_node_index, - stored_bytes: node_usage1.stored_bytes, - transferred_bytes: node_usage1.transferred_bytes, - number_of_puts: node_usage1.number_of_puts, - number_of_gets: node_usage1.number_of_gets, rewarded: balance_node1, expected_to_reward: balance_node1, } @@ -3290,15 +3449,11 @@ fn send_rewarding_providers_batch_works() { assert_eq!(report_reward.total_distributed_reward, balance_node1 + balance_node2); System::assert_has_event( - Event::ProviderRewarded { + Event::Rewarded { cluster_id, era, node_provider_id: node2, batch_index: batch_node_index, - stored_bytes: node_usage2.stored_bytes, - transferred_bytes: node_usage2.transferred_bytes, - 
number_of_puts: node_usage2.number_of_puts, - number_of_gets: node_usage2.number_of_gets, rewarded: balance_node2, expected_to_reward: balance_node2, } @@ -3312,6 +3467,7 @@ fn send_rewarding_providers_batch_works() { era, batch_node_index + 1, payees2, + MMRProof::default(), )); let ratio3_transfer = Perquintill::from_rational( @@ -3341,15 +3497,11 @@ fn send_rewarding_providers_batch_works() { assert_eq!(balance_node3, transfer_charge + storage_charge + puts_charge + gets_charge); System::assert_has_event( - Event::ProviderRewarded { + Event::Rewarded { cluster_id, era, node_provider_id: node3, batch_index: batch_node_index + 1, - stored_bytes: node_usage3.stored_bytes, - transferred_bytes: node_usage3.transferred_bytes, - number_of_puts: node_usage3.number_of_puts, - number_of_gets: node_usage3.number_of_gets, rewarded: balance_node3, expected_to_reward: balance_node3, } @@ -3389,6 +3541,7 @@ fn send_rewarding_providers_batch_100_nodes_small_usage_works() { let era = 100; let user_batch_size = 10; let node_batch_size = 10; + let bucketid1: BucketId = 1; let mut batch_user_index = 0; let mut batch_node_index = 0; let start_date = NaiveDate::from_ymd_opt(2023, 4, 1).unwrap(); // April 1st @@ -3454,8 +3607,8 @@ fn send_rewarding_providers_batch_100_nodes_small_usage_works() { } let mut total_charge = 0u128; - let mut payers: Vec> = Vec::new(); - let mut user_batch: Vec<(u128, CustomerUsage)> = Vec::new(); + let mut payers: Vec> = Vec::new(); + let mut user_batch: Vec<(u128, BucketId, CustomerUsage)> = Vec::new(); for user_id in 1000..1000 + num_users { let ratio = match user_id % 5 { 0 => Perquintill::one(), @@ -3481,7 +3634,7 @@ fn send_rewarding_providers_batch_100_nodes_small_usage_works() { .unwrap(); total_charge += expected_charge; - user_batch.push((user_id, user_usage)); + user_batch.push((user_id, bucketid1, user_usage)); if user_batch.len() == user_batch_size { payers.push(user_batch.clone()); user_batch.clear(); @@ -3513,15 +3666,17 @@ fn 
send_rewarding_providers_batch_100_nodes_small_usage_works() { era, batch_user_index, batch.to_vec(), + MMRProof::default(), )); - for (customer_id, usage) in batch.iter() { + for (customer_id, _bucket_id, usage) in batch.iter() { let charge = calculate_charge_for_month(cluster_id, usage.clone()); System::assert_has_event( Event::Charged { cluster_id, era, + bucket_id: bucketid1, customer_id: *customer_id, batch_index: batch_user_index, amount: charge, @@ -3591,6 +3746,7 @@ fn send_rewarding_providers_batch_100_nodes_small_usage_works() { era, batch_node_index, batch.to_vec(), + MMRProof::default(), )); let mut batch_charge = 0; @@ -3655,8 +3811,10 @@ fn send_rewarding_providers_batch_100_nodes_large_usage_works() { let era = 100; let user_batch_size = 10; let node_batch_size = 10; + let bucketid1: BucketId = 1; let mut batch_user_index = 0; let mut batch_node_index = 0; + let bucket_id: BucketId = 1; let usage1 = CustomerUsage { transferred_bytes: 1024, stored_bytes: 1024, @@ -3727,8 +3885,8 @@ fn send_rewarding_providers_batch_100_nodes_large_usage_works() { } let mut total_charge = 0u128; - let mut payers: Vec> = Vec::new(); - let mut user_batch: Vec<(u128, CustomerUsage)> = Vec::new(); + let mut payers: Vec> = Vec::new(); + let mut user_batch: Vec<(u128, BucketId, CustomerUsage)> = Vec::new(); for user_id in 1000..1000 + num_users { let ratio = match user_id % 5 { 0 => Perquintill::from_float(1_000_000.0), @@ -3754,7 +3912,7 @@ fn send_rewarding_providers_batch_100_nodes_large_usage_works() { .unwrap(); total_charge += expected_charge; - user_batch.push((user_id, user_usage)); + user_batch.push((user_id, bucket_id, user_usage)); if user_batch.len() == user_batch_size { payers.push(user_batch.clone()); user_batch.clear(); @@ -3786,15 +3944,17 @@ fn send_rewarding_providers_batch_100_nodes_large_usage_works() { era, batch_user_index, batch.to_vec(), + MMRProof::default(), )); - for (customer_id, usage) in batch.iter() { + for (customer_id, _bucket_id, usage) in 
batch.iter() { let charge = calculate_charge_for_month(cluster_id, usage.clone()); System::assert_has_event( Event::Charged { cluster_id, era, + bucket_id: bucketid1, customer_id: *customer_id, batch_index: batch_user_index, amount: charge, @@ -3864,6 +4024,7 @@ fn send_rewarding_providers_batch_100_nodes_large_usage_works() { era, batch_node_index, batch.to_vec(), + MMRProof::default(), )); let mut batch_charge = 0; @@ -3926,6 +4087,7 @@ fn send_rewarding_providers_batch_100_nodes_small_large_usage_works() { let bank = 1u128; let cluster_id = ONE_CLUSTER_ID; let era = 100; + let bucketid1: BucketId = 1; let user_batch_size = 10; let node_batch_size = 10; let mut batch_user_index = 0; @@ -4000,8 +4162,8 @@ fn send_rewarding_providers_batch_100_nodes_small_large_usage_works() { } let mut total_charge = 0u128; - let mut payers: Vec> = Vec::new(); - let mut user_batch: Vec<(u128, CustomerUsage)> = Vec::new(); + let mut payers: Vec> = Vec::new(); + let mut user_batch: Vec<(u128, BucketId, CustomerUsage)> = Vec::new(); for user_id in 1000..1000 + num_users { let ratio = match user_id % 5 { 0 => Perquintill::from_float(1_000_000.0), @@ -4027,7 +4189,7 @@ fn send_rewarding_providers_batch_100_nodes_small_large_usage_works() { .unwrap(); total_charge += expected_charge; - user_batch.push((user_id, user_usage)); + user_batch.push((user_id, bucketid1, user_usage)); if user_batch.len() == user_batch_size { payers.push(user_batch.clone()); user_batch.clear(); @@ -4059,9 +4221,10 @@ fn send_rewarding_providers_batch_100_nodes_small_large_usage_works() { era, batch_user_index, batch.to_vec(), + MMRProof::default(), )); - for (customer_id, usage) in batch.iter() { + for (customer_id, _bucket_id, usage) in batch.iter() { let charge = calculate_charge_for_month(cluster_id, usage.clone()); System::assert_has_event( @@ -4069,6 +4232,7 @@ fn send_rewarding_providers_batch_100_nodes_small_large_usage_works() { cluster_id, era, customer_id: *customer_id, + bucket_id: bucketid1, 
batch_index: batch_user_index, amount: charge, } @@ -4137,6 +4301,7 @@ fn send_rewarding_providers_batch_100_nodes_small_large_usage_works() { era, batch_node_index, batch.to_vec(), + MMRProof::default(), )); let mut batch_charge = 0; @@ -4213,6 +4378,7 @@ fn send_rewarding_providers_batch_100_nodes_random_usage_works() { let node_batch_size = 10; let mut batch_user_index = 0; let mut batch_node_index = 0; + let bucket_id1: BucketId = 1; let mut payees: Vec> = Vec::new(); let mut node_batch: Vec<(u128, NodeUsage)> = Vec::new(); let mut total_nodes_usage = NodeUsage::default(); @@ -4240,8 +4406,8 @@ fn send_rewarding_providers_batch_100_nodes_random_usage_works() { } let mut total_charge = 0u128; - let mut payers: Vec> = Vec::new(); - let mut user_batch: Vec<(u128, CustomerUsage)> = Vec::new(); + let mut payers: Vec> = Vec::new(); + let mut user_batch: Vec<(u128, BucketId, CustomerUsage)> = Vec::new(); for user_id in 1000..1000 + num_users { let user_usage = CustomerUsage { transferred_bytes: generate_random_u64(&mock_randomness, min, max), @@ -4259,7 +4425,7 @@ fn send_rewarding_providers_batch_100_nodes_random_usage_works() { .unwrap(); total_charge += expected_charge; - user_batch.push((user_id, user_usage)); + user_batch.push((user_id, bucket_id1, user_usage)); if user_batch.len() == user_batch_size { payers.push(user_batch.clone()); user_batch.clear(); @@ -4291,15 +4457,17 @@ fn send_rewarding_providers_batch_100_nodes_random_usage_works() { era, batch_user_index, batch.to_vec(), + MMRProof::default(), )); - for (customer_id, usage) in batch.iter() { + for (customer_id, _bucket_id, usage) in batch.iter() { let charge = calculate_charge_for_month(cluster_id, usage.clone()); System::assert_has_event( Event::Charged { cluster_id, era, + bucket_id: bucket_id1, customer_id: *customer_id, batch_index: batch_user_index, amount: charge, @@ -4369,6 +4537,7 @@ fn send_rewarding_providers_batch_100_nodes_random_usage_works() { era, batch_node_index, batch.to_vec(), + 
MMRProof::default(), )); let mut batch_charge = 0; @@ -4426,8 +4595,10 @@ fn end_rewarding_providers_fails_uninitialised() { let era = 100; let max_batch_index = 1; let batch_index = 0; - let payers1 = vec![(user1, CustomerUsage::default())]; - let payers2 = vec![(user2, CustomerUsage::default())]; + let bucket_id1: BucketId = 1; + let bucket_id2: BucketId = 2; + let payers1 = vec![(user1, bucket_id1, CustomerUsage::default())]; + let payers2 = vec![(user2, bucket_id2, CustomerUsage::default())]; let payees = vec![(node1, NodeUsage::default())]; let total_node_usage = NodeUsage::default(); let start_date = NaiveDate::from_ymd_opt(2023, 4, 1).unwrap(); // April 1st @@ -4501,6 +4672,7 @@ fn end_rewarding_providers_fails_uninitialised() { era, batch_index, payers1, + MMRProof::default(), )); assert_noop!( @@ -4518,6 +4690,7 @@ fn end_rewarding_providers_fails_uninitialised() { era, batch_index + 1, payers2, + MMRProof::default(), )); assert_noop!( @@ -4567,6 +4740,7 @@ fn end_rewarding_providers_fails_uninitialised() { era, batch_index, payees, + MMRProof::default(), )); assert_noop!( @@ -4598,6 +4772,7 @@ fn end_rewarding_providers_works() { let era = 100; let max_batch_index = 0; let batch_index = 0; + let bucket_id1: BucketId = 1; let usage1 = CustomerUsage { transferred_bytes: 23452345, stored_bytes: 3345234523, @@ -4613,7 +4788,7 @@ fn end_rewarding_providers_works() { number_of_gets: usage1.number_of_gets * 2 / 3, }; let total_node_usage = node_usage1.clone(); - let payers = vec![(user1, usage1)]; + let payers = vec![(user1, bucket_id1, usage1)]; let payees = vec![(node1, node_usage1)]; assert_ok!(DdcPayouts::set_authorised_caller(RuntimeOrigin::root(), dac_account)); @@ -4627,7 +4802,7 @@ fn end_rewarding_providers_works() { )); let mut report = DdcPayouts::active_billing_reports(cluster_id, era).unwrap(); - assert_eq!(report.state, State::Initialized); + assert_eq!(report.state, PayoutState::Initialized); assert_ok!(DdcPayouts::begin_charging_customers( 
RuntimeOrigin::signed(dac_account), @@ -4642,6 +4817,7 @@ fn end_rewarding_providers_works() { era, batch_index, payers, + MMRProof::default(), )); assert_ok!(DdcPayouts::end_charging_customers( @@ -4664,6 +4840,7 @@ fn end_rewarding_providers_works() { era, batch_index, payees, + MMRProof::default(), )); assert_ok!(DdcPayouts::end_rewarding_providers( @@ -4675,7 +4852,7 @@ fn end_rewarding_providers_works() { System::assert_last_event(Event::RewardingFinished { cluster_id, era }.into()); report = DdcPayouts::active_billing_reports(cluster_id, era).unwrap(); - assert_eq!(report.state, State::ProvidersRewarded); + assert_eq!(report.state, PayoutState::ProvidersRewarded); }) } @@ -4697,8 +4874,10 @@ fn end_billing_report_fails_uninitialised() { let era = 100; let max_batch_index = 1; let batch_index = 0; - let payers1 = vec![(user1, CustomerUsage::default())]; - let payers2 = vec![(user2, CustomerUsage::default())]; + let bucket_id1: BucketId = 1; + let bucket_id2: BucketId = 2; + let payers1 = vec![(user1, bucket_id1, CustomerUsage::default())]; + let payers2 = vec![(user2, bucket_id2, CustomerUsage::default())]; let payees = vec![(node1, NodeUsage::default())]; let total_node_usage = NodeUsage::default(); @@ -4750,6 +4929,7 @@ fn end_billing_report_fails_uninitialised() { era, batch_index, payers1, + MMRProof::default(), )); assert_noop!( @@ -4763,6 +4943,7 @@ fn end_billing_report_fails_uninitialised() { era, batch_index + 1, payers2, + MMRProof::default(), )); assert_noop!( @@ -4800,6 +4981,7 @@ fn end_billing_report_fails_uninitialised() { era, batch_index, payees.clone(), + MMRProof::default(), )); assert_noop!( @@ -4813,6 +4995,7 @@ fn end_billing_report_fails_uninitialised() { era, batch_index + 1, payees, + MMRProof::default(), )); assert_noop!( @@ -4841,7 +5024,8 @@ fn end_billing_report_works() { let max_batch_index = 0; let batch_index = 0; let total_node_usage = NodeUsage::default(); - let payers = vec![(user1, CustomerUsage::default())]; + let 
bucket_id1 = 1; + let payers = vec![(user1, bucket_id1, CustomerUsage::default())]; let payees = vec![(node1, NodeUsage::default())]; assert_ok!(DdcPayouts::set_authorised_caller(RuntimeOrigin::root(), dac_account)); @@ -4855,7 +5039,7 @@ fn end_billing_report_works() { )); let report = DdcPayouts::active_billing_reports(cluster_id, era).unwrap(); - assert_eq!(report.state, State::Initialized); + assert_eq!(report.state, PayoutState::Initialized); assert_ok!(DdcPayouts::begin_charging_customers( RuntimeOrigin::signed(dac_account), @@ -4870,6 +5054,7 @@ fn end_billing_report_works() { era, batch_index, payers, + MMRProof::default(), )); assert_ok!(DdcPayouts::end_charging_customers( @@ -4892,6 +5077,7 @@ fn end_billing_report_works() { era, batch_index, payees, + MMRProof::default(), )); assert_ok!(DdcPayouts::end_rewarding_providers( @@ -4911,6 +5097,6 @@ fn end_billing_report_works() { let report_end = DdcPayouts::active_billing_reports(cluster_id, era).unwrap(); assert!(report_end.rewarding_processed_batches.is_empty()); assert!(report_end.charging_processed_batches.is_empty()); - assert_eq!(report_end.state, State::Finalized); + assert_eq!(report_end.state, PayoutState::Finalized); }) } diff --git a/pallets/ddc-staking/src/benchmarking.rs b/pallets/ddc-staking/src/benchmarking.rs index 57c3c3a0b..ecd280e2a 100644 --- a/pallets/ddc-staking/src/benchmarking.rs +++ b/pallets/ddc-staking/src/benchmarking.rs @@ -30,8 +30,8 @@ fn fast_forward_to(n: BlockNumberFor) { } } -fn assert_has_event(generic_event: ::RuntimeEvent) { - frame_system::Pallet::::assert_has_event(generic_event.into()); +fn assert_last_event(generic_event: ::RuntimeEvent) { + frame_system::Pallet::::assert_last_event(generic_event.into()); } benchmarks! { @@ -181,7 +181,7 @@ benchmarks! 
{ assert!(ClusterBonded::::contains_key(&cluster_reserve_id)); assert!(ClusterLedger::::contains_key(&cluster_manager_id)); let amount = T::ClusterBondingAmount::get(); - assert_has_event::(Event::Bonded(cluster_reserve_id, amount).into()); + assert_last_event::(Event::Bonded(cluster_reserve_id, amount).into()); } unbond_cluster { @@ -209,7 +209,7 @@ benchmarks! { }: _(RawOrigin::Signed(cluster_manager_id.clone()), cluster_id) verify { let amount = T::ClusterBondingAmount::get(); - assert_has_event::(Event::Unbonded(cluster_reserve_id, amount).into()); + assert_last_event::(Event::Unbonded(cluster_reserve_id, amount).into()); } withdraw_unbonded_cluster { @@ -243,6 +243,6 @@ benchmarks! { assert!(!ClusterBonded::::contains_key(&cluster_reserve_id)); assert!(!ClusterLedger::::contains_key(&cluster_manager_id)); let amount = T::ClusterBondingAmount::get(); - assert_has_event::(Event::Withdrawn(cluster_reserve_id, amount).into()); + assert_last_event::(Event::Withdrawn(cluster_reserve_id, amount).into()); } } diff --git a/pallets/ddc-staking/src/lib.rs b/pallets/ddc-staking/src/lib.rs index ae4ae6d5f..ee4ef92e1 100644 --- a/pallets/ddc-staking/src/lib.rs +++ b/pallets/ddc-staking/src/lib.rs @@ -1154,5 +1154,11 @@ pub mod pallet { Ok(is_chilling_attempt) } + + fn stash_by_ctrl(controller: &T::AccountId) -> Result { + Self::ledger(controller) + .map(|l| l.stash) + .ok_or(StakingVisitorError::ControllerDoesNotExist) + } } } diff --git a/pallets/ddc-verification/Cargo.toml b/pallets/ddc-verification/Cargo.toml new file mode 100644 index 000000000..fc97bc559 --- /dev/null +++ b/pallets/ddc-verification/Cargo.toml @@ -0,0 +1,71 @@ +[package] +name = "pallet-ddc-verification" +version.workspace = true +authors.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +readme.workspace = true +repository.workspace = true + +[dependencies] +array-bytes = { workspace = true } +# 3rd-party dependencies +codec = { workspace = true } +# Cere 
dependencies +ddc-primitives = { workspace = true } +# Substrate dependencies +frame-benchmarking = { workspace = true, optional = true } +frame-election-provider-support = { workspace = true } +frame-support = { workspace = true } +frame-system = { workspace = true } +itertools = { workspace = true } +log = { workspace = true } +polkadot-ckb-merkle-mountain-range = { workspace = true } +scale-info = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } +sp-application-crypto = { workspace = true } +sp-core = { workspace = true } +sp-io = { workspace = true } +sp-runtime = { workspace = true } +sp-staking = { workspace = true } +sp-std = { workspace = true } + +[dev-dependencies] +pallet-balances = { workspace = true, default-features = true } +pallet-session = { workspace = true, default-features = true } +pallet-staking = { workspace = true, default-features = true } +pallet-staking-reward-curve = { workspace = true } +pallet-timestamp = { workspace = true } +sp-core = { workspace = true, default-features = true } +sp-keystore = { workspace = true } + +[features] +default = ["std"] +std = [ + "polkadot-ckb-merkle-mountain-range/std", + "codec/std", + "frame-benchmarking?/std", + "frame-support/std", + "frame-system/std", + "scale-info/std", + "sp-std/std", + "scale-info/std", + "sp-runtime/std", + "sp-io/std", + "sp-core/std", + "sp-application-crypto/std", + "sp-staking/std", + "frame-election-provider-support/std", +] +runtime-benchmarks = [ + "frame-benchmarking/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] +try-runtime = [ + "frame-support/try-runtime", + "frame-system/try-runtime", +] diff --git a/pallets/ddc-verification/src/benchmarking.rs b/pallets/ddc-verification/src/benchmarking.rs new file mode 100644 index 000000000..6695674e5 --- /dev/null +++ b/pallets/ddc-verification/src/benchmarking.rs @@ -0,0 +1,37 @@ +#![cfg(feature = 
"runtime-benchmarks")] + +use frame_benchmarking::v2::*; +use frame_system::RawOrigin; +use sp_std::vec; + +use super::*; +#[allow(unused)] +use crate::Pallet as DdcVerification; + +#[benchmarks] +mod benchmarks { + use super::*; + + #[benchmark] + fn create_billing_reports() { + let cluster_id = ClusterId::from([1; 20]); + let era: DdcEra = 1; + let merkel_root_hash: MmrRootHash = array_bytes::hex_n_into_unchecked( + "95803defe6ea9f41e7ec6afa497064f21bfded027d8812efacbdf984e630cbdc", + ); + let caller: T::AccountId = whitelisted_caller(); + #[extrinsic_call] + create_billing_reports( + RawOrigin::Signed(caller), + cluster_id, + era, + merkel_root_hash, + ); + + assert!(ActiveBillingReports::::contains_key(cluster_id, era)); + let billing_report = ActiveBillingReports::::get(cluster_id, era).unwrap(); + assert_eq!(billing_report.merkle_root_hash, ActivityHash::from(merkel_root_hash)); + } + + impl_benchmark_test_suite!(DdcVerification, crate::mock::new_test_ext(), crate::mock::Test); +} diff --git a/pallets/ddc-verification/src/lib.rs b/pallets/ddc-verification/src/lib.rs new file mode 100644 index 000000000..22ee43323 --- /dev/null +++ b/pallets/ddc-verification/src/lib.rs @@ -0,0 +1,3024 @@ +//! # DDC Verification Pallet +//! +//! The DDC Verification pallet is used to validate zk-SNARK Proof and Signature +//! +//! - [`Config`] +//! - [`Call`] +//! 
- [`Pallet`] +#![allow(clippy::missing_docs_in_private_items)] +#![cfg_attr(not(feature = "std"), no_std)] +#![recursion_limit = "256"] + +use core::str; + +use ddc_primitives::{ + traits::{ClusterManager, NodeVisitor, PayoutVisitor, ValidatorVisitor}, + ActivityHash, BatchIndex, ClusterId, CustomerUsage, DdcEra, MMRProof, NodeParams, NodePubKey, + NodeUsage, PayoutState, StorageNodeMode, StorageNodeParams, +}; +use frame_support::{ + pallet_prelude::*, + traits::{Get, OneSessionHandler}, +}; +use frame_system::{ + offchain::{AppCrypto, CreateSignedTransaction, SendSignedTransaction, Signer}, + pallet_prelude::*, +}; +pub use pallet::*; +use polkadot_ckb_merkle_mountain_range::{ + util::{MemMMR, MemStore}, + MerkleProof, MMR, +}; +use scale_info::prelude::{format, string::String}; +use serde::{Deserialize, Serialize}; +use sp_application_crypto::RuntimeAppPublic; +use sp_runtime::{ + offchain as rt_offchain, + offchain::{http, StorageKind}, + traits::Hash, + Percent, +}; +use sp_std::{collections::btree_map::BTreeMap, prelude::*}; + +pub mod weights; +use itertools::Itertools; +use sp_staking::StakingInterface; + +use crate::weights::WeightInfo; + +#[cfg(test)] +pub(crate) mod mock; +#[cfg(test)] +mod tests; + +#[frame_support::pallet] +pub mod pallet { + use ddc_primitives::{BucketId, MergeActivityHash, KEY_TYPE}; + use frame_support::PalletId; + use sp_runtime::SaturatedConversion; + + use super::*; + + /// The current storage version. + const STORAGE_VERSION: frame_support::traits::StorageVersion = + frame_support::traits::StorageVersion::new(0); + + #[pallet::pallet] + #[pallet::storage_version(STORAGE_VERSION)] + #[pallet::without_storage_info] + pub struct Pallet(_); + + #[pallet::config] + /// The module configuration trait. + pub trait Config: CreateSignedTransaction> + frame_system::Config { + /// The overarching event type. + type RuntimeEvent: From> + IsType<::RuntimeEvent>; + /// The accounts's pallet id, used for deriving its sovereign account ID. 
+ #[pallet::constant] + type PalletId: Get; + /// Weight info type. + type WeightInfo: WeightInfo; + /// DDC clusters nodes manager. + type ClusterManager: ClusterManager; + type PayoutVisitor: PayoutVisitor; + /// DDC nodes read-only registry. + type NodeVisitor: NodeVisitor; + /// The output of the `ActivityHasher` function. + type ActivityHash: Member + + Parameter + + MaybeSerializeDeserialize + + Ord + + Into + + From; + /// The hashing system (algorithm) + type ActivityHasher: Hash; + /// The identifier type for an authority. + type AuthorityId: Member + + Parameter + + RuntimeAppPublic + + Ord + + MaybeSerializeDeserialize + + Into + + From; + /// The identifier type for an offchain worker. + type OffchainIdentifierId: AppCrypto; + /// The majority of validators. + const MAJORITY: u8; + /// Block to start from. + const BLOCK_TO_START: u16; + const MIN_DAC_NODES_FOR_CONSENSUS: u16; + const MAX_PAYOUT_BATCH_COUNT: u16; + const MAX_PAYOUT_BATCH_SIZE: u16; + /// The access to staking functionality. + type StakingVisitor: StakingInterface; + } + + /// The event type. + #[pallet::event] + /// The `generate_deposit` macro generates a function on `Pallet` called `deposit_event` which + /// will properly convert the error type of your pallet into `RuntimeEvent` (recall `type + /// RuntimeEvent: From>`, so it can be converted) and deposit it via + /// `frame_system::Pallet::deposit_event`. + #[pallet::generate_deposit(pub(crate) fn deposit_event)] + pub enum Event { + /// A new billing report was created from `ClusterId` and `ERA`. + BillingReportCreated { + cluster_id: ClusterId, + era_id: DdcEra, + }, + /// A verification key was stored with `VerificationKey`. + VerificationKeyStored { + verification_key: Vec, + }, + /// A new payout batch was created from `ClusterId` and `ERA`. 
+ PayoutBatchCreated { + cluster_id: ClusterId, + era_id: DdcEra, + }, + EraValidationReady { + cluster_id: ClusterId, + era_id: DdcEra, + }, + EraValidationNotReady { + cluster_id: ClusterId, + era_id: DdcEra, + }, + /// Not enough nodes for consensus. + NotEnoughNodesForConsensus { + cluster_id: ClusterId, + era_id: DdcEra, + id: ActivityHash, + validator: T::AccountId, + }, + /// No activity in consensus. + ActivityNotInConsensus { + cluster_id: ClusterId, + era_id: DdcEra, + id: ActivityHash, + validator: T::AccountId, + }, + /// Node Usage Retrieval Error. + NodeUsageRetrievalError { + cluster_id: ClusterId, + era_id: DdcEra, + node_pub_key: NodePubKey, + validator: T::AccountId, + }, + /// Customer Usage Retrieval Error. + CustomerUsageRetrievalError { + cluster_id: ClusterId, + era_id: DdcEra, + node_pub_key: NodePubKey, + validator: T::AccountId, + }, + EraRetrievalError { + cluster_id: ClusterId, + node_pub_key: NodePubKey, + validator: T::AccountId, + }, + PrepareEraTransactionError { + cluster_id: ClusterId, + era_id: DdcEra, + payers_merkle_root_hash: ActivityHash, + payees_merkle_root_hash: ActivityHash, + validator: T::AccountId, + }, + BeginBillingReportTransactionError { + cluster_id: ClusterId, + era_id: DdcEra, + validator: T::AccountId, + }, + BeginChargingCustomersTransactionError { + cluster_id: ClusterId, + era_id: DdcEra, + validator: T::AccountId, + }, + SendChargingCustomersBatchTransactionError { + cluster_id: ClusterId, + era_id: DdcEra, + batch_index: BatchIndex, + validator: T::AccountId, + }, + SendRewardingProvidersBatchTransactionError { + cluster_id: ClusterId, + era_id: DdcEra, + batch_index: BatchIndex, + validator: T::AccountId, + }, + EndChargingCustomersTransactionError { + cluster_id: ClusterId, + era_id: DdcEra, + validator: T::AccountId, + }, + BeginRewardingProvidersTransactionError { + cluster_id: ClusterId, + era_id: DdcEra, + validator: T::AccountId, + }, + EndRewardingProvidersTransactionError { + cluster_id: ClusterId, 
+ era_id: DdcEra, + validator: T::AccountId, + }, + EndBillingReportTransactionError { + cluster_id: ClusterId, + era_id: DdcEra, + validator: T::AccountId, + }, + BillingReportDoesNotExist { + cluster_id: ClusterId, + era_id: DdcEra, + validator: T::AccountId, + }, + EmptyCustomerActivity { + cluster_id: ClusterId, + era_id: DdcEra, + validator: T::AccountId, + }, + BatchIndexConversionFailed { + cluster_id: ClusterId, + era_id: DdcEra, + validator: T::AccountId, + }, + NoAvailableSigner { + validator: T::AccountId, + }, + NotEnoughDACNodes { + num_nodes: u16, + validator: T::AccountId, + }, + FailedToCreateMerkleRoot { + cluster_id: ClusterId, + era_id: DdcEra, + validator: T::AccountId, + }, + FailedToCreateMerkleProof { + cluster_id: ClusterId, + era_id: DdcEra, + validator: T::AccountId, + }, + FailedToFetchCurrentValidator { + validator: T::AccountId, + }, + FailedToFetchNodeProvider { + validator: T::AccountId, + }, + } + + /// Consensus Errors + #[derive(Debug, Encode, Decode, Clone, TypeInfo, PartialEq)] + pub enum OCWError { + /// Not enough nodes for consensus. + NotEnoughNodesForConsensus { + cluster_id: ClusterId, + era_id: DdcEra, + id: ActivityHash, + }, + /// No activity in consensus. + ActivityNotInConsensus { + cluster_id: ClusterId, + era_id: DdcEra, + id: ActivityHash, + }, + /// Node Usage Retrieval Error. + NodeUsageRetrievalError { + cluster_id: ClusterId, + era_id: DdcEra, + node_pub_key: NodePubKey, + }, + /// Customer Usage Retrieval Error. 
+ CustomerUsageRetrievalError { + cluster_id: ClusterId, + era_id: DdcEra, + node_pub_key: NodePubKey, + }, + EraRetrievalError { + cluster_id: ClusterId, + node_pub_key: NodePubKey, + }, + PrepareEraTransactionError { + cluster_id: ClusterId, + era_id: DdcEra, + payers_merkle_root_hash: ActivityHash, + payees_merkle_root_hash: ActivityHash, + }, + BeginBillingReportTransactionError { + cluster_id: ClusterId, + era_id: DdcEra, + }, + BeginChargingCustomersTransactionError { + cluster_id: ClusterId, + era_id: DdcEra, + }, + SendChargingCustomersBatchTransactionError { + cluster_id: ClusterId, + era_id: DdcEra, + batch_index: BatchIndex, + }, + SendRewardingProvidersBatchTransactionError { + cluster_id: ClusterId, + era_id: DdcEra, + batch_index: BatchIndex, + }, + EndChargingCustomersTransactionError { + cluster_id: ClusterId, + era_id: DdcEra, + }, + BeginRewardingProvidersTransactionError { + cluster_id: ClusterId, + era_id: DdcEra, + }, + EndRewardingProvidersTransactionError { + cluster_id: ClusterId, + era_id: DdcEra, + }, + EndBillingReportTransactionError { + cluster_id: ClusterId, + era_id: DdcEra, + }, + BillingReportDoesNotExist { + cluster_id: ClusterId, + era_id: DdcEra, + }, + EmptyCustomerActivity { + cluster_id: ClusterId, + era_id: DdcEra, + }, + BatchIndexConversionFailed { + cluster_id: ClusterId, + era_id: DdcEra, + }, + NoAvailableSigner, + NotEnoughDACNodes { + num_nodes: u16, + }, + FailedToCreateMerkleRoot { + cluster_id: ClusterId, + era_id: DdcEra, + }, + FailedToCreateMerkleProof { + cluster_id: ClusterId, + era_id: DdcEra, + }, + FailedToFetchCurrentValidator, + FailedToFetchNodeProvider, + } + + #[pallet::error] + #[derive(PartialEq)] + pub enum Error { + /// Bad verification key. + BadVerificationKey, + /// Bad requests. + BadRequest, + /// Not a validator. + Unauthorised, + /// Already signed era. + AlreadySignedEra, + NotExpectedState, + /// Already signed payout batch. + AlreadySignedPayoutBatch, + /// Node Retrieval Error. 
+ NodeRetrievalError, + /// Cluster To Validate Retrieval Error. + ClusterToValidateRetrievalError, + /// Era To Validate Retrieval Error. + EraToValidateRetrievalError, + /// Era Per Node Retrieval Error. + EraPerNodeRetrievalError, + /// Fail to fetch Ids. + FailToFetchIds, + /// No validator exists. + NoValidatorExist, + /// Not a controller. + NotController, + /// Not a validator stash. + NotValidatorStash, + /// DDC Validator Key Not Registered + DDCValidatorKeyNotRegistered, + TransactionSubmissionError, + NoAvailableSigner, + /// Fail to generate proof + FailToGenerateProof, + /// Fail to verify merkle proof + FailToVerifyMerkleProof, + /// No Era Validation exist + NoEraValidation, + } + + /// Era validations + #[pallet::storage] + #[pallet::getter(fn era_validations)] + pub type EraValidations = StorageDoubleMap< + _, + Blake2_128Concat, + ClusterId, + Blake2_128Concat, + DdcEra, + EraValidation, + >; + + /// Cluster id storage + #[pallet::storage] + #[pallet::getter(fn cluster_to_validate)] + pub type ClusterToValidate = StorageValue<_, ClusterId>; + + /// List of validators. + #[pallet::storage] + #[pallet::getter(fn validator_set)] + pub type ValidatorSet = StorageValue<_, Vec, ValueQuery>; + + /// Validator stash key mapping + #[pallet::storage] + #[pallet::getter(fn get_stash_for_ddc_validator)] + pub type ValidatorToStashKey = StorageMap<_, Identity, T::AccountId, T::AccountId>; + #[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo, PartialEq)] + pub enum EraValidationStatus { + ValidatingData, + ReadyForPayout, + PayoutInProgress, + PayoutFailed, + PayoutSuccess, + } + + #[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo, PartialEq)] + #[scale_info(skip_type_params(T))] + pub struct EraValidation { + pub validators: BTreeMap<(ActivityHash, ActivityHash), Vec>, /* todo! 
change to signatures (T::AccountId, Signature) */ + pub start_era: i64, + pub end_era: i64, + pub payers_merkle_root_hash: ActivityHash, + pub payees_merkle_root_hash: ActivityHash, + pub status: EraValidationStatus, + } + + /// Era activity of a node. + #[derive( + Debug, + Serialize, + Deserialize, + Clone, + Hash, + Ord, + PartialOrd, + PartialEq, + Eq, + TypeInfo, + Encode, + Decode, + )] + pub struct EraActivity { + /// Era id. + pub id: DdcEra, + pub start: i64, + pub end: i64, + } + + pub struct CustomerBatch { + pub(crate) batch_index: BatchIndex, + pub(crate) payers: Vec<(T::AccountId, BucketId, CustomerUsage)>, + pub(crate) batch_proof: MMRProof, + } + + pub struct ProviderBatch { + pub(crate) batch_index: BatchIndex, + pub(crate) payees: Vec<(T::AccountId, NodeUsage)>, + pub(crate) batch_proof: MMRProof, + } + + /// Node activity of a node. + #[derive( + Debug, Serialize, Deserialize, Clone, Hash, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, + )] + pub(crate) struct NodeActivity { + /// Node id. + pub(crate) node_id: String, + /// Total amount of stored bytes. + pub(crate) stored_bytes: u64, + /// Total amount of transferred bytes. + pub(crate) transferred_bytes: u64, + /// Total number of puts. + pub(crate) number_of_puts: u64, + /// Total number of gets. + pub(crate) number_of_gets: u64, + } + + /// Customer Activity of a bucket. + #[derive( + Debug, Serialize, Deserialize, Clone, Hash, Ord, PartialOrd, PartialEq, Eq, Encode, Decode, + )] + pub(crate) struct CustomerActivity { + /// Customer id. + pub(crate) customer_id: String, + /// Bucket id + pub(crate) bucket_id: BucketId, + /// Total amount of stored bytes. + pub(crate) stored_bytes: u64, + /// Total amount of transferred bytes. + pub(crate) transferred_bytes: u64, + /// Total number of puts. + pub(crate) number_of_puts: u64, + /// Total number of gets. 
+ pub(crate) number_of_gets: u64, + } + + // Define a common trait + pub trait Activity: + Clone + Ord + PartialEq + Eq + Serialize + for<'de> Deserialize<'de> + { + fn get_consensus_id(&self) -> ActivityHash; + fn hash(&self) -> ActivityHash; + } + + impl Activity for NodeActivity { + fn get_consensus_id(&self) -> ActivityHash { + T::ActivityHasher::hash(self.node_id.as_bytes()).into() + } + + fn hash(&self) -> ActivityHash { + T::ActivityHasher::hash(&self.encode()).into() + } + } + impl Activity for CustomerActivity { + fn get_consensus_id(&self) -> ActivityHash { + let mut data = self.customer_id.as_bytes().to_vec(); + data.extend_from_slice(&self.bucket_id.encode()); + T::ActivityHasher::hash(&data).into() + } + + fn hash(&self) -> ActivityHash { + T::ActivityHasher::hash(&self.encode()).into() + } + } + + /// Unwrap or send an error log + macro_rules! unwrap_or_log_error { + ($result:expr, $error_msg:expr) => { + match $result { + Ok(val) => val, + Err(err) => { + log::error!("{}: {:?}", $error_msg, err); + return; + }, + } + }; + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn offchain_worker(block_number: BlockNumberFor) { + if !sp_io::offchain::is_validator() { + return; + } + + let signer = Signer::::any_account(); + if !signer.can_sign() { + log::error!("🚨No OCW is available."); + return; + } + + if Self::fetch_current_validator().is_err() { + let _ = signer.send_signed_transaction(|account| { + log::info!("🏭📋‍ Setting current validator... {:?}", account.id); // todo! 
consistent emojis in logs with 2 icons, one for OCW 🏭, other is + // log📋/error❌ + + Self::store_current_validator(account.id.encode()); + + Call::set_current_validator {} + }); + } + if (block_number.saturated_into::() % T::BLOCK_TO_START as u32) != 0 { + return; + } + + log::info!("👋 Hello from pallet-ddc-verification."); + + let cluster_id = unwrap_or_log_error!( + Self::get_cluster_to_validate(), + "🏭❌ Error retrieving cluster to validate" + ); + + let dac_nodes = unwrap_or_log_error!( + Self::get_dac_nodes(&cluster_id), + "🏭❌ Error retrieving dac nodes to validate" + ); + + let min_nodes = T::MIN_DAC_NODES_FOR_CONSENSUS; + let batch_size = T::MAX_PAYOUT_BATCH_SIZE; + let mut errors: Vec = Vec::new(); + + let processed_dac_data = + Self::process_dac_data(&cluster_id, None, &dac_nodes, min_nodes, batch_size.into()); + + match processed_dac_data { + Ok(Some((era_activity, payers_merkle_root_hash, payees_merkle_root_hash))) => { + log::info!( + "🏭🚀 Processing era_id: {:?} for cluster_id: {:?}", + era_activity.clone(), + cluster_id + ); + + let results = signer.send_signed_transaction(|_account| { + Call::set_prepare_era_for_payout { + cluster_id, + era_activity: era_activity.clone(), + payers_merkle_root_hash, + payees_merkle_root_hash, + } + }); + + for (_, res) in &results { + match res { + Ok(()) => { + log::info!( + "🏭⛳️ Merkle roots posted on-chain for cluster_id: {:?}, era: {:?}", + cluster_id, + era_activity.clone() + ); + }, + Err(e) => { + log::error!( + "🏭❌ Error to post merkle roots on-chain for cluster_id: {:?}, era: {:?}: {:?}", + cluster_id, + era_activity.clone(), + e + ); + // Extrinsic call failed + errors.push(OCWError::PrepareEraTransactionError { + cluster_id, + era_id: era_activity.id, + payers_merkle_root_hash, + payees_merkle_root_hash, + }); + }, + } + } + }, + Ok(None) => { + log::info!("🏭ℹ️ No eras for DAC process for cluster_id: {:?}", cluster_id); + }, + Err(process_errors) => { + errors.extend(process_errors); + }, + }; + + // todo! 
factor out as macro as this is repetitive + match Self::prepare_begin_billing_report(&cluster_id) { + Ok(Some((era_id, start_era, end_era))) => { + log::info!( + "🏭🚀 process_start_payout processed successfully for cluster_id: {:?}, era_id: {:?}, start_era: {:?}, end_era: {:?} ", + cluster_id, + era_id, + start_era, + end_era + ); + let results = signer.send_signed_transaction(|_account| { + Call::begin_billing_report { cluster_id, era_id, start_era, end_era } + }); + + for (_, res) in &results { + match res { + Ok(()) => { + log::info!( + "🏭🏄‍ Sent begin_billing_report successfully for cluster_id: {:?}, era_id: {:?}", + cluster_id, + era_id + ); + }, + Err(e) => { + log::error!( + "🏭❌ Error to post begin_billing_report for cluster_id: {:?}, era_id: {:?}: {:?}", + cluster_id, + era_id, + e + ); + // Extrinsic call failed + errors.push(OCWError::BeginBillingReportTransactionError { + cluster_id, + era_id, + }); + }, + } + } + }, + Ok(None) => { + log::info!("🏭❌ No era for payout for cluster_id: {:?}", cluster_id); + }, + Err(e) => { + errors.push(e); + }, + } + + // todo! 
factor out as macro as this is repetitive + match Self::prepare_begin_charging_customers( + &cluster_id, + &dac_nodes, + min_nodes, + batch_size.into(), + ) { + Ok(Some((era_id, max_batch_index))) => { + log::info!( + "🏭🎁 prepare_begin_charging_customers processed successfully for cluster_id: {:?}, era_id: {:?}", + cluster_id, + era_id + ); + + if let Some((_, res)) = signer.send_signed_transaction(|_acc| { + Call::begin_charging_customers { cluster_id, era_id, max_batch_index } + }) { + match res { + Ok(_) => { + // Extrinsic call succeeded + log::info!( + "🏭🚀 Sent begin_charging_customers successfully for cluster_id: {:?}, era_id: {:?}", + cluster_id, + era_id + ); + }, + Err(e) => { + log::error!( + "🏭❌ Error to post begin_charging_customers for cluster_id: {:?}, era_id: {:?}: {:?}", + cluster_id, + era_id, + e + ); + // Extrinsic call failed + errors.push(OCWError::BeginChargingCustomersTransactionError { + cluster_id, + era_id, + }); + }, + } + } else { + log::error!("🏭❌ No account available to sign the transaction"); + errors.push(OCWError::NoAvailableSigner); + } + }, + Ok(None) => { + log::error!( + "🏭🦀 No era for begin_charging_customers for cluster_id: {:?}", + cluster_id + ); + }, + Err(e) => errors.extend(e), + } + + // todo! 
factor out as macro as this is repetitive + match Self::prepare_send_charging_customers_batch( + &cluster_id, + batch_size.into(), + &dac_nodes, + min_nodes, + ) { + Ok(Some((era_id, batch_payout))) => { + log::info!( + "🏭🎁 prepare_send_charging_customers_batch processed successfully for cluster_id: {:?}, era_id: {:?}", + cluster_id, + era_id + ); + + if let Some((_, res)) = + signer.send_signed_transaction(|_acc| Call::send_charging_customers_batch { + cluster_id, + era_id, + batch_index: batch_payout.batch_index, + payers: batch_payout.payers.clone(), + batch_proof: batch_payout.batch_proof.clone(), + }) { + match res { + Ok(_) => { + // Extrinsic call succeeded + log::info!( + "🏭🚀 Sent send_charging_customers_batch successfully for cluster_id: {:?}, era_id: {:?}", + cluster_id, + era_id + ); + }, + Err(e) => { + log::error!( + "🏭❌ Error to post send_charging_customers_batch for cluster_id: {:?}, era_id: {:?}: {:?}", + cluster_id, + era_id, + e + ); + // Extrinsic call failed + errors.push(OCWError::SendChargingCustomersBatchTransactionError { + cluster_id, + era_id, + batch_index: batch_payout.batch_index, + }); + }, + } + } else { + log::error!("🏭❌ No account available to sign the transaction"); + errors.push(OCWError::NoAvailableSigner); + } + }, + Ok(None) => { + log::info!( + "🏭🦀 No era for send_charging_customers_batch for cluster_id: {:?}", + cluster_id + ); + }, + Err(e) => { + errors.extend(e); + }, + } + + // todo! 
factor out as macro as this is repetitive + match Self::prepare_end_charging_customers(&cluster_id) { + Ok(Some(era_id)) => { + log::info!( + "🏭📝prepare_end_charging_customers processed successfully for cluster_id: {:?}, era_id: {:?}", + cluster_id, + era_id + ); + + if let Some((_, res)) = signer.send_signed_transaction(|_acc| { + Call::end_charging_customers { cluster_id, era_id } + }) { + match res { + Ok(_) => { + // Extrinsic call succeeded + log::info!( + "🏭📝Sent end_charging_customers successfully for cluster_id: {:?}, era_id: {:?}", + cluster_id, + era_id + ); + }, + Err(e) => { + log::error!( + "🏭❌Error to post end_charging_customers for cluster_id: {:?}, era_id: {:?}: {:?}", + cluster_id, + era_id, + e + ); + // Extrinsic call failed + errors.push(OCWError::EndChargingCustomersTransactionError { + cluster_id, + era_id, + }); + }, + } + } else { + log::error!("🏭❌No account available to sign the transaction"); + errors.push(OCWError::NoAvailableSigner); + } + }, + Ok(None) => { + log::info!( + "🏭📝No era for end_charging_customers for cluster_id: {:?}", + cluster_id + ); + }, + Err(e) => { + errors.push(e); + }, + } + + // todo! 
factor out as macro as this is repetitive + match Self::prepare_begin_rewarding_providers( + &cluster_id, + &dac_nodes, + min_nodes, + batch_size.into(), + ) { + Ok(Some((era_id, max_batch_index, total_node_usage))) => { + log::info!( + "🏭📝prepare_begin_rewarding_providers processed successfully for cluster_id: {:?}, era_id: {:?}", + cluster_id, + era_id + ); + + if let Some((_, res)) = + signer.send_signed_transaction(|_acc| Call::begin_rewarding_providers { + cluster_id, + era_id, + max_batch_index, + total_node_usage: total_node_usage.clone(), + }) { + match res { + Ok(_) => { + // Extrinsic call succeeded + log::info!( + "🏭📝Sent begin_rewarding_providers successfully for cluster_id: {:?}, era_id: {:?}", + cluster_id, + era_id + ); + }, + Err(e) => { + log::error!( + "🏭❌Error to post begin_rewarding_providers for cluster_id: {:?}, era_id: {:?}: {:?}", + cluster_id, + era_id, + e + ); + // Extrinsic call failed + errors.push(OCWError::BeginRewardingProvidersTransactionError { + cluster_id, + era_id, + }); + }, + } + } else { + log::error!("🏭❌No account available to sign the transaction"); + errors.push(OCWError::NoAvailableSigner); + } + }, + Ok(None) => { + log::info!( + "🏭📝No era for begin_rewarding_providers for cluster_id: {:?}", + cluster_id + ); + }, + Err(e) => { + errors.extend(e); + }, + } + + // todo! 
factor out as macro as this is repetitive + match Self::prepare_send_rewarding_providers_batch( + &cluster_id, + batch_size.into(), + &dac_nodes, + min_nodes, + ) { + Ok(Some((era_id, batch_payout))) => { + log::info!( + "🎁 prepare_send_rewarding_providers_batch processed successfully for cluster_id: {:?}, era_id: {:?}", + cluster_id, + era_id + ); + + if let Some((_, res)) = signer.send_signed_transaction(|_acc| { + Call::send_rewarding_providers_batch { + cluster_id, + era_id, + batch_index: batch_payout.batch_index, + payees: batch_payout.payees.clone(), + batch_proof: batch_payout.batch_proof.clone(), + } + }) { + match res { + Ok(_) => { + // Extrinsic call succeeded + log::info!( + "🚀 Sent send_rewarding_providers_batch successfully for cluster_id: {:?}, era_id: {:?}", + cluster_id, + era_id + ); + }, + Err(e) => { + log::error!( + "🦀 Error to post send_rewarding_providers_batch for cluster_id: {:?}, era_id: {:?}: {:?}", + cluster_id, + era_id, + e + ); + // Extrinsic call failed + errors.push( + OCWError::SendRewardingProvidersBatchTransactionError { + cluster_id, + era_id, + batch_index: batch_payout.batch_index, + }, + ); + }, + } + } else { + log::error!("🦀 No account available to sign the transaction"); + errors.push(OCWError::NoAvailableSigner); + } + }, + Ok(None) => { + log::info!( + "🦀 No era for send_rewarding_providers_batch for cluster_id: {:?}", + cluster_id + ); + }, + Err(e) => { + errors.extend(e); + }, + } + + // todo! 
factor out as macro as this is repetitive + match Self::prepare_end_rewarding_providers(&cluster_id) { + Ok(Some(era_id)) => { + log::info!( + "🏭📝prepare_end_rewarding_providers processed successfully for cluster_id: {:?}, era_id: {:?}", + cluster_id, + era_id + ); + + if let Some((_, res)) = signer.send_signed_transaction(|_acc| { + Call::end_rewarding_providers { cluster_id, era_id } + }) { + match res { + Ok(_) => { + // Extrinsic call succeeded + log::info!( + "🏭📝Sent end_rewarding_providers successfully for cluster_id: {:?}, era_id: {:?}", + cluster_id, + era_id + ); + }, + Err(e) => { + log::error!( + "🏭❌Error to post end_rewarding_providers for cluster_id: {:?}, era_id: {:?}: {:?}", + cluster_id, + era_id, + e + ); + // Extrinsic call failed + errors.push(OCWError::EndRewardingProvidersTransactionError { + cluster_id, + era_id, + }); + }, + } + } else { + log::error!("🏭❌No account available to sign the transaction"); + errors.push(OCWError::NoAvailableSigner); + } + }, + Ok(None) => { + log::info!( + "🏭📝No era for end_rewarding_providers for cluster_id: {:?}", + cluster_id + ); + }, + Err(e) => { + errors.push(e); + }, + } + + // todo! 
factor out as macro as this is repetitive + match Self::prepare_end_billing_report(&cluster_id) { + Ok(Some(era_id)) => { + log::info!( + "🏭📝prepare_end_billing_report processed successfully for cluster_id: {:?}, era_id: {:?}", + cluster_id, + era_id + ); + + if let Some((_, res)) = signer.send_signed_transaction(|_acc| { + Call::end_billing_report { cluster_id, era_id } + }) { + match res { + Ok(_) => { + // Extrinsic call succeeded + log::info!( + "🏭📝Sent end_billing_report successfully for cluster_id: {:?}, era_id: {:?}", + cluster_id, + era_id + ); + }, + Err(e) => { + log::error!( + "🏭❌Error to post end_billing_report for cluster_id: {:?}, era_id: {:?}: {:?}", + cluster_id, + era_id, + e + ); + // Extrinsic call failed + errors.push(OCWError::EndBillingReportTransactionError { + cluster_id, + era_id, + }); + }, + } + } else { + log::error!("🏭❌No account available to sign the transaction"); + errors.push(OCWError::NoAvailableSigner); + } + }, + Ok(None) => { + log::info!( + "🏭📝No era for end_billing_report for cluster_id: {:?}", + cluster_id + ); + }, + Err(e) => { + errors.push(e); + }, + } + + if !errors.is_empty() { + let results = signer.send_signed_transaction(|_account| { + Call::emit_consensus_errors { errors: errors.clone() } + }); + + for (_, res) in &results { + match res { + Ok(()) => log::info!("✅ Successfully submitted emit_consensus_errors tx"), + Err(_) => log::error!("🏭❌ Failed to submit emit_consensus_errors tx"), + } + } + } + } + } + + impl Pallet { + pub(crate) fn process_dac_data( + cluster_id: &ClusterId, + era_id_to_process: Option, + dac_nodes: &[(NodePubKey, StorageNodeParams)], + min_nodes: u16, + batch_size: usize, + ) -> Result, Vec> { + log::info!("🚀 Processing dac data for cluster_id: {:?}", cluster_id); + if dac_nodes.len().ilog2() < min_nodes.into() { + return Err(vec![OCWError::NotEnoughDACNodes { num_nodes: min_nodes }]); + } + + let era_activity = if let Some(era_activity) = era_id_to_process { + EraActivity { + id: 
era_activity.id, + start: era_activity.start, + end: era_activity.end, + } + } else { + match Self::get_era_for_validation(cluster_id, dac_nodes) { + Ok(Some(era_activity)) => era_activity, + Ok(None) => return Ok(None), + Err(err) => return Err(vec![err]), + } + }; + + let nodes_usage = + Self::fetch_nodes_usage_for_era(cluster_id, era_activity.id, dac_nodes) + .map_err(|err| vec![err])?; + let customers_usage = + Self::fetch_customers_usage_for_era(cluster_id, era_activity.id, dac_nodes) + .map_err(|err| vec![err])?; + + let customers_activity_in_consensus = Self::get_consensus_for_activities( + cluster_id, + era_activity.id, + &customers_usage, + min_nodes, + Percent::from_percent(T::MAJORITY), + )?; + + log::info!("🪅 customers_activity_in_consensus executed successfully. "); + let customers_activity_batch_roots = Self::convert_to_batch_merkle_roots( + cluster_id, + era_activity.id, + Self::split_to_batches(&customers_activity_in_consensus, batch_size), + ) + .map_err(|err| vec![err])?; + + let customers_activity_root = Self::create_merkle_root( + cluster_id, + era_activity.id, + &customers_activity_batch_roots, + ) + .map_err(|err| vec![err])?; + + let nodes_activity_in_consensus = Self::get_consensus_for_activities( + cluster_id, + era_activity.id, + &nodes_usage, + min_nodes, + Percent::from_percent(T::MAJORITY), + )?; + + log::info!("🪅 nodes_activity_in_consensus executed successfully. 
"); + let nodes_activity_batch_roots = Self::convert_to_batch_merkle_roots( + cluster_id, + era_activity.id, + Self::split_to_batches(&nodes_activity_in_consensus, batch_size), + ) + .map_err(|err| vec![err])?; + + let nodes_activity_root = + Self::create_merkle_root(cluster_id, era_activity.id, &nodes_activity_batch_roots) + .map_err(|err| vec![err])?; + + Self::store_validation_activities( + cluster_id, + era_activity.id, + &customers_activity_in_consensus, + customers_activity_root, + &customers_activity_batch_roots, + &nodes_activity_in_consensus, + nodes_activity_root, + &nodes_activity_batch_roots, + ); + log::info!("🙇‍ Dac data processing completed for cluster_id: {:?}", cluster_id); + Ok(Some((era_activity, customers_activity_root, nodes_activity_root))) + } + + #[allow(dead_code)] + pub(crate) fn prepare_begin_billing_report( + cluster_id: &ClusterId, + ) -> Result, OCWError> { + Ok(Self::get_era_for_payout(cluster_id, EraValidationStatus::ReadyForPayout)) + // todo! get start and end values based on result + } + + pub(crate) fn prepare_begin_charging_customers( + cluster_id: &ClusterId, + dac_nodes: &[(NodePubKey, StorageNodeParams)], + min_nodes: u16, + batch_size: usize, + ) -> Result, Vec> { + if let Some((era_id, start, end)) = + Self::get_era_for_payout(cluster_id, EraValidationStatus::PayoutInProgress) + { + if T::PayoutVisitor::get_billing_report_status(cluster_id, era_id) == + PayoutState::Initialized + { + if let Some((_, _, customers_activity_batch_roots, _, _, _)) = + Self::fetch_validation_activities::( + cluster_id, era_id, + ) { + Self::fetch_customer_activity( + cluster_id, + era_id, + customers_activity_batch_roots, + ) + } else { + let era_activity = EraActivity { id: era_id, start, end }; + + let _ = Self::process_dac_data( + cluster_id, + Some(era_activity), + dac_nodes, + min_nodes, + batch_size, + )?; + + if let Some((_, _, customers_activity_batch_roots, _, _, _)) = + Self::fetch_validation_activities::( + cluster_id, era_id, + ) { + 
Self::fetch_customer_activity( + cluster_id, + era_id, + customers_activity_batch_roots, + ) + } else { + Ok(None) + } + } + } else { + Ok(None) + } + } else { + Ok(None) + } + } + + pub(crate) fn fetch_customer_activity( + cluster_id: &ClusterId, + era_id: DdcEra, + customers_activity_batch_roots: Vec, + ) -> Result, Vec> { + if let Some(max_batch_index) = customers_activity_batch_roots.len().checked_sub(1) + // -1 cause payout expects max_index, not length + { + let max_batch_index: u16 = max_batch_index.try_into().map_err(|_| { + vec![OCWError::BatchIndexConversionFailed { cluster_id: *cluster_id, era_id }] + })?; + Ok(Some((era_id, max_batch_index))) + } else { + Err(vec![OCWError::EmptyCustomerActivity { cluster_id: *cluster_id, era_id }]) + } + } + + pub(crate) fn prepare_send_charging_customers_batch( + cluster_id: &ClusterId, + batch_size: usize, + dac_nodes: &[(NodePubKey, StorageNodeParams)], + min_nodes: u16, + ) -> Result)>, Vec> { + if let Some((era_id, start, end)) = + Self::get_era_for_payout(cluster_id, EraValidationStatus::PayoutInProgress) + { + if T::PayoutVisitor::get_billing_report_status(cluster_id, era_id) == + PayoutState::ChargingCustomers + { + if let Some(( + customers_activity_in_consensus, + _, + customers_activity_batch_roots, + _, + _, + _, + )) = Self::fetch_validation_activities::( + cluster_id, era_id, + ) { + Self::fetch_charging_activities( + cluster_id, + batch_size, + era_id, + customers_activity_in_consensus, + customers_activity_batch_roots, + ) + } else { + let era_activity = EraActivity { id: era_id, start, end }; + + let _ = Self::process_dac_data( + cluster_id, + Some(era_activity), + dac_nodes, + min_nodes, + batch_size, + )?; + + if let Some(( + customers_activity_in_consensus, + _, + customers_activity_batch_roots, + _, + _, + _, + )) = Self::fetch_validation_activities::( + cluster_id, era_id, + ) { + Self::fetch_charging_activities( + cluster_id, + batch_size, + era_id, + customers_activity_in_consensus, + 
customers_activity_batch_roots, + ) + } else { + Ok(None) + } + } + } else { + Ok(None) + } + } else { + Ok(None) + } + } + + fn fetch_charging_activities( + cluster_id: &ClusterId, + batch_size: usize, + era_id: DdcEra, + customers_activity_in_consensus: Vec, + customers_activity_batch_roots: Vec, + ) -> Result)>, Vec> { + let batch_index = T::PayoutVisitor::get_next_customer_batch_for_payment( + cluster_id, era_id, + ) + .map_err(|_| { + vec![OCWError::BillingReportDoesNotExist { cluster_id: *cluster_id, era_id }] + })?; + + if let Some(index) = batch_index { + let i: usize = index.into(); + // todo! store batched activity to avoid splitting it again each time + let customers_activity_batched = + Self::split_to_batches(&customers_activity_in_consensus, batch_size); + + let batch_root = customers_activity_batch_roots[i]; + let store = MemStore::default(); + let mut mmr: MMR> = + MemMMR::<_, MergeActivityHash>::new(0, &store); + + let leaf_position_map: Vec<(ActivityHash, u64)> = customers_activity_batch_roots + .iter() + .map(|a| (*a, mmr.push(*a).unwrap())) + .collect(); + + let leaf_position: Vec<(u64, ActivityHash)> = leaf_position_map + .iter() + .filter(|&(l, _)| l == &batch_root) + .map(|&(ref l, p)| (p, *l)) + .collect(); + let position: Vec = + leaf_position.clone().into_iter().map(|(p, _)| p).collect(); + + let proof = mmr + .gen_proof(position) + .map_err(|_| OCWError::FailedToCreateMerkleProof { + cluster_id: *cluster_id, + era_id, + }) + .map_err(|e| vec![e])? 
+ .proof_items() + .to_vec(); + + let batch_proof = MMRProof { + mmr_size: mmr.mmr_size(), + proof, + leaf_with_position: leaf_position[0], + }; + Ok(Some(( + era_id, + CustomerBatch { + batch_index: index, + payers: customers_activity_batched[i] + .iter() + .map(|activity| { + let account_id = + T::AccountId::decode(&mut &activity.customer_id.as_bytes()[..]) + .unwrap(); + let customer_usage = CustomerUsage { + transferred_bytes: activity.transferred_bytes, + stored_bytes: activity.stored_bytes, + number_of_puts: activity.number_of_puts, + number_of_gets: activity.number_of_gets, + }; + (account_id, activity.bucket_id, customer_usage) + }) + .collect(), + batch_proof, + }, + ))) + } else { + Ok(None) + } + } + + pub(crate) fn prepare_end_charging_customers( + cluster_id: &ClusterId, + ) -> Result, OCWError> { + if let Some((era_id, _start, _end)) = + Self::get_era_for_payout(cluster_id, EraValidationStatus::PayoutInProgress) + { + if T::PayoutVisitor::get_billing_report_status(cluster_id, era_id) == + PayoutState::ChargingCustomers && + T::PayoutVisitor::all_customer_batches_processed(cluster_id, era_id) + { + return Ok(Some(era_id)); + } + } + Ok(None) + } + + pub(crate) fn prepare_begin_rewarding_providers( + cluster_id: &ClusterId, + dac_nodes: &[(NodePubKey, StorageNodeParams)], + min_nodes: u16, + batch_size: usize, + ) -> Result, Vec> { + if let Some((era_id, start, end)) = + Self::get_era_for_payout(cluster_id, EraValidationStatus::PayoutInProgress) + { + if T::PayoutVisitor::get_billing_report_status(cluster_id, era_id) == + PayoutState::CustomersChargedWithFees + { + if let Some(( + _, + _, + _, + nodes_activity_in_consensus, + _, + nodes_activity_batch_roots, + )) = Self::fetch_validation_activities::( + cluster_id, era_id, + ) { + Self::fetch_reward_activities( + cluster_id, + era_id, + nodes_activity_in_consensus, + nodes_activity_batch_roots, + ) + } else { + let era_activity = EraActivity { id: era_id, start, end }; + + let _ = 
Self::process_dac_data( + cluster_id, + Some(era_activity), + dac_nodes, + min_nodes, + batch_size, + )?; + + if let Some(( + _, + _, + _, + nodes_activity_in_consensus, + _, + nodes_activity_batch_roots, + )) = Self::fetch_validation_activities::( + cluster_id, era_id, + ) { + Self::fetch_reward_activities( + cluster_id, + era_id, + nodes_activity_in_consensus, + nodes_activity_batch_roots, + ) + } else { + Ok(None) + } + } + } else { + Ok(None) + } + } else { + Ok(None) + } + } + + fn fetch_reward_activities( + cluster_id: &ClusterId, + era_id: DdcEra, + nodes_activity_in_consensus: Vec, + nodes_activity_batch_roots: Vec, + ) -> Result, Vec> { + if let Some(max_batch_index) = nodes_activity_batch_roots.len().checked_sub(1) + // -1 cause payout expects max_index, not length + { + let max_batch_index: u16 = max_batch_index.try_into().map_err(|_| { + vec![OCWError::BatchIndexConversionFailed { cluster_id: *cluster_id, era_id }] + })?; + + let total_node_usage = nodes_activity_in_consensus.into_iter().fold( + NodeUsage { + transferred_bytes: 0, + stored_bytes: 0, + number_of_puts: 0, + number_of_gets: 0, + }, + |mut acc, activity| { + acc.transferred_bytes += activity.transferred_bytes; + acc.stored_bytes += activity.stored_bytes; + acc.number_of_puts += activity.number_of_puts; + acc.number_of_gets += activity.number_of_gets; + acc + }, + ); + + Ok(Some((era_id, max_batch_index, total_node_usage))) + } else { + Err(vec![OCWError::EmptyCustomerActivity { cluster_id: *cluster_id, era_id }]) + } + } + + pub(crate) fn prepare_send_rewarding_providers_batch( + cluster_id: &ClusterId, + batch_size: usize, + dac_nodes: &[(NodePubKey, StorageNodeParams)], + min_nodes: u16, + ) -> Result)>, Vec> { + if let Some((era_id, start, end)) = + Self::get_era_for_payout(cluster_id, EraValidationStatus::PayoutInProgress) + { + if T::PayoutVisitor::get_billing_report_status(cluster_id, era_id) == + PayoutState::RewardingProviders + { + if let Some(( + _, + _, + _, + 
nodes_activity_in_consensus, + _, + nodes_activity_batch_roots, + )) = Self::fetch_validation_activities::( + cluster_id, era_id, + ) { + Self::fetch_reward_provider_batch( + cluster_id, + batch_size, + era_id, + nodes_activity_in_consensus, + nodes_activity_batch_roots, + ) + } else { + let era_activity = EraActivity { id: era_id, start, end }; + + let _ = Self::process_dac_data( + cluster_id, + Some(era_activity), + dac_nodes, + min_nodes, + batch_size, + )?; + + if let Some(( + _, + _, + _, + nodes_activity_in_consensus, + _, + nodes_activity_batch_roots, + )) = Self::fetch_validation_activities::( + cluster_id, era_id, + ) { + Self::fetch_reward_provider_batch( + cluster_id, + batch_size, + era_id, + nodes_activity_in_consensus, + nodes_activity_batch_roots, + ) + } else { + Ok(None) + } + } + } else { + Ok(None) + } + } else { + Ok(None) + } + } + + fn fetch_reward_provider_batch( + cluster_id: &ClusterId, + batch_size: usize, + era_id: DdcEra, + nodes_activity_in_consensus: Vec, + nodes_activity_batch_roots: Vec, + ) -> Result)>, Vec> { + let batch_index = T::PayoutVisitor::get_next_provider_batch_for_payment( + cluster_id, era_id, + ) + .map_err(|_| { + vec![OCWError::BillingReportDoesNotExist { cluster_id: *cluster_id, era_id }] + })?; + + if let Some(index) = batch_index { + let i: usize = index.into(); + // todo! 
store batched activity to avoid splitting it again each time + let nodes_activity_batched = + Self::split_to_batches(&nodes_activity_in_consensus, batch_size); + + let batch_root = nodes_activity_batch_roots[i]; + let store = MemStore::default(); + let mut mmr: MMR> = + MemMMR::<_, MergeActivityHash>::new(0, &store); + + let leaf_position_map: Vec<(ActivityHash, u64)> = nodes_activity_batch_roots + .iter() + .map(|a| (*a, mmr.push(*a).unwrap())) + .collect(); + + let leaf_position: Vec<(u64, ActivityHash)> = leaf_position_map + .iter() + .filter(|&(l, _)| l == &batch_root) + .map(|&(ref l, p)| (p, *l)) + .collect(); + let position: Vec = + leaf_position.clone().into_iter().map(|(p, _)| p).collect(); + + let proof = mmr + .gen_proof(position) + .map_err(|_| { + vec![OCWError::FailedToCreateMerkleProof { + cluster_id: *cluster_id, + era_id, + }] + })? + .proof_items() + .to_vec(); + + // todo! attend [i] through get(i).ok_or() + // todo! attend accountid conversion + let batch_proof = MMRProof { + mmr_size: mmr.mmr_size(), + proof, + leaf_with_position: leaf_position[0], + }; + Ok(Some(( + era_id, + ProviderBatch { + batch_index: index, + payees: nodes_activity_batched[i] + .iter() + .map(|activity| { + let node_id = activity.clone().node_id; + + let provider_id = Self::fetch_provider_id(node_id).unwrap(); // todo! 
remove unwrap + + let node_usage = NodeUsage { + transferred_bytes: activity.transferred_bytes, + stored_bytes: activity.stored_bytes, + number_of_puts: activity.number_of_puts, + number_of_gets: activity.number_of_gets, + }; + (provider_id, node_usage) + }) + .collect(), + batch_proof, + }, + ))) + } else { + Ok(None) + } + } + + pub(crate) fn prepare_end_rewarding_providers( + cluster_id: &ClusterId, + ) -> Result, OCWError> { + if let Some((era_id, _start, _end)) = + Self::get_era_for_payout(cluster_id, EraValidationStatus::PayoutInProgress) + { + if T::PayoutVisitor::get_billing_report_status(cluster_id, era_id) == + PayoutState::RewardingProviders && + T::PayoutVisitor::all_provider_batches_processed(cluster_id, era_id) + { + return Ok(Some(era_id)); + } + } + Ok(None) + } + + pub(crate) fn prepare_end_billing_report( + cluster_id: &ClusterId, + ) -> Result, OCWError> { + if let Some((era_id, _start, _end)) = + Self::get_era_for_payout(cluster_id, EraValidationStatus::PayoutInProgress) + { + if T::PayoutVisitor::get_billing_report_status(cluster_id, era_id) == + PayoutState::ProvidersRewarded + { + return Ok(Some(era_id)); + } + } + Ok(None) + } + + pub(crate) fn derive_key(cluster_id: &ClusterId, era_id: DdcEra) -> Vec { + format!("offchain::activities::{:?}::{:?}", cluster_id, era_id).into_bytes() + } + + pub(crate) fn store_current_validator(validator: Vec) { + let key = format!("offchain::validator::{:?}", KEY_TYPE).into_bytes(); + sp_io::offchain::local_storage_set(StorageKind::PERSISTENT, &key, &validator); + } + + pub(crate) fn fetch_current_validator() -> Result, OCWError> { + let key = format!("offchain::validator::{:?}", KEY_TYPE).into_bytes(); + + match sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, &key) { + Some(data) => Ok(data), + None => Err(OCWError::FailedToFetchCurrentValidator), + } + } + + #[allow(clippy::too_many_arguments)] // todo! 
(2) refactor into 2 different methods (for customers and nodes) + use type info for + // derive_key + // todo! introduce new struct for input and remove clippy::type_complexity + pub(crate) fn store_validation_activities( + // todo! (3) add tests + cluster_id: &ClusterId, + era_id: DdcEra, + customers_activity_in_consensus: &[A], + customers_activity_root: ActivityHash, + customers_activity_batch_roots: &[ActivityHash], + nodes_activity_in_consensus: &[B], + nodes_activity_root: ActivityHash, + nodes_activity_batch_roots: &[ActivityHash], + ) { + let key = Self::derive_key(cluster_id, era_id); + let encoded_tuple = ( + customers_activity_in_consensus, + customers_activity_root, + customers_activity_batch_roots, + nodes_activity_in_consensus, + nodes_activity_root, + nodes_activity_batch_roots, + ) + .encode(); + + // Store the serialized data in local offchain storage + sp_io::offchain::local_storage_set(StorageKind::PERSISTENT, &key, &encoded_tuple); + } + + #[allow(clippy::type_complexity)] + pub(crate) fn fetch_validation_activities( + // todo! (4) add tests + // todo! 
introduce new struct for results and remove clippy::type_complexity + cluster_id: &ClusterId, + era_id: DdcEra, + ) -> Option<( + Vec, + ActivityHash, + Vec, + Vec, + ActivityHash, + Vec, + )> { + let key = Self::derive_key(cluster_id, era_id); + + // Retrieve encoded tuple from local storage + let encoded_tuple = + match sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, &key) { + Some(data) => data, + None => return None, + }; + + // Attempt to decode tuple from bytes + match Decode::decode(&mut &encoded_tuple[..]) { + Ok(( + customers_activity_in_consensus, + customers_activity_root, + customers_activity_batch_roots, + nodes_activity_in_consensus, + nodes_activity_root, + nodes_activity_batch_roots, + )) => Some(( + customers_activity_in_consensus, + customers_activity_root, + customers_activity_batch_roots, + nodes_activity_in_consensus, + nodes_activity_root, + nodes_activity_batch_roots, + )), + Err(err) => { + // Print error message with details of the decoding error + log::error!("🦀Decoding error: {:?}", err); + None + }, + } + } + + pub(crate) fn store_provider_id( + // todo! 
(3) add tests + node_id: String, + provider_id: A, + ) { + let key = format!("offchain::activities::provider_id::{:?}", node_id).into_bytes(); + let encoded_tuple = provider_id.encode(); + + // Store the serialized data in local offchain storage + sp_io::offchain::local_storage_set(StorageKind::PERSISTENT, &key, &encoded_tuple); + } + + pub(crate) fn fetch_provider_id(node_id: String) -> Option { + let key = format!("offchain::activities::provider_id::{:?}", node_id).into_bytes(); + // Retrieve encoded tuple from local storage + let encoded_tuple = + match sp_io::offchain::local_storage_get(StorageKind::PERSISTENT, &key) { + Some(data) => data, + None => return None, + }; + + match Decode::decode(&mut &encoded_tuple[..]) { + Ok(provider_id) => Some(provider_id), + Err(err) => { + // Print error message with details of the decoding error + log::error!("🦀Decoding error while fetching provider id: {:?}", err); + None + }, + } + } + /// Converts a vector of activity batches into their corresponding Merkle roots. + /// + /// This function takes a vector of activity batches, where each batch is a vector of + /// activities. It computes the Merkle root for each batch by first hashing each activity + /// and then combining these hashes into a single Merkle root. + /// + /// # Input Parameters + /// - `activities: Vec>`: A vector of vectors, where each inner vector represents a + /// batch of activities. + /// + /// # Output + /// - `Vec`: A vector of Merkle roots, one for each batch of activities. 
+ pub(crate) fn convert_to_batch_merkle_roots( + cluster_id: &ClusterId, + era_id: DdcEra, + activities: Vec>, + ) -> Result, OCWError> { + activities + .into_iter() + .map(|inner_vec| { + let activity_hashes: Vec = + inner_vec.into_iter().map(|a| a.hash::()).collect(); + Self::create_merkle_root(cluster_id, era_id, &activity_hashes).map_err(|_| { + OCWError::FailedToCreateMerkleRoot { cluster_id: *cluster_id, era_id } + }) + }) + .collect::, OCWError>>() + } + + /// Splits a slice of activities into batches of a specified size. + /// + /// This function sorts the given activities and splits them into batches of the specified + /// size. Each batch is returned as a separate vector. + /// + /// # Input Parameters + /// - `activities: &[A]`: A slice of activities to be split into batches. + /// - `batch_size: usize`: The size of each batch. + /// + /// # Output + /// - `Vec>`: A vector of vectors, where each inner vector is a batch of activities. + pub(crate) fn split_to_batches( + activities: &[A], + batch_size: usize, + ) -> Vec> { + if activities.is_empty() { + return vec![]; + } + // Sort the activities first + let mut sorted_activities = activities.to_vec(); + sorted_activities.sort(); // Sort using the derived Ord trait + + // Split the sorted activities into chunks and collect them into vectors + sorted_activities.chunks(batch_size).map(|chunk| chunk.to_vec()).collect() + } + + /// Creates a Merkle root from a list of activity hashes. + /// + /// This function takes a slice of `ActivityHash` and constructs a Merkle tree + /// using an in-memory store. It returns a tuple containing the Merkle root hash, + /// the size of the Merkle tree, and a vector mapping each input leaf to its position + /// in the Merkle tree. + /// + /// # Input Parameters + /// + /// * `leaves` - A slice of `ActivityHash` representing the leaves of the Merkle tree. 
+ /// + /// # Output + /// + /// A `Result` containing: + /// * A tuple with the Merkle root `ActivityHash`, the size of the Merkle tree, and a vector + /// mapping each input leaf to its position in the Merkle tree. + /// * `OCWError::FailedToCreateMerkleRoot` if there is an error creating the Merkle root. + pub(crate) fn create_merkle_root( + cluster_id: &ClusterId, + era_id: DdcEra, + leaves: &[ActivityHash], + ) -> Result { + if leaves.is_empty() { + return Ok(ActivityHash::default()); + } + + let store = MemStore::default(); + let mut mmr: MMR> = + MemMMR::<_, MergeActivityHash>::new(0, &store); + + let mut leaves_with_position: Vec<(u64, ActivityHash)> = + Vec::with_capacity(leaves.len()); + + for &leaf in leaves { + match mmr.push(leaf) { + Ok(pos) => leaves_with_position.push((pos, leaf)), + Err(_) => + return Err(OCWError::FailedToCreateMerkleRoot { + cluster_id: *cluster_id, + era_id, + }), + } + } + + mmr.get_root() + .map_err(|_| OCWError::FailedToCreateMerkleRoot { cluster_id: *cluster_id, era_id }) + } + + /// Verify whether leaf is part of tree + /// + /// Parameters: + /// - `root`: merkle root + /// - `leaf`: Leaf of the tree + pub(crate) fn proof_merkle_leaf( + root: ActivityHash, + batch_proof: &MMRProof, + ) -> Result> { + let proof: MerkleProof = + MerkleProof::new(batch_proof.mmr_size, batch_proof.proof.clone()); + proof + .verify(root, vec![batch_proof.leaf_with_position]) + .map_err(|_| Error::::FailToVerifyMerkleProof) + } + + // todo! 
simplify method by removing start/end from the result + pub(crate) fn get_era_for_payout( + cluster_id: &ClusterId, + status: EraValidationStatus, + ) -> Option<(DdcEra, i64, i64)> { + let mut smallest_era_id: Option = None; + let mut start_era: i64 = Default::default(); + let mut end_era: i64 = Default::default(); + + for (stored_cluster_id, era_id, validation) in EraValidations::::iter() { + if stored_cluster_id == *cluster_id && + validation.status == status && + (smallest_era_id.is_none() || era_id < smallest_era_id.unwrap()) + { + smallest_era_id = Some(era_id); + start_era = validation.start_era; + end_era = validation.end_era; + } + } + + smallest_era_id.map(|era_id| (era_id, start_era, end_era)) + } + + /// Retrieves the last era in which the specified validator participated for a given + /// cluster. + /// + /// This function iterates through all eras in `EraValidations` for the given `cluster_id`, + /// filtering for eras where the specified `validator` is present in the validators list. + /// It returns the maximum era found where the validator participated. + /// + /// # Input Parameters + /// - `cluster_id: &ClusterId`: The ID of the cluster to check for the validator's + /// participation. + /// - `validator: T::AccountId`: The account ID of the validator whose participation is + /// being checked. + /// + /// # Output + /// - `Result, OCWError>`: + /// - `Ok(Some(DdcEra))`: The maximum era in which the validator participated. + /// - `Ok(None)`: The validator did not participate in any era for the given cluster. + /// - `Err(OCWError)`: An error occurred while retrieving the data. + // todo! 
add tests for start and end era + pub(crate) fn get_last_validated_era( + cluster_id: &ClusterId, + validator: T::AccountId, + ) -> Result, OCWError> { + let mut max_era: Option = None; + + // Iterate through all eras in EraValidations for the given cluster_id + >::iter_prefix(cluster_id) + .filter_map(|(era, validation)| { + // Filter for validators that contain the given validator + if validation + .validators + .values() + .any(|validators| validators.contains(&validator)) + { + Some(era) + } else { + None + } + }) + .for_each(|era| { + // Update max_era to the maximum era found + if let Some(current_max) = max_era { + if era > current_max { + max_era = Some(era); + } + } else { + max_era = Some(era); + } + }); + + Ok(max_era) + } + + /// Fetch current era across all DAC nodes to validate. + /// + /// Parameters: + /// - `cluster_id`: cluster id of a cluster + /// - `dac_nodes`: List of DAC nodes + pub(crate) fn get_era_for_validation( + // todo! this needs to be rewriten - too complex and inefficient + cluster_id: &ClusterId, + dac_nodes: &[(NodePubKey, StorageNodeParams)], + ) -> Result, OCWError> { + let current_validator_data = Self::fetch_current_validator()?; + + let current_validator = T::AccountId::decode(&mut ¤t_validator_data[..]).unwrap(); + + let last_validated_era = Self::get_last_validated_era(cluster_id, current_validator)? 
+ .unwrap_or_else(DdcEra::default); + + let all_ids = Self::fetch_processed_era_for_node(cluster_id, dac_nodes)?; + + let ids_greater_than_last_validated_era: Vec = all_ids + .iter() + .flat_map(|eras| eras.iter().filter(|&ids| ids.id > last_validated_era).cloned()) + .sorted() + .collect::>(); + + let mut grouped_data: Vec<(u32, EraActivity)> = Vec::new(); + for (key, chunk) in + &ids_greater_than_last_validated_era.into_iter().chunk_by(|elt| elt.clone()) + { + grouped_data.push((chunk.count() as u32, key)); + } + + let all_node_eras = grouped_data + .into_iter() + .filter(|(v, _)| *v == dac_nodes.len() as u32) + .map(|(_, id)| id) + .collect::>(); + + Ok(all_node_eras.iter().cloned().min_by_key(|n| n.id)) + } + + /// Determines if a consensus is reached for a set of activities based on a specified + /// threshold. + /// + /// This function counts the occurrences of each activity in the provided list and checks if + /// any activity's count meets or exceeds the given threshold. If such an activity is found, + /// it is returned. + /// + /// # Input Parameters + /// - `activities: &[A]`: A slice of activities to be analyzed for consensus. + /// - `threshold: usize`: The minimum number of occurrences required for an activity to be + /// considered in consensus. + /// + /// # Output + /// - `Option`: + /// - `Some(A)`: An activity that has met or exceeded the threshold. + /// - `None`: No activity met the threshold. + pub(crate) fn reach_consensus( + activities: &[A], + threshold: usize, + ) -> Option { + let mut count_map: BTreeMap = BTreeMap::new(); + + for activity in activities { + *count_map.entry(activity.clone()).or_default() += 1; + } + + count_map + .into_iter() + .find(|&(_, count)| count >= threshold) + .map(|(activity, _)| activity) + } + + /// Computes the consensus for a set of activities across multiple nodes within a given + /// cluster and era. 
+ /// + /// This function collects activities from various nodes, groups them by their consensus ID, + /// and then determines if a consensus is reached for each group based on the minimum number + /// of nodes and a given threshold. If the consensus is reached, the activity is included + /// in the result. Otherwise, appropriate errors are returned. + /// + /// # Input Parameters + /// - `cluster_id: &ClusterId`: The ID of the cluster for which consensus is being computed. + /// - `era_id: DdcEra`: The era ID within the cluster. + /// - `activities: &[(NodePubKey, Vec)]`: A list of tuples, where each tuple contains a + /// node's public key and a vector of activities reported by that node. + /// - `min_nodes: u16`: The minimum number of nodes that must report an activity for it to + /// be considered for consensus. + /// - `threshold: Percent`: The threshold percentage that determines if an activity is in + /// consensus. + /// + /// # Output + /// - `Result, Vec>`: + /// - `Ok(Vec)`: A vector of activities that have reached consensus. + /// - `Err(Vec)`: A vector of errors indicating why consensus was not reached + /// for some activities. 
+ pub(crate) fn get_consensus_for_activities( + cluster_id: &ClusterId, + era_id: DdcEra, + activities: &[(NodePubKey, Vec)], + min_nodes: u16, + threshold: Percent, + ) -> Result, Vec> { + let mut customer_buckets: BTreeMap> = BTreeMap::new(); + + // Flatten and collect all customer activities + for (_node_id, activities) in activities.iter() { + for activity in activities.iter() { + customer_buckets + .entry(activity.get_consensus_id::()) + .or_default() + .push(activity.clone()); + } + } + + let mut consensus_activities = Vec::new(); + let mut errors = Vec::new(); + let min_threshold = threshold * min_nodes; + + // Check if each customer/bucket appears in at least `min_nodes` nodes + for (id, activities) in customer_buckets { + if activities.len() < min_nodes.into() { + errors.push(OCWError::NotEnoughNodesForConsensus { + cluster_id: (*cluster_id), + era_id, + id, + }); + } else if let Some(activity) = + Self::reach_consensus(&activities, min_threshold.into()) + { + consensus_activities.push(activity); + } else { + errors.push(OCWError::ActivityNotInConsensus { + cluster_id: (*cluster_id), + era_id, + id, + }); + } + } + + if errors.is_empty() { + Ok(consensus_activities) + } else { + Err(errors) + } + } + + /// Fetch cluster to validate. + fn get_cluster_to_validate() -> Result> { + // todo! to implement + Self::cluster_to_validate().ok_or(Error::ClusterToValidateRetrievalError) + } + + /// Fetch processed era. 
+ /// + /// Parameters: + /// - `node_params`: DAC node parameters + #[allow(dead_code)] + pub(crate) fn fetch_processed_era( + node_params: &StorageNodeParams, + ) -> Result, http::Error> { + let scheme = if node_params.ssl { "https" } else { "http" }; + let host = str::from_utf8(&node_params.host).map_err(|_| http::Error::Unknown)?; + let url = format!("{}://{}:{}/activity/eras", scheme, host, node_params.http_port); + let request = http::Request::get(&url); + let timeout = sp_io::offchain::timestamp() + .add(sp_runtime::offchain::Duration::from_millis(10000)); + let pending = request.deadline(timeout).send().map_err(|_| http::Error::IoError)?; + + // todo! filter by status == PROCESSED + + let response = + pending.try_wait(timeout).map_err(|_| http::Error::DeadlineReached)??; + if response.code != 200 { + return Err(http::Error::Unknown); + } + + let body = response.body().collect::>(); + + serde_json::from_slice(&body).map_err(|_| http::Error::Unknown) + } + /// Fetch customer usage. 
+ /// + /// Parameters: + /// - `cluster_id`: cluster id of a cluster + /// - `era_id`: era id + /// - `node_params`: DAC node parameters + pub(crate) fn fetch_customers_usage( + _cluster_id: &ClusterId, + era_id: DdcEra, + node_params: &StorageNodeParams, + ) -> Result, http::Error> { + let scheme = if node_params.ssl { "https" } else { "http" }; + let host = str::from_utf8(&node_params.host).map_err(|_| http::Error::Unknown)?; + let url = format!( + "{}://{}:{}/activity/buckets?eraId={}", + scheme, host, node_params.http_port, era_id + ); + + let request = http::Request::get(&url); + let timeout = sp_io::offchain::timestamp() + .add(sp_runtime::offchain::Duration::from_millis(10000)); + let pending = request.deadline(timeout).send().map_err(|_| http::Error::IoError)?; + + let response = + pending.try_wait(timeout).map_err(|_| http::Error::DeadlineReached)??; + if response.code != 200 { + return Err(http::Error::Unknown); + } + + let body = response.body().collect::>(); + serde_json::from_slice(&body).map_err(|_| http::Error::Unknown) + } + + /// Fetch node usage. 
+ /// + /// Parameters: + /// - `cluster_id`: cluster id of a cluster + /// - `era_id`: era id + /// - `node_params`: DAC node parameters + pub(crate) fn fetch_node_usage( + _cluster_id: &ClusterId, + era_id: DdcEra, + node_params: &StorageNodeParams, + ) -> Result, http::Error> { + let scheme = if node_params.ssl { "https" } else { "http" }; + let host = str::from_utf8(&node_params.host).map_err(|_| http::Error::Unknown)?; + let url = format!( + "{}://{}:{}/activity/nodes?eraId={}", + scheme, host, node_params.http_port, era_id + ); + + let request = http::Request::get(&url); + let timeout = + sp_io::offchain::timestamp().add(rt_offchain::Duration::from_millis(10000)); + let pending = request.deadline(timeout).send().map_err(|_| http::Error::IoError)?; + + let response = + pending.try_wait(timeout).map_err(|_| http::Error::DeadlineReached)??; + if response.code != 200 { + return Err(http::Error::Unknown); + } + + let body = response.body().collect::>(); + serde_json::from_slice(&body).map_err(|_| http::Error::Unknown) + } + + /// Fetch DAC nodes of a cluster. + /// Parameters: + /// - `cluster_id`: Cluster id of a cluster. + fn get_dac_nodes( + cluster_id: &ClusterId, + ) -> Result, Error> { + let mut dac_nodes = Vec::new(); + + let nodes = T::ClusterManager::get_nodes(cluster_id) + .map_err(|_| Error::::NodeRetrievalError)?; + + // Iterate over each node + for node_pub_key in nodes { + // Get the node parameters + if let Ok(NodeParams::StorageParams(storage_params)) = + T::NodeVisitor::get_node_params(&node_pub_key) + { + // Check if the mode is StorageNodeMode::DAC + if storage_params.mode == StorageNodeMode::DAC { + // Add to the results if the mode matches + dac_nodes.push((node_pub_key, storage_params)); + } + } + } + + Ok(dac_nodes) + } + + fn get_node_provider_id(node_pub_key: &NodePubKey) -> Result { + T::NodeVisitor::get_node_provider_id(node_pub_key) + .map_err(|_| OCWError::FailedToFetchNodeProvider) + } + + /// Fetch node usage of an era. 
+ /// + /// Parameters: + /// - `cluster_id`: cluster id of a cluster + /// - `era_id`: era id + /// - `node_params`: DAC node parameters + fn fetch_nodes_usage_for_era( + cluster_id: &ClusterId, + era_id: DdcEra, + dac_nodes: &[(NodePubKey, StorageNodeParams)], + ) -> Result)>, OCWError> { + let mut node_usages = Vec::new(); + + for (node_pub_key, node_params) in dac_nodes { + // todo! probably shouldn't stop when some DAC is not responding as we can still + // work with others + let usage = + Self::fetch_node_usage(cluster_id, era_id, node_params).map_err(|_| { + OCWError::NodeUsageRetrievalError { + cluster_id: *cluster_id, + era_id, + node_pub_key: node_pub_key.clone(), + } + })?; + for node_activity in usage.clone() { + let provider_id = Self::get_node_provider_id(node_pub_key).unwrap(); + Self::store_provider_id(node_activity.node_id, provider_id); + } + + node_usages.push((node_pub_key.clone(), usage)); + } + + Ok(node_usages) + } + + /// Fetch customer usage for an era. + /// + /// Parameters: + /// - `cluster_id`: cluster id of a cluster + /// - `era_id`: era id + /// - `node_params`: DAC node parameters + fn fetch_customers_usage_for_era( + cluster_id: &ClusterId, + era_id: DdcEra, + dac_nodes: &[(NodePubKey, StorageNodeParams)], + ) -> Result)>, OCWError> { + let mut customers_usages = Vec::new(); + + for (node_pub_key, node_params) in dac_nodes { + // todo! probably shouldn't stop when some DAC is not responding as we can still + // work with others + let usage = + Self::fetch_customers_usage(cluster_id, era_id, node_params).map_err(|_| { + OCWError::CustomerUsageRetrievalError { + cluster_id: *cluster_id, + era_id, + node_pub_key: node_pub_key.clone(), + } + })?; + + customers_usages.push((node_pub_key.clone(), usage)); + } + + Ok(customers_usages) + } + + /// Fetch processed era for across all nodes. 
+ /// + /// Parameters: + /// - `cluster_id`: Cluster id + /// - `node_params`: DAC node parameters + fn fetch_processed_era_for_node( + cluster_id: &ClusterId, + dac_nodes: &[(NodePubKey, StorageNodeParams)], + ) -> Result>, OCWError> { + let mut eras = Vec::new(); + + for (node_pub_key, node_params) in dac_nodes { + // todo! probably shouldn't stop when some DAC is not responding as we can still + // work with others + + let ids = Self::fetch_processed_era(node_params).map_err(|_| { + OCWError::EraRetrievalError { + cluster_id: *cluster_id, + node_pub_key: node_pub_key.clone(), + } + })?; + + eras.push(ids); + } + + Ok(eras) + } + } + + #[pallet::call] + impl Pallet { + /// Create billing reports from a public origin. + /// + /// The origin must be Signed. + /// + /// Parameters: + /// - `cluster_id`: Cluster id of a cluster. + /// - `era`: Era id. + /// - `payers_merkle_root_hash`: Merkle root hash of payers + /// - `payees_merkle_root_hash`: Merkle root hash of payees + /// + /// Emits `BillingReportCreated` event when successful. + #[pallet::call_index(0)] + #[pallet::weight(::WeightInfo::create_billing_reports())] // todo! 
implement weights + pub fn set_prepare_era_for_payout( + origin: OriginFor, + cluster_id: ClusterId, + era_activity: EraActivity, + payers_merkle_root_hash: ActivityHash, + payees_merkle_root_hash: ActivityHash, + ) -> DispatchResult { + let caller = ensure_signed(origin)?; + + ensure!(Self::is_ocw_validator(caller.clone()), Error::::Unauthorised); + let mut era_validation = { + let era_validations = >::get(cluster_id, era_activity.id); + + if era_validations.is_none() { + EraValidation { + payers_merkle_root_hash: ActivityHash::default(), + payees_merkle_root_hash: ActivityHash::default(), + start_era: Default::default(), + end_era: Default::default(), + validators: Default::default(), + status: EraValidationStatus::ValidatingData, + } + } else { + era_validations.unwrap() + } + }; + + // disallow signatures after era status change + ensure!( + era_validation.status == EraValidationStatus::ValidatingData, + Error::::NotExpectedState + ); + + // Ensure the validators entry exists for the specified (payers_merkle_root_hash, + // payees_merkle_root_hash) + let signed_validators = era_validation + .validators + .entry((payers_merkle_root_hash, payees_merkle_root_hash)) + .or_insert_with(Vec::new); + + ensure!(!signed_validators.contains(&caller.clone()), Error::::AlreadySignedEra); + signed_validators.push(caller.clone()); + + let percent = Percent::from_percent(T::MAJORITY); + let threshold = percent * >::get().len(); + + let mut should_deposit_ready_event = false; + if threshold < signed_validators.len() { + // Update payers_merkle_root_hash and payees_merkle_root_hash as ones passed the + // threshold + era_validation.payers_merkle_root_hash = payers_merkle_root_hash; + era_validation.payees_merkle_root_hash = payees_merkle_root_hash; + era_validation.start_era = era_activity.start; // todo! 
start/end is set by the last validator and is not in consensus + era_validation.end_era = era_activity.end; + + if payers_merkle_root_hash == ActivityHash::default() && + payees_merkle_root_hash == payers_merkle_root_hash + { + era_validation.status = EraValidationStatus::PayoutSuccess; + } else { + era_validation.status = EraValidationStatus::ReadyForPayout; + } + + should_deposit_ready_event = true; + } + + // Update the EraValidations storage + >::insert(cluster_id, era_activity.id, era_validation); + if should_deposit_ready_event { + Self::deposit_event(Event::::EraValidationReady { + cluster_id, + era_id: era_activity.id, + }); + } else { + Self::deposit_event(Event::::EraValidationNotReady { + cluster_id, + era_id: era_activity.id, + }); + } + + Ok(()) + } + + /// Emit consensus errors. + /// + /// The origin must be a validator. + /// + /// Parameters: + /// - errors`: List of consensus errors + /// + /// Emits `NotEnoughNodesForConsensus` OR `ActivityNotInConsensus` event depend of error + /// type, when successful. + #[pallet::call_index(1)] + #[pallet::weight(::WeightInfo::create_billing_reports())] // todo! 
implement weights + pub fn emit_consensus_errors( + origin: OriginFor, + errors: Vec, + ) -> DispatchResult { + let caller = ensure_signed(origin)?; + ensure!(Self::is_ocw_validator(caller.clone()), Error::::Unauthorised); + + for error in errors { + match error { + OCWError::NotEnoughNodesForConsensus { cluster_id, era_id, id } => { + Self::deposit_event(Event::NotEnoughNodesForConsensus { + cluster_id, + era_id, + id, + validator: caller.clone(), + }); + }, + OCWError::ActivityNotInConsensus { cluster_id, era_id, id } => { + Self::deposit_event(Event::ActivityNotInConsensus { + cluster_id, + era_id, + id, + validator: caller.clone(), + }); + }, + OCWError::NodeUsageRetrievalError { cluster_id, era_id, node_pub_key } => { + Self::deposit_event(Event::NodeUsageRetrievalError { + cluster_id, + era_id, + node_pub_key, + validator: caller.clone(), + }); + }, + OCWError::CustomerUsageRetrievalError { cluster_id, era_id, node_pub_key } => { + Self::deposit_event(Event::CustomerUsageRetrievalError { + cluster_id, + era_id, + node_pub_key, + validator: caller.clone(), + }); + }, + OCWError::EraRetrievalError { cluster_id, node_pub_key } => { + Self::deposit_event(Event::EraRetrievalError { + cluster_id, + node_pub_key, + validator: caller.clone(), + }); + }, + OCWError::PrepareEraTransactionError { + cluster_id, + era_id, + payers_merkle_root_hash, + payees_merkle_root_hash, + } => { + Self::deposit_event(Event::PrepareEraTransactionError { + cluster_id, + era_id, + payers_merkle_root_hash, + payees_merkle_root_hash, + validator: caller.clone(), + }); + }, + OCWError::BeginBillingReportTransactionError { cluster_id, era_id } => { + Self::deposit_event(Event::BeginBillingReportTransactionError { + cluster_id, + era_id, + validator: caller.clone(), + }); + }, + OCWError::BeginChargingCustomersTransactionError { cluster_id, era_id } => { + Self::deposit_event(Event::BeginChargingCustomersTransactionError { + cluster_id, + era_id, + validator: caller.clone(), + }); + }, + 
OCWError::SendChargingCustomersBatchTransactionError { + cluster_id, + era_id, + batch_index, + } => { + Self::deposit_event(Event::SendChargingCustomersBatchTransactionError { + cluster_id, + era_id, + batch_index, + validator: caller.clone(), + }); + }, + OCWError::SendRewardingProvidersBatchTransactionError { + cluster_id, + era_id, + batch_index, + } => { + Self::deposit_event(Event::SendRewardingProvidersBatchTransactionError { + cluster_id, + era_id, + batch_index, + validator: caller.clone(), + }); + }, + OCWError::EndChargingCustomersTransactionError { cluster_id, era_id } => { + Self::deposit_event(Event::EndChargingCustomersTransactionError { + cluster_id, + era_id, + validator: caller.clone(), + }); + }, + OCWError::BeginRewardingProvidersTransactionError { cluster_id, era_id } => { + Self::deposit_event(Event::BeginRewardingProvidersTransactionError { + cluster_id, + era_id, + validator: caller.clone(), + }); + }, + OCWError::EndRewardingProvidersTransactionError { cluster_id, era_id } => { + Self::deposit_event(Event::EndRewardingProvidersTransactionError { + cluster_id, + era_id, + validator: caller.clone(), + }); + }, + OCWError::EndBillingReportTransactionError { cluster_id, era_id } => { + Self::deposit_event(Event::EndBillingReportTransactionError { + cluster_id, + era_id, + validator: caller.clone(), + }); + }, + OCWError::BillingReportDoesNotExist { cluster_id, era_id } => { + Self::deposit_event(Event::BillingReportDoesNotExist { + cluster_id, + era_id, + validator: caller.clone(), + }); + }, + OCWError::EmptyCustomerActivity { cluster_id, era_id } => { + Self::deposit_event(Event::EmptyCustomerActivity { + cluster_id, + era_id, + validator: caller.clone(), + }); + }, + OCWError::BatchIndexConversionFailed { cluster_id, era_id } => { + Self::deposit_event(Event::BatchIndexConversionFailed { + cluster_id, + era_id, + validator: caller.clone(), + }); + }, + OCWError::NoAvailableSigner => { + Self::deposit_event(Event::NoAvailableSigner { 
validator: caller.clone() }); + }, + OCWError::NotEnoughDACNodes { num_nodes } => { + Self::deposit_event(Event::NotEnoughDACNodes { + num_nodes, + validator: caller.clone(), + }); + }, + OCWError::FailedToCreateMerkleRoot { cluster_id, era_id } => { + Self::deposit_event(Event::FailedToCreateMerkleRoot { + cluster_id, + era_id, + validator: caller.clone(), + }); + }, + OCWError::FailedToCreateMerkleProof { cluster_id, era_id } => { + Self::deposit_event(Event::FailedToCreateMerkleProof { + cluster_id, + era_id, + validator: caller.clone(), + }); + }, + OCWError::FailedToFetchCurrentValidator => { + Self::deposit_event(Event::FailedToFetchCurrentValidator { + validator: caller.clone(), + }); + }, + OCWError::FailedToFetchNodeProvider => { + Self::deposit_event(Event::FailedToFetchNodeProvider { + validator: caller.clone(), + }); + }, + } + } + + Ok(()) + } + + /// Set validator key. + /// + /// The origin must be a validator. + /// + /// Parameters: + /// - `ddc_validator_pub`: validator Key + #[pallet::call_index(2)] + #[pallet::weight(::WeightInfo::create_billing_reports())] // todo! implement weights + pub fn set_validator_key( + origin: OriginFor, + ddc_validator_pub: T::AccountId, + ) -> DispatchResult { + let controller = ensure_signed(origin)?; + + ensure!( + T::StakingVisitor::stash_by_ctrl(&controller).is_ok(), + Error::::NotController + ); + + ensure!( + >::get().contains(&ddc_validator_pub), + Error::::NotValidatorStash + ); + + ValidatorToStashKey::::insert(&ddc_validator_pub, &ddc_validator_pub); + Ok(()) + } + + #[pallet::call_index(3)] + #[pallet::weight(::WeightInfo::create_billing_reports())] // todo! 
implement weights + pub fn begin_billing_report( + origin: OriginFor, + cluster_id: ClusterId, + era_id: DdcEra, + start_era: i64, + end_era: i64, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + ensure!(Self::is_ocw_validator(sender.clone()), Error::::Unauthorised); + + T::PayoutVisitor::begin_billing_report(sender, cluster_id, era_id, start_era, end_era)?; + + EraValidations::::try_mutate( + cluster_id, + era_id, + |maybe_era_validations| -> DispatchResult { + maybe_era_validations.as_mut().ok_or(Error::::NoEraValidation)?.status = + EraValidationStatus::PayoutInProgress; + Ok(()) + }, + )?; + + Ok(()) + } + + #[pallet::call_index(4)] + #[pallet::weight(::WeightInfo::create_billing_reports())] // todo! implement weights + pub fn begin_charging_customers( + origin: OriginFor, + cluster_id: ClusterId, + era_id: DdcEra, + max_batch_index: BatchIndex, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + ensure!(Self::is_ocw_validator(sender.clone()), Error::::Unauthorised); + T::PayoutVisitor::begin_charging_customers(sender, cluster_id, era_id, max_batch_index) + } + + #[pallet::call_index(5)] + #[pallet::weight(::WeightInfo::create_billing_reports())] // todo! implement weights + // todo! remove clippy::too_many_arguments + pub fn send_charging_customers_batch( + origin: OriginFor, + cluster_id: ClusterId, + era_id: DdcEra, + batch_index: BatchIndex, + payers: Vec<(T::AccountId, BucketId, CustomerUsage)>, + batch_proof: MMRProof, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + ensure!(Self::is_ocw_validator(sender.clone()), Error::::Unauthorised); + T::PayoutVisitor::send_charging_customers_batch( + sender, + cluster_id, + era_id, + batch_index, + &payers, + batch_proof, + ) + } + + #[pallet::call_index(6)] + #[pallet::weight(::WeightInfo::create_billing_reports())] // todo! 
implement weights + pub fn end_charging_customers( + origin: OriginFor, + cluster_id: ClusterId, + era_id: DdcEra, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + ensure!(Self::is_ocw_validator(sender.clone()), Error::::Unauthorised); + T::PayoutVisitor::end_charging_customers(sender, cluster_id, era_id) + } + + #[pallet::call_index(7)] + #[pallet::weight(::WeightInfo::create_billing_reports())] // todo! implement weights + pub fn begin_rewarding_providers( + origin: OriginFor, + cluster_id: ClusterId, + era_id: DdcEra, + max_batch_index: BatchIndex, + total_node_usage: NodeUsage, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + ensure!(Self::is_ocw_validator(sender.clone()), Error::::Unauthorised); + T::PayoutVisitor::begin_rewarding_providers( + sender, + cluster_id, + era_id, + max_batch_index, + total_node_usage, + ) + } + + #[pallet::call_index(8)] + #[pallet::weight(::WeightInfo::create_billing_reports())] // todo! implement weights + pub fn send_rewarding_providers_batch( + origin: OriginFor, + cluster_id: ClusterId, + era_id: DdcEra, + batch_index: BatchIndex, + payees: Vec<(T::AccountId, NodeUsage)>, + batch_proof: MMRProof, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + ensure!(Self::is_ocw_validator(sender.clone()), Error::::Unauthorised); + T::PayoutVisitor::send_rewarding_providers_batch( + sender, + cluster_id, + era_id, + batch_index, + &payees, + batch_proof, + ) + } + + #[pallet::call_index(9)] + #[pallet::weight(::WeightInfo::create_billing_reports())] // todo! implement weights + pub fn end_rewarding_providers( + origin: OriginFor, + cluster_id: ClusterId, + era_id: DdcEra, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + ensure!(Self::is_ocw_validator(sender.clone()), Error::::Unauthorised); + T::PayoutVisitor::end_rewarding_providers(sender, cluster_id, era_id) + } + + #[pallet::call_index(10)] + #[pallet::weight(::WeightInfo::create_billing_reports())] // todo! 
implement weights + pub fn end_billing_report( + origin: OriginFor, + cluster_id: ClusterId, + era_id: DdcEra, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + ensure!(Self::is_ocw_validator(sender.clone()), Error::::Unauthorised); + T::PayoutVisitor::end_billing_report(sender, cluster_id, era_id)?; + + let mut era_validation = >::get(cluster_id, era_id).unwrap(); // should exist + era_validation.status = EraValidationStatus::PayoutSuccess; + >::insert(cluster_id, era_id, era_validation); + + Ok(()) + } + + // todo! Need to remove this + #[pallet::call_index(11)] + #[pallet::weight(::WeightInfo::create_billing_reports())] // todo! implement weights + pub fn set_current_validator(origin: OriginFor) -> DispatchResult { + let caller: T::AccountId = ensure_signed(origin)?; + + ensure!(>::get().contains(&caller), Error::::NotValidatorStash); + + if Self::is_ocw_validator(caller.clone()) { + log::info!("🏄‍ is_ocw_validator is a validator {:?}", caller.clone()); + } else { + log::info!("🏄‍ is_ocw_validator is not a validator {:?}", caller.clone()); + } + + Ok(()) + } + + #[pallet::call_index(12)] + #[pallet::weight(::WeightInfo::create_billing_reports())] // todo! implement weights + pub fn set_cluster_to_validate( + origin: OriginFor, + cluster_id: ClusterId, + ) -> DispatchResult { + ensure_root(origin)?; + ClusterToValidate::::put(cluster_id); + + Ok(()) + } + } + + impl ValidatorVisitor for Pallet { + fn setup_validators(validators: Vec) { + ValidatorSet::::put(validators); + } + fn is_ocw_validator(caller: T::AccountId) -> bool { + if let Some(stash) = ValidatorToStashKey::::get(caller) { + >::get().contains(&stash) + } else { + false + } + } + + // todo! 
use batch_index and payers as part of the validation + fn is_customers_batch_valid( + cluster_id: ClusterId, + era_id: DdcEra, + _batch_index: BatchIndex, + _payers: &[(T::AccountId, BucketId, CustomerUsage)], + batch_proof: &MMRProof, + ) -> bool { + let validation_era = EraValidations::::get(cluster_id, era_id); + + match validation_era { + Some(valid_era) => { + //Self::create_merkle_root(leaves) + + let root = valid_era.payers_merkle_root_hash; + Self::proof_merkle_leaf(root, batch_proof).unwrap_or(false) + }, + None => false, + } + } + + // todo! use batch_index and payees as part of the validation + fn is_providers_batch_valid( + cluster_id: ClusterId, + era_id: DdcEra, + _batch_index: BatchIndex, + _payees: &[(T::AccountId, NodeUsage)], + batch_proof: &MMRProof, + ) -> bool { + let validation_era = EraValidations::::get(cluster_id, era_id); + + match validation_era { + Some(valid_era) => { + let root = valid_era.payees_merkle_root_hash; + Self::proof_merkle_leaf(root, batch_proof).unwrap_or(false) + }, + None => false, + } + } + } + + impl sp_application_crypto::BoundToRuntimeAppPublic for Pallet { + type Public = T::AuthorityId; + } + + impl OneSessionHandler for Pallet { + type Key = T::AuthorityId; + + fn on_genesis_session<'a, I: 'a>(validators: I) + where + I: Iterator, + { + log::info!("🙌Adding Validator from genesis session."); + let validators = validators + .map(|(_, k)| T::AccountId::decode(&mut &k.into().encode()[..]).unwrap()) + .collect::>(); + + ValidatorSet::::put(validators); // only active validators in session - this is NOT all the + // validators + } + + fn on_new_session<'a, I: 'a>(_changed: bool, validators: I, _queued_authorities: I) + where + I: Iterator, + { + log::info!("🙌Adding Validator from new session."); + let validators = validators + .map(|(_, k)| T::AccountId::decode(&mut &k.into().encode()[..]).unwrap()) + .collect::>(); + ValidatorSet::::put(validators); + } + + fn on_disabled(_i: u32) {} + } +} diff --git 
a/pallets/ddc-verification/src/mock.rs b/pallets/ddc-verification/src/mock.rs new file mode 100644 index 000000000..f4faaaafb --- /dev/null +++ b/pallets/ddc-verification/src/mock.rs @@ -0,0 +1,688 @@ +use ddc_primitives::{ + crypto, sr25519, + traits::{ClusterManager, ClusterQuery}, + BucketId, ClusterNodeKind, ClusterNodeState, ClusterNodeStatus, ClusterNodesStats, + ClusterStatus, PayoutError, PayoutState, StorageNodePubKey, MAX_PAYOUT_BATCH_COUNT, + MAX_PAYOUT_BATCH_SIZE, +}; +use frame_election_provider_support::{ + bounds::{ElectionBounds, ElectionBoundsBuilder}, + onchain, SequentialPhragmen, +}; +use frame_support::{ + pallet_prelude::ConstU32, + parameter_types, + traits::{ConstU16, ConstU64}, + PalletId, +}; +use frame_system::mocking::MockBlock; +use pallet_staking::BalanceOf; +use sp_core::{ByteArray, H256}; +use sp_runtime::{ + curve::PiecewiseLinear, + testing::{TestXt, UintAuthorityId}, + traits::{BlakeTwo256, Extrinsic as ExtrinsicT, IdentifyAccount, IdentityLookup, Verify, Zero}, + BuildStorage, MultiSignature, Perbill, +}; +use sp_staking::{EraIndex, SessionIndex}; + +use crate::{self as pallet_ddc_verification, *}; + +type Block = MockBlock; + +// Configure a mock runtime to test the pallet. 
+frame_support::construct_runtime!( + pub struct Test { + System: frame_system, + DdcVerification: pallet_ddc_verification, + Timestamp: pallet_timestamp, + Balances: pallet_balances, + Staking: pallet_staking, + Session: pallet_session, + } +); + +pub type Extrinsic = TestXt; +pub type Signature = MultiSignature; +pub type AccountId = <::Signer as IdentifyAccount>::AccountId; +type Balance = u64; +type BlockNumber = u64; + +impl frame_system::Config for Test { + type BaseCallFilter = frame_support::traits::Everything; + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type RuntimeOrigin = RuntimeOrigin; + type RuntimeCall = RuntimeCall; + type Nonce = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Block = Block; + type RuntimeEvent = RuntimeEvent; + type BlockHashCount = ConstU64<250>; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = ConstU16<42>; + type OnSetCode = (); + type MaxConsumers = frame_support::traits::ConstU32<16>; +} + +impl pallet_balances::Config for Test { + type MaxLocks = (); + type MaxReserves = (); + type ReserveIdentifier = [u8; 8]; + type Balance = Balance; + type RuntimeEvent = RuntimeEvent; + type DustRemoval = (); + type ExistentialDeposit = ConstU64<1>; + type AccountStore = System; + type WeightInfo = (); + type FreezeIdentifier = (); + type MaxFreezes = (); + type RuntimeHoldReason = (); + type MaxHolds = (); +} + +pallet_staking_reward_curve::build! { + const REWARD_CURVE: PiecewiseLinear<'static> = curve!( + min_inflation: 0_025_000u64, + max_inflation: 0_100_000, + ideal_stake: 0_500_000, + falloff: 0_050_000, + max_piece_count: 40, + test_precision: 0_005_000, + ); +} + +parameter_types! 
{ + pub static ElectionsBounds: ElectionBounds = ElectionBoundsBuilder::default().build(); +} + +pub struct OnChainSeqPhragmen; +impl onchain::Config for OnChainSeqPhragmen { + type System = Test; + type Solver = SequentialPhragmen; + type DataProvider = Staking; + type WeightInfo = (); + type MaxWinners = ConstU32<100>; + type Bounds = ElectionsBounds; +} +parameter_types! { + pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; + pub static Offset: BlockNumber = 0; + pub const Period: BlockNumber = 1; + pub static SessionsPerEra: SessionIndex = 6; + pub static SlashDeferDuration: EraIndex = 2; + pub const BondingDuration: EraIndex = 3; + pub static LedgerSlashPerEra: (BalanceOf, BTreeMap>) = (Zero::zero(), BTreeMap::new()); + pub const OffendingValidatorsThreshold: Perbill = Perbill::from_percent(17); +} + +impl pallet_staking::Config for Test { + type Currency = Balances; + type CurrencyBalance = ::Balance; + type UnixTime = Timestamp; + type CurrencyToVote = (); + type RewardRemainder = (); + type RuntimeEvent = RuntimeEvent; + type Slash = (); + type Reward = (); + type SessionsPerEra = SessionsPerEra; + type SlashDeferDuration = SlashDeferDuration; + type AdminOrigin = frame_system::EnsureRoot; + type BondingDuration = BondingDuration; + type SessionInterface = Self; + type EraPayout = pallet_staking::ConvertCurve; + type NextNewSession = Session; + type MaxNominatorRewardedPerValidator = ConstU32<64>; + type OffendingValidatorsThreshold = OffendingValidatorsThreshold; + type ElectionProvider = onchain::OnChainExecution; + type GenesisElectionProvider = Self::ElectionProvider; + type TargetList = pallet_staking::UseValidatorsMap; + type NominationsQuota = pallet_staking::FixedNominationsQuota<16>; + type MaxUnlockingChunks = ConstU32<32>; + type HistoryDepth = ConstU32<84>; + type VoterList = pallet_staking::UseNominatorsAndValidatorsMap; + type EventListeners = (); + type BenchmarkingConfig = pallet_staking::TestBenchmarkingConfig; + 
type WeightInfo = (); +} + +pub struct OtherSessionHandler; +impl OneSessionHandler for OtherSessionHandler { + type Key = UintAuthorityId; + + fn on_genesis_session<'a, I: 'a>(_: I) + where + I: Iterator, + AccountId: 'a, + { + } + + fn on_new_session<'a, I: 'a>(_: bool, _: I, _: I) + where + I: Iterator, + AccountId: 'a, + { + } + + fn on_disabled(_validator_index: u32) {} +} + +impl sp_runtime::BoundToRuntimeAppPublic for OtherSessionHandler { + type Public = UintAuthorityId; +} + +impl pallet_session::historical::Config for Test { + type FullIdentification = pallet_staking::Exposure; + type FullIdentificationOf = pallet_staking::ExposureOf; +} + +sp_runtime::impl_opaque_keys! { + pub struct SessionKeys { + pub other: OtherSessionHandler, + } +} + +impl pallet_session::Config for Test { + type SessionManager = pallet_session::historical::NoteHistoricalRoot; + type Keys = SessionKeys; + type ShouldEndSession = pallet_session::PeriodicSessions; + type SessionHandler = (OtherSessionHandler,); + type RuntimeEvent = RuntimeEvent; + type ValidatorId = AccountId; + type ValidatorIdOf = pallet_staking::StashOf; + type NextSessionRotation = pallet_session::PeriodicSessions; + type WeightInfo = (); +} + +impl pallet_timestamp::Config for Test { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = ConstU64<5>; + type WeightInfo = (); +} +parameter_types! 
{ + pub const VerificationPalletId: PalletId = PalletId(*b"verifypa"); +} + +impl crate::Config for Test { + type RuntimeEvent = RuntimeEvent; + type PalletId = VerificationPalletId; + type WeightInfo = (); + type ClusterManager = TestClusterManager; + type NodeVisitor = MockNodeVisitor; + type PayoutVisitor = MockPayoutVisitor; + type AuthorityId = sr25519::AuthorityId; + type OffchainIdentifierId = crypto::OffchainIdentifierId; + type ActivityHasher = sp_runtime::traits::BlakeTwo256; + const MAJORITY: u8 = 67; + const BLOCK_TO_START: u16 = 100; + const MIN_DAC_NODES_FOR_CONSENSUS: u16 = 3; + const MAX_PAYOUT_BATCH_SIZE: u16 = MAX_PAYOUT_BATCH_SIZE; + const MAX_PAYOUT_BATCH_COUNT: u16 = MAX_PAYOUT_BATCH_COUNT; + type ActivityHash = H256; + type StakingVisitor = Staking; +} + +// Build genesis storage according to the mock runtime. +pub fn new_test_ext() -> sp_io::TestExternalities { + let mut storage = frame_system::GenesisConfig::::default().build_storage().unwrap(); + + let balances = vec![ + // validator1 stash; has to be equal to the OCW key in the current implementation + (AccountId::from([0xa; 32]), 10000), + // validator1 controller + (AccountId::from([0xaa; 32]), 10000), + // validator2 stash + (AccountId::from([0xb; 32]), 10000), + // validator2 controller + (AccountId::from([0xbb; 32]), 10000), + // validator3 stash + (AccountId::from([0xc; 32]), 10000), + // validator3 controller + (AccountId::from([0xcc; 32]), 10000), + // validator4 stash + (AccountId::from([0xd; 32]), 10000), + // validator4 controller + (AccountId::from([0xdd; 32]), 10000), + // validator5 stash + (AccountId::from([0xe; 32]), 10000), + // validator5 controller + (AccountId::from([0xee; 32]), 10000), + // validator6 stash + (AccountId::from([0xf; 32]), 10000), + // validator6 controller + (AccountId::from([0xff; 32]), 10000), + ]; + let _ = pallet_balances::GenesisConfig:: { balances }.assimilate_storage(&mut storage); + + let stakers = vec![ + ( + AccountId::from([0xa; 32]), + 
AccountId::from([0xaa; 32]), + 1000, + pallet_staking::StakerStatus::Validator, + ), + ( + AccountId::from([0xb; 32]), + AccountId::from([0xbb; 32]), + 1000, + pallet_staking::StakerStatus::Validator, + ), + ( + AccountId::from([0xc; 32]), + AccountId::from([0xcc; 32]), + 1000, + pallet_staking::StakerStatus::Validator, + ), + ( + AccountId::from([0xd; 32]), + AccountId::from([0xdd; 32]), + 1000, + pallet_staking::StakerStatus::Validator, + ), + ( + AccountId::from([0xe; 32]), + AccountId::from([0xee; 32]), + 1000, + pallet_staking::StakerStatus::Validator, + ), + ( + AccountId::from([0xf; 32]), + AccountId::from([0xff; 32]), + 1000, + pallet_staking::StakerStatus::Validator, + ), + ]; + let _ = + pallet_staking::GenesisConfig:: { stakers: stakers.clone(), ..Default::default() } + .assimilate_storage(&mut storage); + + sp_io::TestExternalities::new(storage) +} + +pub struct MockPayoutVisitor; +impl PayoutVisitor for MockPayoutVisitor { + fn get_next_customer_batch_for_payment( + _cluster_id: &ClusterId, + _era_id: DdcEra, + ) -> Result, PayoutError> { + Ok(None) + } + + fn get_next_provider_batch_for_payment( + _cluster_id: &ClusterId, + _era_id: DdcEra, + ) -> Result, PayoutError> { + Ok(None) + } + + fn all_customer_batches_processed(_cluster_id: &ClusterId, _era_id: DdcEra) -> bool { + true + } + + fn all_provider_batches_processed(_cluster_id: &ClusterId, _era_id: DdcEra) -> bool { + true + } + + fn get_billing_report_status(_cluster_id: &ClusterId, _era_id: DdcEra) -> PayoutState { + PayoutState::NotInitialized + } + + fn begin_billing_report( + _origin: T::AccountId, + _cluster_id: ClusterId, + _era_id: DdcEra, + _start_era: i64, + _end_era: i64, + ) -> DispatchResult { + Ok(()) + } + + fn begin_charging_customers( + _origin: T::AccountId, + _cluster_id: ClusterId, + _era_id: DdcEra, + _max_batch_index: BatchIndex, + ) -> DispatchResult { + Ok(()) + } + + fn send_charging_customers_batch( + _origin: T::AccountId, + _cluster_id: ClusterId, + _era_id: DdcEra, + 
_batch_index: BatchIndex, + _payers: &[(T::AccountId, BucketId, CustomerUsage)], + _batch_proof: MMRProof, + ) -> DispatchResult { + Ok(()) + } + + fn end_charging_customers( + _origin: T::AccountId, + _cluster_id: ClusterId, + _era_id: DdcEra, + ) -> DispatchResult { + Ok(()) + } + + fn begin_rewarding_providers( + _origin: T::AccountId, + _cluster_id: ClusterId, + _era_id: DdcEra, + _max_batch_index: BatchIndex, + _total_node_usage: NodeUsage, + ) -> DispatchResult { + Ok(()) + } + + fn send_rewarding_providers_batch( + _origin: T::AccountId, + _cluster_id: ClusterId, + _era_id: DdcEra, + _batch_index: BatchIndex, + _payees: &[(T::AccountId, NodeUsage)], + _batch_proof: MMRProof, + ) -> DispatchResult { + Ok(()) + } + + fn end_rewarding_providers( + _origin: T::AccountId, + _cluster_id: ClusterId, + _era_id: DdcEra, + ) -> DispatchResult { + Ok(()) + } + + fn end_billing_report( + _origin: T::AccountId, + _cluster_id: ClusterId, + _era_id: DdcEra, + ) -> DispatchResult { + Ok(()) + } +} + +pub struct MockNodeVisitor; +impl NodeVisitor for MockNodeVisitor { + fn get_node_params(node_pub_key: &NodePubKey) -> Result { + let key1 = + NodePubKey::StoragePubKey(StorageNodePubKey::new(array_bytes::hex_n_into_unchecked( + "9ef98ad9c3626ba725e78d76cfcfc4b4d07e84f0388465bc7eb992e3e117234a", + ))); + let key2 = + NodePubKey::StoragePubKey(StorageNodePubKey::new(array_bytes::hex_n_into_unchecked( + "48594f1fd4f05135914c42b03e63b61f6a3e4c537ccee3dbac555ef6df371b7e", + ))); + let key3 = + NodePubKey::StoragePubKey(StorageNodePubKey::new(array_bytes::hex_n_into_unchecked( + "dcb83f51e6554fb3fca04807f98336d160419bf0c54f479d760b76df1e04bda2", + ))); + + let key4 = + NodePubKey::StoragePubKey(StorageNodePubKey::new(array_bytes::hex_n_into_unchecked( + "48dbb875df3f77816cd01b5a8ce6f32944ae4ac3b4453b9345c3320689445e88", + ))); + let key5 = + NodePubKey::StoragePubKey(StorageNodePubKey::new(array_bytes::hex_n_into_unchecked( + 
"302f937df3a0ec4c658e8122439e748d227442ebd493cef521a1e14943844395", + ))); + let key6 = + NodePubKey::StoragePubKey(StorageNodePubKey::new(array_bytes::hex_n_into_unchecked( + "f2f521014e436b426e4277b23267655ae04d1858c84756d9ed970d17271d19e4", + ))); + + let key7 = + NodePubKey::StoragePubKey(StorageNodePubKey::new(array_bytes::hex_n_into_unchecked( + "1f50f1455f60f5774564233d321a116ca45ae3188b2200999445706d04839d72", + ))); + let key8 = + NodePubKey::StoragePubKey(StorageNodePubKey::new(array_bytes::hex_n_into_unchecked( + "69b1897f5f7a8a775ee3a4e00f32e20bb9d30e1cdd42149ce1bd50a9aa206040", + ))); + let _key9 = + NodePubKey::StoragePubKey(StorageNodePubKey::new(array_bytes::hex_n_into_unchecked( + "bf5ca1c9406094b4dea7981ba076f1520c218f18ace853300a3300c5cfe9c2af", + ))); + + let storage_node_params = if node_pub_key == &key1 { + StorageNodeParams { + mode: StorageNodeMode::DAC, + host: "178.251.228.236".as_bytes().to_vec(), + domain: vec![2u8; 255], + ssl: false, + http_port: 8080u16, + grpc_port: 25000u16, + p2p_port: 15000u16, + } + } else if node_pub_key == &key2 { + StorageNodeParams { + mode: StorageNodeMode::DAC, + host: "95.217.8.119".as_bytes().to_vec(), + domain: vec![2u8; 255], + ssl: false, + http_port: 8080u16, + grpc_port: 25000u16, + p2p_port: 15000u16, + } + } else if node_pub_key == &key3 { + StorageNodeParams { + mode: StorageNodeMode::DAC, + host: "178.251.228.42".as_bytes().to_vec(), + domain: vec![2u8; 255], + ssl: false, + http_port: 8080u16, + grpc_port: 25000u16, + p2p_port: 15000u16, + } + } else if node_pub_key == &key4 { + StorageNodeParams { + mode: StorageNodeMode::DAC, + host: "37.27.30.47".as_bytes().to_vec(), + domain: vec![2u8; 255], + ssl: false, + http_port: 8080u16, + grpc_port: 25000u16, + p2p_port: 15000u16, + } + } else if node_pub_key == &key5 { + StorageNodeParams { + mode: StorageNodeMode::DAC, + host: "178.251.228.49".as_bytes().to_vec(), + domain: vec![2u8; 255], + ssl: false, + http_port: 8080u16, + grpc_port: 25000u16, + 
p2p_port: 15000u16, + } + } else if node_pub_key == &key6 { + StorageNodeParams { + mode: StorageNodeMode::DAC, + host: "159.69.207.65".as_bytes().to_vec(), + domain: vec![2u8; 255], + ssl: false, + http_port: 8080u16, + grpc_port: 25000u16, + p2p_port: 15000u16, + } + } else if node_pub_key == &key7 { + StorageNodeParams { + mode: StorageNodeMode::DAC, + host: "178.251.228.165".as_bytes().to_vec(), + domain: vec![2u8; 255], + ssl: false, + http_port: 8080u16, + grpc_port: 25000u16, + p2p_port: 15000u16, + } + } else if node_pub_key == &key8 { + StorageNodeParams { + mode: StorageNodeMode::DAC, + host: "49.13.211.157".as_bytes().to_vec(), + domain: vec![2u8; 255], + ssl: false, + http_port: 8080u16, + grpc_port: 25000u16, + p2p_port: 15000u16, + } + } else { + StorageNodeParams { + mode: StorageNodeMode::DAC, + host: "178.251.228.44".as_bytes().to_vec(), + domain: vec![2u8; 255], + ssl: false, + http_port: 8080u16, + grpc_port: 25000u16, + p2p_port: 15000u16, + } + }; + + Ok(NodeParams::StorageParams(storage_node_params)) + } + + fn get_cluster_id(_node_pub_key: &NodePubKey) -> Result, DispatchError> { + unimplemented!() + } + fn exists(_node_pub_key: &NodePubKey) -> bool { + unimplemented!() + } + + fn get_node_provider_id(_node_pub_key: &NodePubKey) -> Result { + let temp: AccountId = AccountId::from([0xa; 32]); + let account_1 = T::AccountId::decode(&mut &temp.as_slice()[..]).unwrap(); + + Ok(account_1) + } +} + +pub struct TestClusterManager; +impl ClusterQuery for TestClusterManager { + fn cluster_exists(_cluster_id: &ClusterId) -> bool { + unimplemented!() + } + fn get_cluster_status(_cluster_id: &ClusterId) -> Result { + unimplemented!() + } + fn get_manager_and_reserve_id( + _cluster_id: &ClusterId, + ) -> Result<(T::AccountId, T::AccountId), DispatchError> { + unimplemented!() + } +} + +impl ClusterManager for TestClusterManager { + fn contains_node( + _cluster_id: &ClusterId, + _node_pub_key: &NodePubKey, + _validation_status: Option, + ) -> bool { + 
unimplemented!() + } + + fn get_nodes(_cluster_id: &ClusterId) -> Result, DispatchError> { + Ok(vec![ + NodePubKey::StoragePubKey(StorageNodePubKey::new(array_bytes::hex_n_into_unchecked( + "9ef98ad9c3626ba725e78d76cfcfc4b4d07e84f0388465bc7eb992e3e117234a", + ))), + NodePubKey::StoragePubKey(StorageNodePubKey::new(array_bytes::hex_n_into_unchecked( + "48594f1fd4f05135914c42b03e63b61f6a3e4c537ccee3dbac555ef6df371b7e", + ))), + NodePubKey::StoragePubKey(StorageNodePubKey::new(array_bytes::hex_n_into_unchecked( + "dcb83f51e6554fb3fca04807f98336d160419bf0c54f479d760b76df1e04bda2", + ))), + NodePubKey::StoragePubKey(StorageNodePubKey::new(array_bytes::hex_n_into_unchecked( + "48dbb875df3f77816cd01b5a8ce6f32944ae4ac3b4453b9345c3320689445e88", + ))), + NodePubKey::StoragePubKey(StorageNodePubKey::new(array_bytes::hex_n_into_unchecked( + "302f937df3a0ec4c658e8122439e748d227442ebd493cef521a1e14943844395", + ))), + NodePubKey::StoragePubKey(StorageNodePubKey::new(array_bytes::hex_n_into_unchecked( + "f2f521014e436b426e4277b23267655ae04d1858c84756d9ed970d17271d19e4", + ))), + NodePubKey::StoragePubKey(StorageNodePubKey::new(array_bytes::hex_n_into_unchecked( + "1f50f1455f60f5774564233d321a116ca45ae3188b2200999445706d04839d72", + ))), + NodePubKey::StoragePubKey(StorageNodePubKey::new(array_bytes::hex_n_into_unchecked( + "69b1897f5f7a8a775ee3a4e00f32e20bb9d30e1cdd42149ce1bd50a9aa206040", + ))), + NodePubKey::StoragePubKey(StorageNodePubKey::new(array_bytes::hex_n_into_unchecked( + "bf5ca1c9406094b4dea7981ba076f1520c218f18ace853300a3300c5cfe9c2af", + ))), + ]) + } + + fn add_node( + _cluster_id: &ClusterId, + _node_pub_key: &NodePubKey, + _node_kind: &ClusterNodeKind, + ) -> Result<(), DispatchError> { + unimplemented!() + } + + fn remove_node( + _cluster_id: &ClusterId, + _node_pub_key: &NodePubKey, + ) -> Result<(), DispatchError> { + unimplemented!() + } + + fn get_manager_account_id(_cluster_id: &ClusterId) -> Result { + unimplemented!() + } + + fn get_node_state( + 
_cluster_id: &ClusterId, + _node_pub_key: &NodePubKey, + ) -> Result>, DispatchError> { + unimplemented!() + } + + fn get_nodes_stats(_cluster_id: &ClusterId) -> Result { + unimplemented!() + } + + fn validate_node( + _cluster_id: &ClusterId, + _node_pub_key: &NodePubKey, + _succeeded: bool, + ) -> Result<(), DispatchError> { + unimplemented!() + } +} + +impl frame_system::offchain::SigningTypes for Test { + type Public = ::Signer; + type Signature = Signature; +} + +impl frame_system::offchain::SendTransactionTypes for Test +where + RuntimeCall: From, +{ + type OverarchingCall = RuntimeCall; + type Extrinsic = Extrinsic; +} + +impl frame_system::offchain::CreateSignedTransaction for Test +where + RuntimeCall: From, +{ + fn create_transaction>( + call: RuntimeCall, + _public: ::Signer, + _account: AccountId, + nonce: u64, + ) -> Option<(RuntimeCall, ::SignaturePayload)> { + Some((call, (nonce, ()))) + } +} diff --git a/pallets/ddc-verification/src/tests.rs b/pallets/ddc-verification/src/tests.rs new file mode 100644 index 000000000..d4a3c2e3e --- /dev/null +++ b/pallets/ddc-verification/src/tests.rs @@ -0,0 +1,2039 @@ +use ddc_primitives::{ + ClusterId, MergeActivityHash, StorageNodeParams, StorageNodePubKey, KEY_TYPE, +}; +use frame_support::{assert_noop, assert_ok}; +use sp_core::{ + offchain::{ + testing::{PendingRequest, TestOffchainExt, TestTransactionPoolExt}, + OffchainDbExt, OffchainStorage, OffchainWorkerExt, Timestamp, TransactionPoolExt, + }, + Pair, +}; +use sp_io::TestExternalities; +use sp_keystore::{testing::MemoryKeystore, Keystore, KeystoreExt}; +use sp_runtime::AccountId32; + +use crate::{mock::*, Error, NodeActivity, OCWError, *}; + +#[allow(dead_code)] +fn register_validators(validators: Vec) { + ValidatorSet::::put(validators.clone()); + + for validator in validators { + assert_noop!( + DdcVerification::set_validator_key( + RuntimeOrigin::signed(validator.clone()), + validator.clone(), + ), + Error::::NotController + ); + } +} + +fn 
get_validators() -> Vec { + let validator1: AccountId32 = [1; 32].into(); + let validator2: AccountId32 = [2; 32].into(); + let validator3: AccountId32 = [3; 32].into(); + let validator4: AccountId32 = [4; 32].into(); + let validator5: AccountId32 = [5; 32].into(); + + vec![validator1, validator2, validator3, validator4, validator5] +} + +fn get_node_activities() -> Vec { + let node1 = NodeActivity { + node_id: "0".to_string(), + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }; + let node2 = NodeActivity { + node_id: "1".to_string(), + stored_bytes: 101, + transferred_bytes: 51, + number_of_puts: 11, + number_of_gets: 21, + }; + let node3 = NodeActivity { + node_id: "2".to_string(), + stored_bytes: 102, + transferred_bytes: 52, + number_of_puts: 12, + number_of_gets: 22, + }; + let node4 = NodeActivity { + node_id: "3".to_string(), + stored_bytes: 103, + transferred_bytes: 53, + number_of_puts: 13, + number_of_gets: 23, + }; + let node5 = NodeActivity { + node_id: "4".to_string(), + stored_bytes: 104, + transferred_bytes: 54, + number_of_puts: 14, + number_of_gets: 24, + }; + vec![node1, node2, node3, node4, node5] +} + +#[test] +fn fetch_node_usage_works() { + let mut ext = TestExternalities::default(); + let (offchain, offchain_state) = TestOffchainExt::new(); + let (pool, _) = TestTransactionPoolExt::new(); + + ext.register_extension(OffchainWorkerExt::new(offchain.clone())); + ext.register_extension(OffchainDbExt::new(Box::new(offchain))); + ext.register_extension(TransactionPoolExt::new(pool)); + + ext.execute_with(|| { + let mut offchain_state = offchain_state.write(); + offchain_state.timestamp = Timestamp::from_unix_millis(0); + let host = "example.com"; + let port = 80; + let era_id = 1; + + // Create a sample NodeActivity instance + let node_activity1 = NodeActivity { + node_id: "1".to_string(), + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }; + let node_activity2 = 
NodeActivity { + node_id: "2".to_string(), + stored_bytes: 110, + transferred_bytes: 510, + number_of_puts: 110, + number_of_gets: 210, + }; + let nodes_activity_json = + serde_json::to_string(&vec![node_activity1.clone(), node_activity2.clone()]).unwrap(); + + // Mock HTTP request and response + let pending_request = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/nodes?eraId={}", host, port, era_id), + response: Some(nodes_activity_json.as_bytes().to_vec()), + sent: true, + ..Default::default() + }; + offchain_state.expect_request(pending_request); + drop(offchain_state); + + let era_id = 1; + let cluster_id = ClusterId::from([1; 20]); + let node_params = StorageNodeParams { + ssl: false, + host: host.as_bytes().to_vec(), + http_port: port, + mode: StorageNodeMode::DAC, + p2p_port: 5555, + grpc_port: 4444, + domain: b"example2.com".to_vec(), + }; + + let result = Pallet::::fetch_node_usage(&cluster_id, era_id, &node_params); + assert!(result.is_ok()); + let activities = result.unwrap(); + assert_eq!(activities[0].number_of_gets, node_activity1.number_of_gets); + assert_eq!(activities[0].number_of_puts, node_activity1.number_of_puts); + assert_eq!(activities[0].transferred_bytes, node_activity1.transferred_bytes); + assert_eq!(activities[0].stored_bytes, node_activity1.stored_bytes); + + assert_eq!(activities[1].number_of_gets, node_activity2.number_of_gets); + assert_eq!(activities[1].number_of_puts, node_activity2.number_of_puts); + assert_eq!(activities[1].transferred_bytes, node_activity2.transferred_bytes); + assert_eq!(activities[1].stored_bytes, node_activity2.stored_bytes); + }); +} + +#[test] +fn fetch_customers_usage_works() { + let mut ext = TestExternalities::default(); + let (offchain, offchain_state) = TestOffchainExt::new(); + let (pool, _) = TestTransactionPoolExt::new(); + + ext.register_extension(OffchainWorkerExt::new(offchain.clone())); + ext.register_extension(OffchainDbExt::new(Box::new(offchain))); + 
ext.register_extension(TransactionPoolExt::new(pool)); + + ext.execute_with(|| { + let mut offchain_state = offchain_state.write(); + offchain_state.timestamp = Timestamp::from_unix_millis(0); + let host = "example.com"; + let port = 80; + let era_id = 1; + + // Create a sample NodeActivity instance + let customer_activity1 = CustomerActivity { + bucket_id: 111, + customer_id: "1".to_string(), + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }; + let customer_activity2 = CustomerActivity { + bucket_id: 222, + customer_id: "2".to_string(), + stored_bytes: 1000, + transferred_bytes: 500, + number_of_puts: 100, + number_of_gets: 200, + }; + let customers_activity_json = + serde_json::to_string(&vec![customer_activity1.clone(), customer_activity2.clone()]) + .unwrap(); + + // Mock HTTP request and response + let pending_request = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/buckets?eraId={}", host, port, era_id), + response: Some(customers_activity_json.as_bytes().to_vec()), + sent: true, + ..Default::default() + }; + offchain_state.expect_request(pending_request); + drop(offchain_state); + + let era_id = 1; + let cluster_id = ClusterId::from([1; 20]); + let node_params = StorageNodeParams { + ssl: false, + host: host.as_bytes().to_vec(), + http_port: port, + mode: StorageNodeMode::DAC, + p2p_port: 5555, + grpc_port: 4444, + domain: b"example2.com".to_vec(), + }; + + let result = Pallet::::fetch_customers_usage(&cluster_id, era_id, &node_params); + assert!(result.is_ok()); + let activities = result.unwrap(); + assert_eq!(activities[0].number_of_gets, customer_activity1.number_of_gets); + assert_eq!(activities[0].number_of_puts, customer_activity1.number_of_puts); + assert_eq!(activities[0].transferred_bytes, customer_activity1.transferred_bytes); + assert_eq!(activities[0].stored_bytes, customer_activity1.stored_bytes); + + assert_eq!(activities[1].number_of_gets, 
customer_activity2.number_of_gets); + assert_eq!(activities[1].number_of_puts, customer_activity2.number_of_puts); + assert_eq!(activities[1].transferred_bytes, customer_activity2.transferred_bytes); + assert_eq!(activities[1].stored_bytes, customer_activity2.stored_bytes); + }); +} + +#[test] +fn test_reach_consensus_empty() { + let activities: Vec = Vec::new(); + let result = DdcVerification::reach_consensus(&activities, 3); + assert!(result.is_none()); +} + +#[test] +fn test_reach_consensus_success() { + let activities = vec![ + CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 1, + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }, + CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 1, + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }, + CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 1, + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }, + ]; + let result = DdcVerification::reach_consensus(&activities, 3); + assert!(result.is_some()); + assert_eq!(result.unwrap().stored_bytes, 100); +} + +#[test] +fn test_reach_consensus_failure() { + let activities = vec![ + CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 1, + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }, + CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 1, + stored_bytes: 200, + transferred_bytes: 100, + number_of_puts: 20, + number_of_gets: 40, + }, + CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 1, + stored_bytes: 300, + transferred_bytes: 150, + number_of_puts: 30, + number_of_gets: 60, + }, + ]; + let result = DdcVerification::reach_consensus(&activities, 3); + assert!(result.is_none()); +} + +#[test] +fn test_reach_consensus_threshold() { + let activities = vec![ + CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 1, + 
stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }, + CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 1, + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }, + CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 1, + stored_bytes: 200, + transferred_bytes: 100, + number_of_puts: 20, + number_of_gets: 40, + }, + ]; + + let mut result = DdcVerification::reach_consensus(&activities, 2); + assert!(result.is_some()); + assert_eq!(result.unwrap().stored_bytes, 100); + result = DdcVerification::reach_consensus(&activities, 3); + assert!(result.is_none()); +} + +#[test] +fn test_reach_consensus_exact_threshold() { + let activities = vec![ + CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 1, + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }, + CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 1, + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }, + ]; + let result = DdcVerification::reach_consensus(&activities, 3); + assert!(result.is_none()); +} + +#[test] +fn test_get_consensus_customers_activity_success() { + let cluster_id = ClusterId::from([1; 20]); + let era_id = 1; + let min_nodes = 3; + let threshold = Percent::from_percent(67); + + let node_pubkey_0 = NodePubKey::StoragePubKey(AccountId32::new([0; 32])); + let node_pubkey_1 = NodePubKey::StoragePubKey(AccountId32::new([1; 32])); + let node_pubkey_2 = NodePubKey::StoragePubKey(AccountId32::new([2; 32])); + + let customers_activity = vec![ + ( + node_pubkey_0, + vec![CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 1, + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }], + ), + ( + node_pubkey_1, + vec![CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 1, + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + 
number_of_gets: 20, + }], + ), + ( + node_pubkey_2, + vec![CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 1, + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }], + ), + ]; + + let result = DdcVerification::get_consensus_for_activities( + &cluster_id, + era_id, + &customers_activity, + min_nodes, + threshold, + ); + assert!(result.is_ok()); + let consensus_activities = result.unwrap(); + assert_eq!(consensus_activities.len(), 1); + assert_eq!(consensus_activities[0].stored_bytes, 100); +} + +#[test] +fn test_get_consensus_customers_activity_success2() { + let cluster_id = ClusterId::from([1; 20]); + let era_id = 1; + let min_nodes = 3; + let threshold = Percent::from_percent(67); + + let node_pubkey_0 = NodePubKey::StoragePubKey(AccountId32::new([0; 32])); + let node_pubkey_1 = NodePubKey::StoragePubKey(AccountId32::new([1; 32])); + let node_pubkey_2 = NodePubKey::StoragePubKey(AccountId32::new([2; 32])); + + let customers_activity = vec![ + ( + node_pubkey_0.clone(), + vec![CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 1, + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }], + ), + ( + node_pubkey_1.clone(), + vec![CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 1, + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }], + ), + ( + node_pubkey_2.clone(), + vec![CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 1, + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }], + ), + ( + node_pubkey_0, + vec![CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 2, + stored_bytes: 110, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }], + ), + ( + node_pubkey_1, + vec![CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 2, + stored_bytes: 110, + transferred_bytes: 50, + number_of_puts: 10, + 
number_of_gets: 20, + }], + ), + ( + node_pubkey_2, + vec![CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 2, + stored_bytes: 110, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }], + ), + ]; + + let result = DdcVerification::get_consensus_for_activities( + &cluster_id, + era_id, + &customers_activity, + min_nodes, + threshold, + ); + assert!(result.is_ok()); + let consensus_activities = result.unwrap(); + assert_eq!(consensus_activities.len(), 2); + assert_eq!(consensus_activities[1].stored_bytes, 100); + assert_eq!(consensus_activities[1].bucket_id, 1); + assert_eq!(consensus_activities[0].stored_bytes, 110); + assert_eq!(consensus_activities[0].bucket_id, 2); +} + +#[test] +fn test_get_consensus_nodes_activity_success() { + let cluster_id = ClusterId::from([1; 20]); + let era_id = 1; + let min_nodes = 3; + let threshold = Percent::from_percent(67); + + let node_pubkey_0 = NodePubKey::StoragePubKey(AccountId32::new([0; 32])); + let node_pubkey_1 = NodePubKey::StoragePubKey(AccountId32::new([1; 32])); + let node_pubkey_2 = NodePubKey::StoragePubKey(AccountId32::new([2; 32])); + + let customers_activity = vec![ + ( + node_pubkey_0, + vec![NodeActivity { + node_id: "0".to_string(), + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }], + ), + ( + node_pubkey_1, + vec![NodeActivity { + node_id: "0".to_string(), + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }], + ), + ( + node_pubkey_2, + vec![NodeActivity { + node_id: "0".to_string(), + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }], + ), + ]; + + let result = DdcVerification::get_consensus_for_activities( + &cluster_id, + era_id, + &customers_activity, + min_nodes, + threshold, + ); + assert!(result.is_ok()); + let consensus_activities = result.unwrap(); + assert_eq!(consensus_activities.len(), 1); + 
assert_eq!(consensus_activities[0].stored_bytes, 100); +} + +#[test] +fn test_get_consensus_customers_activity_empty() { + let cluster_id = ClusterId::from([1; 20]); + let era_id = 1; + let min_nodes = 3; + let threshold = Percent::from_percent(67); + + let node_pubkey_0 = NodePubKey::StoragePubKey(AccountId32::new([0; 32])); + let node_pubkey_1 = NodePubKey::StoragePubKey(AccountId32::new([1; 32])); + let node_pubkey_2 = NodePubKey::StoragePubKey(AccountId32::new([2; 32])); + + let customers_activity = vec![ + (node_pubkey_0.clone(), Vec::::new()), + (node_pubkey_1.clone(), Vec::::new()), + (node_pubkey_2.clone(), Vec::::new()), + ]; + + let result = DdcVerification::get_consensus_for_activities( + &cluster_id, + era_id, + &customers_activity, + min_nodes, + threshold, + ); + assert!(result.is_ok()); + let consensus_activities = result.unwrap(); + assert_eq!(consensus_activities.len(), 0); +} + +#[test] +fn test_get_consensus_customers_activity_not_enough_nodes() { + let cluster_id1 = ClusterId::from([1; 20]); + let era_id1 = 1; + let min_nodes = 3; + let threshold = Percent::from_percent(67); + let node_pubkey_0 = NodePubKey::StoragePubKey(AccountId32::new([0; 32])); + let node_pubkey_1 = NodePubKey::StoragePubKey(AccountId32::new([1; 32])); + + let customers_activity = vec![ + ( + node_pubkey_0, + vec![CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 1, + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }], + ), + ( + node_pubkey_1, + vec![CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 1, + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }], + ), + ]; + + let result = DdcVerification::get_consensus_for_activities( + &cluster_id1, + era_id1, + &customers_activity, + min_nodes, + threshold, + ); + assert!(result.is_err()); + let errors = result.err().unwrap(); + assert_eq!(errors.len(), 1); + match &errors[0] { + OCWError::NotEnoughNodesForConsensus { 
cluster_id, era_id, id } => { + assert_eq!(*id, customers_activity[0].1[0].get_consensus_id::()); + assert_eq!(*cluster_id, cluster_id1); + assert_eq!(*era_id, era_id1); + }, + _ => panic!("Expected NotEnoughNodes error"), + } +} + +#[test] +fn test_get_consensus_nodes_activity_not_enough_nodes() { + let cluster_id1 = ClusterId::from([1; 20]); + let era_id1 = 1; + let min_nodes = 3; + let threshold = Percent::from_percent(67); + let node_pubkey_0 = NodePubKey::StoragePubKey(AccountId32::new([0; 32])); + let node_pubkey_1 = NodePubKey::StoragePubKey(AccountId32::new([1; 32])); + + let nodes_activity = vec![ + ( + node_pubkey_0, + vec![NodeActivity { + node_id: "0".to_string(), + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }], + ), + ( + node_pubkey_1, + vec![NodeActivity { + node_id: "0".to_string(), + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }], + ), + ]; + + let result = DdcVerification::get_consensus_for_activities( + &cluster_id1, + era_id1, + &nodes_activity, + min_nodes, + threshold, + ); + assert!(result.is_err()); + let errors = result.err().unwrap(); + assert_eq!(errors.len(), 1); + match &errors[0] { + OCWError::NotEnoughNodesForConsensus { cluster_id, era_id, id } => { + assert_eq!(*id, nodes_activity[0].1[0].get_consensus_id::()); + assert_eq!(*cluster_id, cluster_id1); + assert_eq!(*era_id, era_id1); + }, + _ => panic!("Expected NotEnoughNodes error"), + } +} + +#[test] +fn test_get_consensus_customers_activity_not_in_consensus() { + let cluster_id1 = ClusterId::from([1; 20]); + let era_id1 = 1; + let min_nodes = 3; + let threshold = Percent::from_percent(67); + + let node_pubkey_0 = NodePubKey::StoragePubKey(AccountId32::new([0; 32])); + let node_pubkey_1 = NodePubKey::StoragePubKey(AccountId32::new([1; 32])); + let node_pubkey_2 = NodePubKey::StoragePubKey(AccountId32::new([2; 32])); + + let customers_activity = vec![ + ( + node_pubkey_0, + 
vec![CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 1, + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }], + ), + ( + node_pubkey_1, + vec![CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 1, + stored_bytes: 200, + transferred_bytes: 100, + number_of_puts: 20, + number_of_gets: 40, + }], + ), + ( + node_pubkey_2, + vec![CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 1, + stored_bytes: 300, + transferred_bytes: 150, + number_of_puts: 30, + number_of_gets: 60, + }], + ), + ]; + + let result = DdcVerification::get_consensus_for_activities( + &cluster_id1, + era_id1, + &customers_activity, + min_nodes, + threshold, + ); + assert!(result.is_err()); + let errors = result.err().unwrap(); + assert_eq!(errors.len(), 1); + match &errors[0] { + OCWError::ActivityNotInConsensus { cluster_id, era_id, id } => { + assert_eq!(*id, customers_activity[0].1[0].get_consensus_id::()); + assert_eq!(*cluster_id, cluster_id1); + assert_eq!(*era_id, era_id1); + }, + _ => panic!("Expected CustomerActivityNotInConsensus error"), + } +} + +#[test] +fn test_get_consensus_customers_activity_not_in_consensus_2() { + let cluster_id1 = ClusterId::from([1; 20]); + let era_id1 = 1; + let min_nodes = 3; + let threshold = Percent::from_percent(67); + + let node_pubkey_0 = NodePubKey::StoragePubKey(AccountId32::new([0; 32])); + let node_pubkey_1 = NodePubKey::StoragePubKey(AccountId32::new([1; 32])); + let node_pubkey_2 = NodePubKey::StoragePubKey(AccountId32::new([2; 32])); + + let customers_activity = vec![ + ( + node_pubkey_0.clone(), + vec![CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 1, + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }], + ), + ( + node_pubkey_1.clone(), + vec![CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 1, + stored_bytes: 200, + transferred_bytes: 100, + number_of_puts: 20, + number_of_gets: 40, + }], + ), 
+ ( + node_pubkey_2.clone(), + vec![CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 1, + stored_bytes: 300, + transferred_bytes: 150, + number_of_puts: 30, + number_of_gets: 60, + }], + ), + ( + node_pubkey_0, + vec![CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 2, + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }], + ), + ( + node_pubkey_1, + vec![CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 2, + stored_bytes: 200, + transferred_bytes: 100, + number_of_puts: 20, + number_of_gets: 40, + }], + ), + ( + node_pubkey_2, + vec![CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 2, + stored_bytes: 300, + transferred_bytes: 150, + number_of_puts: 30, + number_of_gets: 60, + }], + ), + ]; + + let result = DdcVerification::get_consensus_for_activities( + &cluster_id1, + era_id1, + &customers_activity, + min_nodes, + threshold, + ); + assert!(result.is_err()); + let errors = result.err().unwrap(); + assert_eq!(errors.len(), 2); + match &errors[1] { + OCWError::ActivityNotInConsensus { cluster_id, era_id, id } => { + assert_eq!(*id, customers_activity[0].1[0].get_consensus_id::()); + assert_eq!(*cluster_id, cluster_id1); + assert_eq!(*era_id, era_id1); + }, + _ => panic!("Expected CustomerActivityNotInConsensus error"), + } + match &errors[0] { + OCWError::ActivityNotInConsensus { cluster_id, era_id, id } => { + assert_eq!(*id, customers_activity[3].1[0].get_consensus_id::()); + assert_eq!(*cluster_id, cluster_id1); + assert_eq!(*era_id, era_id1); + }, + _ => panic!("Expected CustomerActivityNotInConsensus error"), + } +} + +#[test] +fn test_get_consensus_customers_activity_diff_errors() { + let cluster_id1 = ClusterId::from([1; 20]); + let era_id1 = 1; + let min_nodes = 3; + let threshold = Percent::from_percent(67); + + let node_pubkey_0 = NodePubKey::StoragePubKey(AccountId32::new([0; 32])); + let node_pubkey_1 = NodePubKey::StoragePubKey(AccountId32::new([1; 
32])); + let node_pubkey_2 = NodePubKey::StoragePubKey(AccountId32::new([2; 32])); + + let customers_activity = vec![ + ( + node_pubkey_0.clone(), + vec![CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 1, + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }], + ), + ( + node_pubkey_1.clone(), + vec![CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 1, + stored_bytes: 200, + transferred_bytes: 100, + number_of_puts: 20, + number_of_gets: 40, + }], + ), + ( + node_pubkey_2.clone(), + vec![CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 1, + stored_bytes: 300, + transferred_bytes: 150, + number_of_puts: 30, + number_of_gets: 60, + }], + ), + ( + node_pubkey_0, + vec![CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 2, + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }], + ), + ( + node_pubkey_1, + vec![CustomerActivity { + customer_id: "0".to_string(), + bucket_id: 2, + stored_bytes: 200, + transferred_bytes: 100, + number_of_puts: 20, + number_of_gets: 40, + }], + ), + ]; + + let result = DdcVerification::get_consensus_for_activities( + &cluster_id1, + era_id1, + &customers_activity, + min_nodes, + threshold, + ); + assert!(result.is_err()); + let errors = result.err().unwrap(); + assert_eq!(errors.len(), 2); + match &errors[1] { + OCWError::ActivityNotInConsensus { cluster_id, era_id, id } => { + assert_eq!(*id, customers_activity[0].1[0].get_consensus_id::()); + assert_eq!(*cluster_id, cluster_id1); + assert_eq!(*era_id, era_id1); + }, + _ => panic!("Expected CustomerActivityNotInConsensus error"), + } + match &errors[0] { + OCWError::NotEnoughNodesForConsensus { cluster_id, era_id, id } => { + assert_eq!(*id, customers_activity[3].1[0].get_consensus_id::()); + assert_eq!(*cluster_id, cluster_id1); + assert_eq!(*era_id, era_id1); + }, + _ => panic!("Expected CustomerActivityNotInConsensus error"), + } +} + +#[test] +fn 
test_get_consensus_nodes_activity_not_in_consensus() { + let cluster_id1 = ClusterId::from([1; 20]); + let era_id1 = 1; + let min_nodes = 3; + let threshold = Percent::from_percent(67); + + let node_pubkey_0 = NodePubKey::StoragePubKey(AccountId32::new([0; 32])); + let node_pubkey_1 = NodePubKey::StoragePubKey(AccountId32::new([1; 32])); + let node_pubkey_2 = NodePubKey::StoragePubKey(AccountId32::new([2; 32])); + + let nodes_activity = vec![ + ( + node_pubkey_0, + vec![NodeActivity { + node_id: "0".to_string(), + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }], + ), + ( + node_pubkey_1, + vec![NodeActivity { + node_id: "0".to_string(), + stored_bytes: 200, + transferred_bytes: 100, + number_of_puts: 20, + number_of_gets: 40, + }], + ), + ( + node_pubkey_2, + vec![NodeActivity { + node_id: "0".to_string(), + stored_bytes: 300, + transferred_bytes: 150, + number_of_puts: 30, + number_of_gets: 60, + }], + ), + ]; + + let result = DdcVerification::get_consensus_for_activities( + &cluster_id1, + era_id1, + &nodes_activity, + min_nodes, + threshold, + ); + assert!(result.is_err()); + let errors = result.err().unwrap(); + assert_eq!(errors.len(), 1); + match &errors[0] { + OCWError::ActivityNotInConsensus { cluster_id, era_id, id } => { + assert_eq!(*id, nodes_activity[0].1[0].get_consensus_id::()); + assert_eq!(*cluster_id, cluster_id1); + assert_eq!(*era_id, era_id1); + }, + _ => panic!("Expected CustomerActivityNotInConsensus error"), + } +} + +#[test] +fn test_convert_to_batch_merkle_roots() { + let nodes = get_node_activities(); + let activities_batch_1 = vec![nodes[0].clone(), nodes[1].clone(), nodes[2].clone()]; + let activities_batch_2 = vec![nodes[3].clone(), nodes[4].clone()]; + let cluster_id = ClusterId::default(); + let era_id_1 = 1; + + let result_roots = DdcVerification::convert_to_batch_merkle_roots( + &cluster_id, + era_id_1, + vec![activities_batch_1.clone(), activities_batch_2.clone()], + ) + .unwrap(); + 
let expected_roots: Vec = vec![ + DdcVerification::create_merkle_root( + &cluster_id, + era_id_1, + &activities_batch_1.iter().map(|a| a.hash::()).collect::>(), + ) + .unwrap(), + DdcVerification::create_merkle_root( + &cluster_id, + era_id_1, + &activities_batch_2.iter().map(|a| a.hash::()).collect::>(), + ) + .unwrap(), + ]; + + assert_eq!(result_roots, expected_roots); +} + +#[test] +fn test_convert_to_batch_merkle_roots_empty() { + let cluster_id = ClusterId::default(); + let era_id_1 = 1; + let result_roots = DdcVerification::convert_to_batch_merkle_roots( + &cluster_id, + era_id_1, + Vec::>::new(), + ) + .unwrap(); + let expected_roots: Vec = Vec::::new(); + + assert_eq!(result_roots, expected_roots); +} + +#[test] +fn test_split_to_batches_empty_activities() { + let activities: Vec = vec![]; + let result = DdcVerification::split_to_batches(&activities, 3); + assert_eq!(result, Vec::>::new()); +} + +#[test] +fn test_split_to_batches_single_batch() { + let nodes = get_node_activities(); + let activities = vec![nodes[0].clone(), nodes[1].clone(), nodes[2].clone()]; + let mut sorted_activities = vec![nodes[0].clone(), nodes[1].clone(), nodes[2].clone()]; + + sorted_activities.sort(); + let result = DdcVerification::split_to_batches(&activities, 5); + assert_eq!(result, vec![sorted_activities]); +} + +#[test] +fn test_split_to_batches_exact_batches() { + let nodes = get_node_activities(); + let activities = vec![nodes[0].clone(), nodes[1].clone(), nodes[2].clone(), nodes[3].clone()]; + let mut sorted_activities = + vec![nodes[0].clone(), nodes[1].clone(), nodes[2].clone(), nodes[3].clone()]; + sorted_activities.sort(); + let result = DdcVerification::split_to_batches(&activities, 2); + assert_eq!( + result, + vec![ + [sorted_activities[0].clone(), sorted_activities[1].clone()], + [sorted_activities[2].clone(), sorted_activities[3].clone()] + ] + ); +} +#[test] +#[allow(clippy::vec_init_then_push)] +fn test_split_to_batches_non_exact_batches() { + let nodes = 
get_node_activities(); + let activities = vec![ + nodes[0].clone(), + nodes[1].clone(), + nodes[2].clone(), + nodes[3].clone(), + nodes[4].clone(), + ]; + let mut sorted_activities = vec![ + nodes[0].clone(), + nodes[1].clone(), + nodes[2].clone(), + nodes[3].clone(), + nodes[4].clone(), + ]; + sorted_activities.sort(); + let result = DdcVerification::split_to_batches(&activities, 2); + let mut expected: Vec> = Vec::new(); + expected.push(vec![sorted_activities[0].clone(), sorted_activities[1].clone()]); + expected.push(vec![sorted_activities[2].clone(), sorted_activities[3].clone()]); + expected.push(vec![sorted_activities[4].clone()]); + + assert_eq!(result, expected); +} + +#[test] +fn test_get_consensus_nodes_activity_not_in_consensus2() { + let cluster_id1 = ClusterId::from([1; 20]); + let era_id1 = 1; + let min_nodes = 3; + let threshold = Percent::from_percent(67); + + let node_pubkey_0 = NodePubKey::StoragePubKey(AccountId32::new([0; 32])); + let node_pubkey_1 = NodePubKey::StoragePubKey(AccountId32::new([1; 32])); + let node_pubkey_2 = NodePubKey::StoragePubKey(AccountId32::new([2; 32])); + + let nodes_activity = vec![ + ( + node_pubkey_0.clone(), + vec![NodeActivity { + node_id: "0".to_string(), + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }], + ), + ( + node_pubkey_1.clone(), + vec![NodeActivity { + node_id: "0".to_string(), + stored_bytes: 200, + transferred_bytes: 100, + number_of_puts: 20, + number_of_gets: 40, + }], + ), + ( + node_pubkey_2.clone(), + vec![NodeActivity { + node_id: "0".to_string(), + stored_bytes: 300, + transferred_bytes: 150, + number_of_puts: 30, + number_of_gets: 60, + }], + ), + ( + node_pubkey_0, + vec![NodeActivity { + node_id: "1".to_string(), + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }], + ), + ( + node_pubkey_1, + vec![NodeActivity { + node_id: "1".to_string(), + stored_bytes: 200, + transferred_bytes: 100, + number_of_puts: 20, 
+ number_of_gets: 40, + }], + ), + ( + node_pubkey_2, + vec![NodeActivity { + node_id: "1".to_string(), + stored_bytes: 300, + transferred_bytes: 150, + number_of_puts: 30, + number_of_gets: 60, + }], + ), + ]; + + let result = DdcVerification::get_consensus_for_activities( + &cluster_id1, + era_id1, + &nodes_activity, + min_nodes, + threshold, + ); + assert!(result.is_err()); + let errors = result.err().unwrap(); + assert_eq!(errors.len(), 2); + match &errors[0] { + OCWError::ActivityNotInConsensus { cluster_id, era_id, id } => { + assert_eq!(*id, nodes_activity[0].1[0].get_consensus_id::()); + assert_eq!(*cluster_id, cluster_id1); + assert_eq!(*era_id, era_id1); + }, + _ => panic!("Expected CustomerActivityNotInConsensus error"), + } + match &errors[1] { + OCWError::ActivityNotInConsensus { cluster_id, era_id, id } => { + assert_eq!(*id, nodes_activity[3].1[0].get_consensus_id::()); + assert_eq!(*cluster_id, cluster_id1); + assert_eq!(*era_id, era_id1); + }, + _ => panic!("Expected CustomerActivityNotInConsensus error"), + } +} + +#[test] +fn test_get_consensus_nodes_activity_diff_errors() { + let cluster_id1 = ClusterId::from([1; 20]); + let era_id1 = 1; + let min_nodes = 3; + let threshold = Percent::from_percent(67); + + let node_pubkey_0 = NodePubKey::StoragePubKey(AccountId32::new([0; 32])); + let node_pubkey_1 = NodePubKey::StoragePubKey(AccountId32::new([1; 32])); + let node_pubkey_2 = NodePubKey::StoragePubKey(AccountId32::new([2; 32])); + + let nodes_activity = vec![ + ( + node_pubkey_0.clone(), + vec![NodeActivity { + node_id: "0".to_string(), + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }], + ), + ( + node_pubkey_1.clone(), + vec![NodeActivity { + node_id: "0".to_string(), + stored_bytes: 200, + transferred_bytes: 100, + number_of_puts: 20, + number_of_gets: 40, + }], + ), + ( + node_pubkey_2.clone(), + vec![NodeActivity { + node_id: "0".to_string(), + stored_bytes: 300, + transferred_bytes: 150, + 
number_of_puts: 30, + number_of_gets: 60, + }], + ), + ( + node_pubkey_0, + vec![NodeActivity { + node_id: "1".to_string(), + stored_bytes: 100, + transferred_bytes: 50, + number_of_puts: 10, + number_of_gets: 20, + }], + ), + ( + node_pubkey_1, + vec![NodeActivity { + node_id: "1".to_string(), + stored_bytes: 200, + transferred_bytes: 100, + number_of_puts: 20, + number_of_gets: 40, + }], + ), + ]; + + let result = DdcVerification::get_consensus_for_activities( + &cluster_id1, + era_id1, + &nodes_activity, + min_nodes, + threshold, + ); + assert!(result.is_err()); + let errors = result.err().unwrap(); + assert_eq!(errors.len(), 2); + match &errors[0] { + OCWError::ActivityNotInConsensus { cluster_id, era_id, id } => { + assert_eq!(*id, nodes_activity[0].1[0].get_consensus_id::()); + assert_eq!(*cluster_id, cluster_id1); + assert_eq!(*era_id, era_id1); + }, + _ => panic!("Expected CustomerActivityNotInConsensus error"), + } + match &errors[1] { + OCWError::NotEnoughNodesForConsensus { cluster_id, era_id, id } => { + assert_eq!(*id, nodes_activity[3].1[0].get_consensus_id::()); + assert_eq!(*cluster_id, cluster_id1); + assert_eq!(*era_id, era_id1); + }, + _ => panic!("Expected CustomerActivityNotInConsensus error"), + } +} + +#[test] +fn fetch_processed_era_works() { + let mut ext = TestExternalities::default(); + let (offchain, offchain_state) = TestOffchainExt::new(); + let (pool, _) = TestTransactionPoolExt::new(); + + ext.register_extension(OffchainWorkerExt::new(offchain.clone())); + ext.register_extension(OffchainDbExt::new(Box::new(offchain))); + ext.register_extension(TransactionPoolExt::new(pool)); + + ext.execute_with(|| { + let mut offchain_state = offchain_state.write(); + offchain_state.timestamp = Timestamp::from_unix_millis(0); + let host = "example1.com"; + let port = 80; + + // Create a sample EraActivity instance + let era_activity1 = EraActivity { id: 17, start: 1, end: 2 }; + let era_activity2 = EraActivity { id: 18, start: 1, end: 2 }; + let 
era_activity3 = EraActivity { id: 19, start: 1, end: 2 }; + let era_activity_json = serde_json::to_string(&vec![ + era_activity1.clone(), + era_activity2.clone(), + era_activity3, + ]) + .unwrap(); + + // Mock HTTP request and response + let pending_request = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/eras", host, port), + response: Some(era_activity_json.as_bytes().to_vec()), + sent: true, + ..Default::default() + }; + offchain_state.expect_request(pending_request); + drop(offchain_state); + + let node_params = StorageNodeParams { + ssl: false, + host: host.as_bytes().to_vec(), + http_port: port, + mode: StorageNodeMode::DAC, + p2p_port: 5555, + grpc_port: 4444, + domain: b"example2.com".to_vec(), + }; + + let result = Pallet::::fetch_processed_era(&node_params); + assert!(result.is_ok()); + let activities = result.unwrap(); + assert_eq!(activities[0].id, era_activity1.id); + assert_eq!(activities[1].id, era_activity2.id); + }); +} + +#[test] +fn get_era_for_validation_works() { + let mut ext = TestExternalities::default(); + let (offchain, offchain_state) = TestOffchainExt::new(); + let (pool, _) = TestTransactionPoolExt::new(); + + ext.register_extension(OffchainWorkerExt::new(offchain.clone())); + ext.register_extension(OffchainDbExt::new(Box::new(offchain.clone()))); + ext.register_extension(TransactionPoolExt::new(pool)); + + ext.execute_with(|| { + let key = format!("offchain::validator::{:?}", KEY_TYPE).into_bytes(); + + let mut offchain_state = offchain_state.write(); + offchain_state.persistent_storage.set( + b"", + &key, + b"9ef98ad9c3626ba725e78d76cfcfc4b4d07e84f0388465bc7eb992e3e117234a".as_ref(), + ); + offchain_state.timestamp = Timestamp::from_unix_millis(0); + let host1 = "example1.com"; + let host2 = "example2.com"; + let host3 = "example3.com"; + let host4 = "example4.com"; + let port = 80; + let era_activity1 = EraActivity { id: 16, start: 1, end: 2 }; + let era_activity2 = EraActivity { id: 17, start: 1, 
end: 2 }; + let era_activity3 = EraActivity { id: 18, start: 1, end: 2 }; + let era_activity4 = EraActivity { id: 19, start: 1, end: 2 }; + let era_activity_json1 = serde_json::to_string(&vec![ + era_activity1.clone(), //16 + era_activity2.clone(), //17 + era_activity3.clone(), //18 + era_activity4.clone(), //19 + ]) + .unwrap(); + let era_activity_json2 = serde_json::to_string(&vec![ + era_activity1.clone(), //16 + era_activity2.clone(), //17 + era_activity3.clone(), //18 + ]) + .unwrap(); + let era_activity_json3 = serde_json::to_string(&vec![ + era_activity1.clone(), //16 + era_activity2.clone(), //17 + era_activity3.clone(), //18 + ]) + .unwrap(); + let era_activity_json4 = serde_json::to_string(&vec![ + era_activity1.clone(), //16 + era_activity2.clone(), //17 + era_activity3.clone(), //18 + ]) + .unwrap(); + let pending_request1 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/eras", host1, port), + response: Some(era_activity_json1.as_bytes().to_vec()), + sent: true, + ..Default::default() + }; + let pending_request2 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/eras", host2, port), + response: Some(era_activity_json2.as_bytes().to_vec()), + sent: true, + ..Default::default() + }; + let pending_request3 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/eras", host3, port), + response: Some(era_activity_json3.as_bytes().to_vec()), + sent: true, + ..Default::default() + }; + let pending_request4 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/eras", host4, port), + response: Some(era_activity_json4.as_bytes().to_vec()), + sent: true, + ..Default::default() + }; + offchain_state.expect_request(pending_request1); + offchain_state.expect_request(pending_request2); + offchain_state.expect_request(pending_request3); + offchain_state.expect_request(pending_request4); + + drop(offchain_state); + + let node_params1 = 
StorageNodeParams { + ssl: false, + host: host1.as_bytes().to_vec(), + http_port: port, + mode: StorageNodeMode::DAC, + p2p_port: 5555, + grpc_port: 4444, + domain: b"example2.com".to_vec(), + }; + + let node_params2 = StorageNodeParams { + ssl: false, + host: host2.as_bytes().to_vec(), + http_port: port, + mode: StorageNodeMode::DAC, + p2p_port: 5555, + grpc_port: 4444, + domain: b"example3.com".to_vec(), + }; + + let node_params3 = StorageNodeParams { + ssl: false, + host: host3.as_bytes().to_vec(), + http_port: port, + mode: StorageNodeMode::DAC, + p2p_port: 5555, + grpc_port: 4444, + domain: b"example4.com".to_vec(), + }; + + let node_params4 = StorageNodeParams { + ssl: false, + host: host4.as_bytes().to_vec(), + http_port: port, + mode: StorageNodeMode::DAC, + p2p_port: 5555, + grpc_port: 4444, + domain: b"example5.com".to_vec(), + }; + + let dac_nodes: Vec<(NodePubKey, StorageNodeParams)> = vec![ + (NodePubKey::StoragePubKey(StorageNodePubKey::new([1; 32])), node_params1), + (NodePubKey::StoragePubKey(StorageNodePubKey::new([2; 32])), node_params2), + (NodePubKey::StoragePubKey(StorageNodePubKey::new([3; 32])), node_params3), + (NodePubKey::StoragePubKey(StorageNodePubKey::new([4; 32])), node_params4), + ]; + + let cluster_id = ClusterId::from([12; 20]); + let result = Pallet::::get_era_for_validation(&cluster_id, &dac_nodes); + assert_eq!(result.unwrap().unwrap(), era_activity1); //16 + }); +} + +#[test] +fn test_get_last_validated_era() { + let cluster_id1 = ClusterId::from([12; 20]); + let cluster_id2 = ClusterId::from([13; 20]); + let era_1 = 1; + let era_2 = 2; + let payers_root: ActivityHash = [1; 32]; + let payees_root: ActivityHash = [2; 32]; + let validators = get_validators(); + + new_test_ext().execute_with(|| { + assert_ok!(Pallet::::get_last_validated_era(&cluster_id1, validators[0].clone()) + .map(|era| { + assert_eq!(era, None); + })); + + let mut validators_map_1 = BTreeMap::new(); + validators_map_1.insert( + (payers_root, payees_root), + 
vec![validators[1].clone(), validators[2].clone(), validators[3].clone()], + ); + + let validation_1 = EraValidation { + validators: validators_map_1, + start_era: 1, + end_era: 2, + payers_merkle_root_hash: payers_root, + payees_merkle_root_hash: payees_root, + status: EraValidationStatus::ValidatingData, + }; + + >::insert(cluster_id1, era_1, validation_1); + + // still no - different accountid + assert_ok!(Pallet::::get_last_validated_era(&cluster_id1, validators[0].clone()) + .map(|era| { + assert_eq!(era, None); + })); + + // still no - different cluster id + assert_ok!(Pallet::::get_last_validated_era(&cluster_id2, validators[1].clone()) + .map(|era| { + assert_eq!(era, None); + })); + + let mut validators_map_2 = BTreeMap::new(); + validators_map_2 + .insert((payers_root, payees_root), vec![validators[2].clone(), validators[3].clone()]); + + let validation_2 = EraValidation { + validators: validators_map_2, + start_era: 1, + end_era: 2, + payers_merkle_root_hash: payers_root, + payees_merkle_root_hash: payees_root, + status: EraValidationStatus::ValidatingData, + }; + + >::insert(cluster_id1, era_2, validation_2); + + // Now the last validated era should be ERA_2 + assert_ok!(Pallet::::get_last_validated_era(&cluster_id1, validators[2].clone()) + .map(|era| { + assert_eq!(era, Some(era_2)); + })); + + assert_ok!(Pallet::::get_last_validated_era(&cluster_id1, validators[1].clone()) + .map(|era| { + assert_eq!(era, Some(era_1)); + })); + }); +} + +#[test] +fn test_get_era_for_payout() { + // Initialize test data + let cluster_id = ClusterId::default(); // Replace with actual initialization + let status = EraValidationStatus::ReadyForPayout; // Test with different statuses + + // Insert some era validations into storage + let era_id_1 = 1; + let era_id_2 = 2; + let era_validation_1 = EraValidation:: { + validators: Default::default(), + start_era: 0, + end_era: 0, + payers_merkle_root_hash: Default::default(), + payees_merkle_root_hash: Default::default(), + 
status: EraValidationStatus::ReadyForPayout, + }; + let era_validation_2 = EraValidation:: { + validators: Default::default(), + start_era: 0, + end_era: 0, + payers_merkle_root_hash: Default::default(), + payees_merkle_root_hash: Default::default(), + status: EraValidationStatus::PayoutInProgress, + }; + + new_test_ext().execute_with(|| { + EraValidations::::insert(cluster_id, era_id_1, &era_validation_1); + EraValidations::::insert(cluster_id, era_id_2, &era_validation_2); + + let mut result = Pallet::::get_era_for_payout(&cluster_id, status); + assert_eq!(result, Some((era_id_1, 0, 0))); + + result = + Pallet::::get_era_for_payout(&cluster_id, EraValidationStatus::PayoutSuccess); + assert_eq!(result, None); + }); +} + +#[test] +fn create_merkle_root_works() { + new_test_ext().execute_with(|| { + let a: ActivityHash = [0; 32]; + let b: ActivityHash = [1; 32]; + let c: ActivityHash = [2; 32]; + let d: ActivityHash = [3; 32]; + let e: ActivityHash = [4; 32]; + let cluster_id = ClusterId::default(); + let era_id_1 = 1; + + let leaves = vec![a, b, c, d, e]; + + let root = DdcVerification::create_merkle_root(&cluster_id, era_id_1, &leaves).unwrap(); + + assert_eq!( + root, + [ + 205, 34, 92, 22, 66, 39, 53, 146, 126, 111, 191, 174, 107, 224, 161, 127, 150, 69, + 255, 15, 237, 252, 116, 39, 186, 26, 40, 154, 180, 110, 185, 7 + ] + ); + }); +} + +#[test] +fn create_merkle_root_empty() { + new_test_ext().execute_with(|| { + let cluster_id = ClusterId::default(); + let era_id_1 = 1; + let leaves = Vec::::new(); + let root = DdcVerification::create_merkle_root(&cluster_id, era_id_1, &leaves).unwrap(); + + assert_eq!(root, ActivityHash::default()); + }); +} + +#[test] +fn proof_merkle_leaf_works() { + new_test_ext().execute_with(|| { + let a: ActivityHash = [0; 32]; + let b: ActivityHash = [1; 32]; + let c: ActivityHash = [2; 32]; + let d: ActivityHash = [3; 32]; + let e: ActivityHash = [4; 32]; + let f: ActivityHash = [5; 32]; + + let leaves = [a, b, c, d, e]; + let store 
= MemStore::default(); + let mut mmr: MMR> = + MemMMR::<_, MergeActivityHash>::new(0, &store); + let leaf_position_map: Vec<(ActivityHash, u64)> = + leaves.iter().map(|a| (*a, mmr.push(*a).unwrap())).collect(); + + let leaf_position: Vec<(u64, ActivityHash)> = leaf_position_map + .iter() + .filter(|&(l, _)| l == &c) + .map(|&(ref l, p)| (p, *l)) + .collect(); + let position: Vec = leaf_position.clone().into_iter().map(|(p, _)| p).collect(); + let root = mmr.get_root().unwrap(); + + assert_eq!(leaf_position.len(), 1); + assert_eq!(position.len(), 1); + assert!(DdcVerification::proof_merkle_leaf( + root, + &MMRProof { + mmr_size: mmr.mmr_size(), + proof: mmr.gen_proof(position.clone()).unwrap().proof_items().to_vec(), + leaf_with_position: leaf_position[0] + } + ) + .unwrap()); + + assert_noop!( + DdcVerification::proof_merkle_leaf( + root, + &MMRProof { + mmr_size: 0, + proof: mmr.gen_proof(position).unwrap().proof_items().to_vec(), + leaf_with_position: (6, f) + } + ), + Error::::FailToVerifyMerkleProof + ); + }); +} + +#[test] +fn test_single_ocw_pallet_integration() { + let mut ext = new_test_ext(); + let (offchain, offchain_state) = TestOffchainExt::new(); + let (pool, _pool_state) = TestTransactionPoolExt::new(); + + let (pair, _seed) = sp_core::sr25519::Pair::from_phrase( + "spider sell nice animal border success square soda stem charge caution echo", + None, + ) + .unwrap(); + let keystore = MemoryKeystore::new(); + keystore + .insert( + KEY_TYPE, + "0xb6186f80dce7190294665ab53860de2841383bb202c562bb8b81a624351fa318", + pair.public().as_ref(), + ) + .unwrap(); + + ext.register_extension(OffchainWorkerExt::new(offchain.clone())); + ext.register_extension(OffchainDbExt::new(offchain)); + ext.register_extension(TransactionPoolExt::new(pool)); + ext.register_extension(KeystoreExt::new(keystore)); + + ext.execute_with(|| { + let mut offchain_state = offchain_state.write(); + let key = format!("offchain::validator::{:?}", KEY_TYPE).into_bytes(); + 
offchain_state.persistent_storage.set( + b"", + &key, + b"9ef98ad9c3626ba725e78d76cfcfc4b4d07e84f0388465bc7eb992e3e117234a".as_ref(), + ); + offchain_state.timestamp = Timestamp::from_unix_millis(0); + let host1 = "178.251.228.236"; + let host2 = "95.217.8.119"; + let host3 = "178.251.228.42"; + let host4 = "37.27.30.47"; + let host5 = "178.251.228.49"; + let host6 = "159.69.207.65"; + let host7 = "178.251.228.165"; + let host8 = "49.13.211.157"; + let host9 = "178.251.228.44"; + let port = 8080; + + let pending_request1 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/eras", host1, port), + response: Some(br#"[{"id":476814,"start":0,"end":1716533999999,"processing_time_ms":0,"total_records":0,"total_buckets":0},{"id":476815,"start":1716534000000,"end":1716537599999,"processing_time_ms":2,"total_records":54,"total_buckets":2},{"id":476816,"start":1716537600000,"end":1716541199999,"processing_time_ms":10,"total_records":803,"total_buckets":29},{"id":476817,"start":1716541200000,"end":1716544799999,"processing_time_ms":11,"total_records":986,"total_buckets":28}]"#.to_vec()), + sent: true, + ..Default::default() + }; + let pending_request2 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/eras", host2, port), + response: Some(br#"[{"id":476814,"start":0,"end":1716533999999,"processing_time_ms":0,"total_records":0,"total_buckets":0},{"id":476815,"start":1716534000000,"end":1716537599999,"processing_time_ms":2,"total_records":54,"total_buckets":2},{"id":476816,"start":1716537600000,"end":1716541199999,"processing_time_ms":10,"total_records":803,"total_buckets":29},{"id":476817,"start":1716541200000,"end":1716544799999,"processing_time_ms":11,"total_records":986,"total_buckets":28}]"#.to_vec()), + sent: true, + ..Default::default() + }; + let pending_request3 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/eras", host3, port), + response: 
Some(br#"[{"id":476814,"start":0,"end":1716533999999,"processing_time_ms":0,"total_records":0,"total_buckets":0},{"id":476815,"start":1716534000000,"end":1716537599999,"processing_time_ms":2,"total_records":54,"total_buckets":2},{"id":476816,"start":1716537600000,"end":1716541199999,"processing_time_ms":10,"total_records":803,"total_buckets":29},{"id":476817,"start":1716541200000,"end":1716544799999,"processing_time_ms":11,"total_records":986,"total_buckets":28}]"#.to_vec()), + sent: true, + ..Default::default() + }; + let pending_request4 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/eras", host4, port), + response: Some(br#"[{"id":476814,"start":0,"end":1716533999999,"processing_time_ms":0,"total_records":0,"total_buckets":0},{"id":476815,"start":1716534000000,"end":1716537599999,"processing_time_ms":2,"total_records":54,"total_buckets":2},{"id":476816,"start":1716537600000,"end":1716541199999,"processing_time_ms":10,"total_records":803,"total_buckets":29},{"id":476817,"start":1716541200000,"end":1716544799999,"processing_time_ms":11,"total_records":986,"total_buckets":28}]"#.to_vec()), + sent: true, + ..Default::default() + }; + let pending_request5 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/eras", host5, port), + response: Some(br#"[{"id":476814,"start":0,"end":1716533999999,"processing_time_ms":0,"total_records":0,"total_buckets":0},{"id":476815,"start":1716534000000,"end":1716537599999,"processing_time_ms":2,"total_records":54,"total_buckets":2},{"id":476816,"start":1716537600000,"end":1716541199999,"processing_time_ms":10,"total_records":803,"total_buckets":29},{"id":476817,"start":1716541200000,"end":1716544799999,"processing_time_ms":11,"total_records":986,"total_buckets":28}]"#.to_vec()), + sent: true, + ..Default::default() + }; + let pending_request6 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/eras", host6, port), + response: 
Some(br#"[{"id":476814,"start":0,"end":1716533999999,"processing_time_ms":0,"total_records":0,"total_buckets":0},{"id":476815,"start":1716534000000,"end":1716537599999,"processing_time_ms":2,"total_records":54,"total_buckets":2},{"id":476816,"start":1716537600000,"end":1716541199999,"processing_time_ms":10,"total_records":803,"total_buckets":29},{"id":476817,"start":1716541200000,"end":1716544799999,"processing_time_ms":11,"total_records":986,"total_buckets":28}]"#.to_vec()), + sent: true, + ..Default::default() + }; + let pending_request7 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/eras", host7, port), + response: Some(br#"[{"id":476814,"start":0,"end":1716533999999,"processing_time_ms":0,"total_records":0,"total_buckets":0},{"id":476815,"start":1716534000000,"end":1716537599999,"processing_time_ms":2,"total_records":54,"total_buckets":2},{"id":476816,"start":1716537600000,"end":1716541199999,"processing_time_ms":10,"total_records":803,"total_buckets":29},{"id":476817,"start":1716541200000,"end":1716544799999,"processing_time_ms":11,"total_records":986,"total_buckets":28}]"#.to_vec()), + sent: true, + ..Default::default() + }; + let pending_request8 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/eras", host8, port), + response: Some(br#"[{"id":476814,"start":0,"end":1716533999999,"processing_time_ms":0,"total_records":0,"total_buckets":0},{"id":476815,"start":1716534000000,"end":1716537599999,"processing_time_ms":2,"total_records":54,"total_buckets":2},{"id":476816,"start":1716537600000,"end":1716541199999,"processing_time_ms":10,"total_records":803,"total_buckets":29},{"id":476817,"start":1716541200000,"end":1716544799999,"processing_time_ms":11,"total_records":986,"total_buckets":28}]"#.to_vec()), + sent: true, + ..Default::default() + }; + let pending_request9 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/eras", host9, port), + response: 
Some(br#"[{"id":476814,"start":0,"end":1716533999999,"processing_time_ms":0,"total_records":0,"total_buckets":0},{"id":476815,"start":1716534000000,"end":1716537599999,"processing_time_ms":2,"total_records":54,"total_buckets":2},{"id":476816,"start":1716537600000,"end":1716541199999,"processing_time_ms":10,"total_records":803,"total_buckets":29},{"id":476817,"start":1716541200000,"end":1716544799999,"processing_time_ms":11,"total_records":986,"total_buckets":28}]"#.to_vec()), + sent: true, + ..Default::default() + }; + + + let node_pending_request1 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/nodes?eraId=476814", host1, port), + response: Some(br#"[{"node_id": "0x48594f1fd4f05135914c42b03e63b61f6a3e4c537ccee3dbac555ef6df371b7e","provider_id": "0xf6a3e4c537ccee3dbac555ef6df371b7e48594f1fd4f05135914c42b03e63b61","stored_bytes": 675613289,"transferred_bytes": 1097091579,"number_of_puts": 889,"number_of_gets": 97},{"node_id": "0x9ef98ad9c3626ba725e78d76cfcfc4b4d07e84f0388465bc7eb992e3e117234a","provider_id": "0x8d76cfcfc4b4d07e84f0388465bc7eb992e3e117234a9ef98ad9c3626ba725e7","stored_bytes": 0, "transferred_bytes": 38,"number_of_puts": 0,"number_of_gets": 1}]"#.to_vec()), + sent: true, + ..Default::default() + }; + + let node_pending_request2 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/nodes?eraId=476814", host2, port), + response: Some(br#"[{"node_id": "0x48594f1fd4f05135914c42b03e63b61f6a3e4c537ccee3dbac555ef6df371b7e","provider_id": "0xf6a3e4c537ccee3dbac555ef6df371b7e48594f1fd4f05135914c42b03e63b61","stored_bytes": 675613289,"transferred_bytes": 1097091579,"number_of_puts": 889,"number_of_gets": 97},{"node_id": "0x9ef98ad9c3626ba725e78d76cfcfc4b4d07e84f0388465bc7eb992e3e117234a","provider_id": "0x8d76cfcfc4b4d07e84f0388465bc7eb992e3e117234a9ef98ad9c3626ba725e7","stored_bytes": 0, "transferred_bytes": 38,"number_of_puts": 0,"number_of_gets": 1}]"#.to_vec()), + sent: true, + 
..Default::default() + }; + + let node_pending_request3 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/nodes?eraId=476814", host3, port), + response: Some(br#"[{"node_id": "0x48594f1fd4f05135914c42b03e63b61f6a3e4c537ccee3dbac555ef6df371b7e","provider_id": "0xf6a3e4c537ccee3dbac555ef6df371b7e48594f1fd4f05135914c42b03e63b61","stored_bytes": 675613289,"transferred_bytes": 1097091579,"number_of_puts": 889,"number_of_gets": 97},{"node_id": "0x9ef98ad9c3626ba725e78d76cfcfc4b4d07e84f0388465bc7eb992e3e117234a","provider_id": "0x8d76cfcfc4b4d07e84f0388465bc7eb992e3e117234a9ef98ad9c3626ba725e7","stored_bytes": 0, "transferred_bytes": 38,"number_of_puts": 0,"number_of_gets": 1}]"#.to_vec()), + sent: true, + ..Default::default() + }; + + let node_pending_request4 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/nodes?eraId=476814", host4, port), + response: Some(br#"[{"node_id": "0x48594f1fd4f05135914c42b03e63b61f6a3e4c537ccee3dbac555ef6df371b7e","provider_id": "0xf6a3e4c537ccee3dbac555ef6df371b7e48594f1fd4f05135914c42b03e63b61","stored_bytes": 675613289,"transferred_bytes": 1097091579,"number_of_puts": 889,"number_of_gets": 97},{"node_id": "0x9ef98ad9c3626ba725e78d76cfcfc4b4d07e84f0388465bc7eb992e3e117234a","provider_id": "0x8d76cfcfc4b4d07e84f0388465bc7eb992e3e117234a9ef98ad9c3626ba725e7","stored_bytes": 0, "transferred_bytes": 38,"number_of_puts": 0,"number_of_gets": 1}]"#.to_vec()), + sent: true, + ..Default::default() + }; + + let node_pending_request5 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/nodes?eraId=476814", host5, port), + response: Some(br#"[{"node_id": "0x48594f1fd4f05135914c42b03e63b61f6a3e4c537ccee3dbac555ef6df371b7e","provider_id": "0xf6a3e4c537ccee3dbac555ef6df371b7e48594f1fd4f05135914c42b03e63b61","stored_bytes": 675613289,"transferred_bytes": 1097091579,"number_of_puts": 889,"number_of_gets": 97},{"node_id": 
"0x9ef98ad9c3626ba725e78d76cfcfc4b4d07e84f0388465bc7eb992e3e117234a","provider_id": "0x8d76cfcfc4b4d07e84f0388465bc7eb992e3e117234a9ef98ad9c3626ba725e7","stored_bytes": 0, "transferred_bytes": 38,"number_of_puts": 0,"number_of_gets": 1}]"#.to_vec()), + sent: true, + ..Default::default() + }; + + let node_pending_request6 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/nodes?eraId=476814", host6, port), + response: Some(br#"[{"node_id": "0x48594f1fd4f05135914c42b03e63b61f6a3e4c537ccee3dbac555ef6df371b7e","provider_id": "0xf6a3e4c537ccee3dbac555ef6df371b7e48594f1fd4f05135914c42b03e63b61","stored_bytes": 675613289,"transferred_bytes": 1097091579,"number_of_puts": 889,"number_of_gets": 97},{"node_id": "0x9ef98ad9c3626ba725e78d76cfcfc4b4d07e84f0388465bc7eb992e3e117234a","provider_id": "0x8d76cfcfc4b4d07e84f0388465bc7eb992e3e117234a9ef98ad9c3626ba725e7","stored_bytes": 0, "transferred_bytes": 38,"number_of_puts": 0,"number_of_gets": 1}]"#.to_vec()), + sent: true, + ..Default::default() + }; + + let node_pending_request7 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/nodes?eraId=476814", host7, port), + response: Some(br#"[{"node_id": "0x48594f1fd4f05135914c42b03e63b61f6a3e4c537ccee3dbac555ef6df371b7e","provider_id": "0xf6a3e4c537ccee3dbac555ef6df371b7e48594f1fd4f05135914c42b03e63b61","stored_bytes": 675613289,"transferred_bytes": 1097091579,"number_of_puts": 889,"number_of_gets": 97},{"node_id": "0x9ef98ad9c3626ba725e78d76cfcfc4b4d07e84f0388465bc7eb992e3e117234a","provider_id": "0x8d76cfcfc4b4d07e84f0388465bc7eb992e3e117234a9ef98ad9c3626ba725e7","stored_bytes": 0, "transferred_bytes": 38,"number_of_puts": 0,"number_of_gets": 1}]"#.to_vec()), + sent: true, + ..Default::default() + }; + + let node_pending_request8 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/nodes?eraId=476814", host8, port), + response: Some(br#"[{"node_id": 
"0x48594f1fd4f05135914c42b03e63b61f6a3e4c537ccee3dbac555ef6df371b7e","provider_id": "0xf6a3e4c537ccee3dbac555ef6df371b7e48594f1fd4f05135914c42b03e63b61","stored_bytes": 675613289,"transferred_bytes": 1097091579,"number_of_puts": 889,"number_of_gets": 97},{"node_id": "0x9ef98ad9c3626ba725e78d76cfcfc4b4d07e84f0388465bc7eb992e3e117234a","provider_id": "0x8d76cfcfc4b4d07e84f0388465bc7eb992e3e117234a9ef98ad9c3626ba725e7","stored_bytes": 0, "transferred_bytes": 38,"number_of_puts": 0,"number_of_gets": 1}]"#.to_vec()), + sent: true, + ..Default::default() + }; + + let node_pending_request9 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/nodes?eraId=476814", host9, port), + response: Some(br#"[{"node_id": "0x48594f1fd4f05135914c42b03e63b61f6a3e4c537ccee3dbac555ef6df371b7e","provider_id": "0xf6a3e4c537ccee3dbac555ef6df371b7e48594f1fd4f05135914c42b03e63b61","stored_bytes": 675613289,"transferred_bytes": 1097091579,"number_of_puts": 889,"number_of_gets": 97},{"node_id": "0x9ef98ad9c3626ba725e78d76cfcfc4b4d07e84f0388465bc7eb992e3e117234a","provider_id": "0x8d76cfcfc4b4d07e84f0388465bc7eb992e3e117234a9ef98ad9c3626ba725e7","stored_bytes": 0, "transferred_bytes": 38,"number_of_puts": 0,"number_of_gets": 1}]"#.to_vec()), + sent: true, + ..Default::default() + }; + + let bucket_pending_request1 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/buckets?eraId=476814", host1, port), + response: Some(br#"[{"bucket_id": 90235,"customer_id": "0xbe26b2458fb0c9df4ec26ec5ba083051402b2a3b9d4a7fe6106fe9f8b5efde2c","stored_bytes": 0,"transferred_bytes": 38,"number_of_puts": 0,"number_of_gets": 1},{"bucket_id": 90236,"customer_id": "0x9cc588b1d749b6d727d665463641cfeb1c8c843e81faf468d21922d6296b6f45","stored_bytes": 0,"transferred_bytes": 38,"number_of_puts": 0,"number_of_gets": 1}]"#.to_vec()), + sent: true, + ..Default::default() + }; + + let bucket_pending_request2 = PendingRequest { + method: "GET".to_string(), + uri: 
format!("http://{}:{}/activity/buckets?eraId=476814", host2, port), + response: Some(br#"[{"bucket_id": 90235,"customer_id": "0xbe26b2458fb0c9df4ec26ec5ba083051402b2a3b9d4a7fe6106fe9f8b5efde2c","stored_bytes": 0,"transferred_bytes": 38,"number_of_puts": 0,"number_of_gets": 1},{"bucket_id": 90236,"customer_id": "0x9cc588b1d749b6d727d665463641cfeb1c8c843e81faf468d21922d6296b6f45","stored_bytes": 0,"transferred_bytes": 38,"number_of_puts": 0,"number_of_gets": 1}]"#.to_vec()), + sent: true, + ..Default::default() + }; + + let bucket_pending_request3 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/buckets?eraId=476814", host3, port), + response: Some(br#"[{"bucket_id": 90235,"customer_id": "0xbe26b2458fb0c9df4ec26ec5ba083051402b2a3b9d4a7fe6106fe9f8b5efde2c","stored_bytes": 0,"transferred_bytes": 38,"number_of_puts": 0,"number_of_gets": 1},{"bucket_id": 90236,"customer_id": "0x9cc588b1d749b6d727d665463641cfeb1c8c843e81faf468d21922d6296b6f45","stored_bytes": 0,"transferred_bytes": 38,"number_of_puts": 0,"number_of_gets": 1}]"#.to_vec()), + sent: true, + ..Default::default() + }; + + let bucket_pending_request4 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/buckets?eraId=476814", host4, port), + response: Some(br#"[{"bucket_id": 90235,"customer_id": "0xbe26b2458fb0c9df4ec26ec5ba083051402b2a3b9d4a7fe6106fe9f8b5efde2c","stored_bytes": 0,"transferred_bytes": 38,"number_of_puts": 0,"number_of_gets": 1},{"bucket_id": 90236,"customer_id": "0x9cc588b1d749b6d727d665463641cfeb1c8c843e81faf468d21922d6296b6f45","stored_bytes": 0,"transferred_bytes": 38,"number_of_puts": 0,"number_of_gets": 1}]"#.to_vec()), + sent: true, + ..Default::default() + }; + + let bucket_pending_request5 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/buckets?eraId=476814", host5, port), + response: Some(br#"[{"bucket_id": 90235,"customer_id": 
"0xbe26b2458fb0c9df4ec26ec5ba083051402b2a3b9d4a7fe6106fe9f8b5efde2c","stored_bytes": 0,"transferred_bytes": 38,"number_of_puts": 0,"number_of_gets": 1},{"bucket_id": 90236,"customer_id": "0x9cc588b1d749b6d727d665463641cfeb1c8c843e81faf468d21922d6296b6f45","stored_bytes": 0,"transferred_bytes": 38,"number_of_puts": 0,"number_of_gets": 1}]"#.to_vec()), + sent: true, + ..Default::default() + }; + + let bucket_pending_request6 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/buckets?eraId=476814", host6, port), + response: Some(br#"[{"bucket_id": 90235,"customer_id": "0xbe26b2458fb0c9df4ec26ec5ba083051402b2a3b9d4a7fe6106fe9f8b5efde2c","stored_bytes": 0,"transferred_bytes": 38,"number_of_puts": 0,"number_of_gets": 1},{"bucket_id": 90236,"customer_id": "0x9cc588b1d749b6d727d665463641cfeb1c8c843e81faf468d21922d6296b6f45","stored_bytes": 0,"transferred_bytes": 38,"number_of_puts": 0,"number_of_gets": 1}]"#.to_vec()), + sent: true, + ..Default::default() + }; + + let bucket_pending_request7 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/buckets?eraId=476814", host7, port), + response: Some(br#"[{"bucket_id": 90235,"customer_id": "0xbe26b2458fb0c9df4ec26ec5ba083051402b2a3b9d4a7fe6106fe9f8b5efde2c","stored_bytes": 0,"transferred_bytes": 38,"number_of_puts": 0,"number_of_gets": 1},{"bucket_id": 90236,"customer_id": "0x9cc588b1d749b6d727d665463641cfeb1c8c843e81faf468d21922d6296b6f45","stored_bytes": 0,"transferred_bytes": 38,"number_of_puts": 0,"number_of_gets": 1}]"#.to_vec()), + sent: true, + ..Default::default() + }; + + let bucket_pending_request8 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/buckets?eraId=476814", host8, port), + response: Some(br#"[{"bucket_id": 90235,"customer_id": "0xbe26b2458fb0c9df4ec26ec5ba083051402b2a3b9d4a7fe6106fe9f8b5efde2c","stored_bytes": 0,"transferred_bytes": 38,"number_of_puts": 0,"number_of_gets": 1},{"bucket_id": 
90236,"customer_id": "0x9cc588b1d749b6d727d665463641cfeb1c8c843e81faf468d21922d6296b6f45","stored_bytes": 0,"transferred_bytes": 38,"number_of_puts": 0,"number_of_gets": 1}]"#.to_vec()), + sent: true, + ..Default::default() + }; + + let bucket_pending_request9 = PendingRequest { + method: "GET".to_string(), + uri: format!("http://{}:{}/activity/buckets?eraId=476814", host9, port), + response: Some(br#"[{"bucket_id": 90235,"customer_id": "0xbe26b2458fb0c9df4ec26ec5ba083051402b2a3b9d4a7fe6106fe9f8b5efde2c","stored_bytes": 0,"transferred_bytes": 38,"number_of_puts": 0,"number_of_gets": 1},{"bucket_id": 90236,"customer_id": "0x9cc588b1d749b6d727d665463641cfeb1c8c843e81faf468d21922d6296b6f45","stored_bytes": 0,"transferred_bytes": 38,"number_of_puts": 0,"number_of_gets": 1}]"#.to_vec()), + sent: true, + ..Default::default() + }; + + offchain_state.expect_request(pending_request1); + offchain_state.expect_request(pending_request2); + offchain_state.expect_request(pending_request3); + offchain_state.expect_request(pending_request4); + offchain_state.expect_request(pending_request5); + offchain_state.expect_request(pending_request6); + offchain_state.expect_request(pending_request7); + offchain_state.expect_request(pending_request8); + offchain_state.expect_request(pending_request9); + offchain_state.expect_request(node_pending_request1); + offchain_state.expect_request(node_pending_request2); + offchain_state.expect_request(node_pending_request3); + offchain_state.expect_request(node_pending_request4); + offchain_state.expect_request(node_pending_request5); + offchain_state.expect_request(node_pending_request6); + offchain_state.expect_request(node_pending_request7); + offchain_state.expect_request(node_pending_request8); + offchain_state.expect_request(node_pending_request9); + offchain_state.expect_request(bucket_pending_request1); + offchain_state.expect_request(bucket_pending_request2); + offchain_state.expect_request(bucket_pending_request3); + 
offchain_state.expect_request(bucket_pending_request4); + offchain_state.expect_request(bucket_pending_request5); + offchain_state.expect_request(bucket_pending_request6); + offchain_state.expect_request(bucket_pending_request7); + offchain_state.expect_request(bucket_pending_request8); + offchain_state.expect_request(bucket_pending_request9); + drop(offchain_state); + + // // Offchain worker should be triggered if block number is divided by 100 + let block = 500; + System::set_block_number(block); + let cluster_id = ClusterId::from([12; 20]); + + ClusterToValidate::::put(cluster_id); + DdcVerification::offchain_worker(block); + }); +} diff --git a/pallets/ddc-verification/src/weights.rs b/pallets/ddc-verification/src/weights.rs new file mode 100644 index 000000000..6603465a4 --- /dev/null +++ b/pallets/ddc-verification/src/weights.rs @@ -0,0 +1,59 @@ +//! Autogenerated weights for pallet_ddc_verification +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2024-05-21, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! HOSTNAME: `192.168.1.4`, CPU: `` +//! EXECUTION: , WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 + +// Executed Command: +// ./target/release/cere +// benchmark +// pallet +// --chain +// dev +// --wasm-execution=compiled +// --pallet +// pallet_ddc_verification +// --extrinsic +// * +// --steps +// 50 +// --repeat +// 20 +// --output=./pallets/ddc-verification/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + +#![cfg_attr(rustfmt, rustfmt_skip)] +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_ddc_verification. +pub trait WeightInfo { + fn create_billing_reports() -> Weight; +} + +/// Weights for pallet_ddc_verification using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + // Storage: `DdcVerification::ActiveBillingReports` (r:1 w:1) + // Proof: `DdcVerification::ActiveBillingReports` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn create_billing_reports() -> Weight { + Weight::from_parts(11_000_000_u64, 0) + .saturating_add(T::DbWeight::get().reads(1_u64)) + .saturating_add(T::DbWeight::get().writes(1_u64)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + // Storage: `DdcVerification::ActiveBillingReports` (r:1 w:1) + // Proof: `DdcVerification::ActiveBillingReports` (`max_values`: None, `max_size`: None, mode: `Measured`) + fn create_billing_reports() -> Weight { + Weight::from_parts(11_000_000_u64, 0) + .saturating_add(RocksDbWeight::get().reads(1_u64)) + .saturating_add(RocksDbWeight::get().writes(1_u64)) + } +} diff --git a/primitives/Cargo.toml b/primitives/Cargo.toml index 7e1b48fd8..e89aebc6d 100644 --- a/primitives/Cargo.toml +++ b/primitives/Cargo.toml @@ -9,12 +9,14 @@ readme.workspace = true repository.workspace = true [dependencies] +blake2 = { workspace = true } codec = { workspace = true } -scale-info = { workspace = true } -serde = { workspace = true } - frame-support = { workspace = true } frame-system = { workspace = true } +polkadot-ckb-merkle-mountain-range = { workspace = true } +scale-info = { workspace = true } +serde = { workspace = true } +sp-application-crypto = { workspace = true } sp-core = { workspace = true } sp-runtime = { workspace = true } sp-std = { workspace = true } @@ -22,11 +24,14 @@ sp-std = { workspace = true } [features] default = ["std"] std = [ + "blake2/std", + "polkadot-ckb-merkle-mountain-range/std", "codec/std", "scale-info/std", "sp-core/std", "sp-std/std", "sp-runtime/std", + "sp-application-crypto/std", ] runtime-benchmarks = [ "frame-support/runtime-benchmarks", diff --git a/primitives/src/lib.rs b/primitives/src/lib.rs index ac3885855..6af0b6fa6 100644 --- 
a/primitives/src/lib.rs +++ b/primitives/src/lib.rs @@ -1,21 +1,50 @@ #![cfg_attr(not(feature = "std"), no_std)] +use blake2::{Blake2s256, Digest}; use codec::{Decode, Encode}; +use frame_support::parameter_types; +use polkadot_ckb_merkle_mountain_range::Merge; use scale_info::{prelude::vec::Vec, TypeInfo}; use serde::{Deserialize, Serialize}; -use sp_core::hash::H160; +use sp_core::{crypto::KeyTypeId, hash::H160}; use sp_runtime::{AccountId32, Perquintill, RuntimeDebug}; pub mod traits; +parameter_types! { + pub MaxHostLen: u8 = 255; + pub MaxDomainLen: u8 = 255; +} + +pub const MAX_PAYOUT_BATCH_COUNT: u16 = 1000; +pub const MAX_PAYOUT_BATCH_SIZE: u16 = 1000; pub const MILLICENTS: u128 = 100_000; pub const CENTS: u128 = 1_000 * MILLICENTS; // assume this is worth about a cent. pub const DOLLARS: u128 = 100 * CENTS; pub type ClusterId = H160; pub type DdcEra = u32; pub type BucketId = u64; -pub type StorageNodePubKey = AccountId32; pub type ClusterNodesCount = u16; +pub type StorageNodePubKey = AccountId32; +pub type ActivityHash = [u8; 32]; +pub type BatchIndex = u16; + +pub struct MergeActivityHash; +impl Merge for MergeActivityHash { + type Item = ActivityHash; + fn merge( + lhs: &Self::Item, // Left side of tree + rhs: &Self::Item, // Right side of tree + ) -> Result { + let mut hasher = Blake2s256::new(); + + hasher.update(lhs.as_slice()); + hasher.update(rhs.as_slice()); + let hash = hasher.finalize(); + + Ok(ActivityHash::from(sp_core::H256::from_slice(hash.as_slice()))) + } +} // ClusterParams includes Governance non-sensetive parameters only #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] @@ -115,6 +144,8 @@ pub enum StorageNodeMode { Storage = 2, /// DDC Storage node operates with enabled caching in RAM and doesn't store data in Hard Drive Cache = 3, + // DAC node + DAC = 4, } #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] @@ -187,3 +218,109 @@ pub struct ClusterNodesStats { pub validation_succeeded: ClusterNodesCount, pub 
validation_failed: ClusterNodesCount, } + +/// Stores usage of customers +#[derive( + PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo, Default, Clone, Serialize, Deserialize, +)] +pub struct CustomerUsage { + pub transferred_bytes: u64, + pub stored_bytes: u64, + pub number_of_puts: u64, + pub number_of_gets: u64, +} + +/// Stores usage of node provider +#[derive( + PartialEq, Eq, Encode, Decode, RuntimeDebug, TypeInfo, Default, Clone, Serialize, Deserialize, +)] +pub struct NodeUsage { + pub transferred_bytes: u64, + pub stored_bytes: u64, + pub number_of_puts: u64, + pub number_of_gets: u64, +} + +#[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo, PartialEq, Default)] +pub struct MMRProof { + pub mmr_size: u64, + pub proof: Vec, + pub leaf_with_position: (u64, ActivityHash), +} + +#[derive(Debug, PartialEq)] +pub enum NodeRepositoryError { + StorageNodeAlreadyExists, + StorageNodeDoesNotExist, +} + +#[derive(Debug, PartialEq)] +pub enum BucketVisitorError { + NoBucketWithId, + NotBucketOwner, +} + +#[derive(Debug, PartialEq)] +pub enum PayoutError { + BillingReportDoesNotExist, +} + +#[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo, PartialEq, Default)] +// don't remove or change numbers, if needed add a new state to the end with new number +// DAC uses the state value for integration! +pub enum PayoutState { + #[default] + NotInitialized = 1, + Initialized = 2, + ChargingCustomers = 3, + CustomersChargedWithFees = 4, + RewardingProviders = 5, + ProvidersRewarded = 6, + Finalized = 7, +} + +pub const KEY_TYPE: KeyTypeId = KeyTypeId(*b"cer!"); + +pub mod sr25519 { + mod app_sr25519 { + use scale_info::prelude::string::String; + use sp_application_crypto::{app_crypto, sr25519}; + + use crate::KEY_TYPE; + app_crypto!(sr25519, KEY_TYPE); + } + + sp_application_crypto::with_pair! 
{ + pub type AuthorityPair = app_sr25519::Pair; + } + pub type AuthoritySignature = app_sr25519::Signature; + pub type AuthorityId = app_sr25519::Public; +} + +pub mod crypto { + use scale_info::prelude::string::String; + use sp_core::sr25519::Signature as Sr25519Signature; + use sp_runtime::{ + app_crypto::{app_crypto, sr25519}, + traits::Verify, + MultiSignature, MultiSigner, + }; + + use super::KEY_TYPE; + app_crypto!(sr25519, KEY_TYPE); + pub struct OffchainIdentifierId; + impl frame_system::offchain::AppCrypto for OffchainIdentifierId { + type RuntimeAppPublic = Public; + type GenericSignature = sp_core::sr25519::Signature; + type GenericPublic = sp_core::sr25519::Public; + } + + // implemented for mock runtime in test + impl frame_system::offchain::AppCrypto<::Signer, Sr25519Signature> + for OffchainIdentifierId + { + type RuntimeAppPublic = Public; + type GenericSignature = sp_core::sr25519::Signature; + type GenericPublic = sp_core::sr25519::Public; + } +} diff --git a/primitives/src/traits/bucket.rs b/primitives/src/traits/bucket.rs new file mode 100644 index 000000000..908d005d0 --- /dev/null +++ b/primitives/src/traits/bucket.rs @@ -0,0 +1,20 @@ +use sp_runtime::DispatchResult; + +use crate::{BucketId, BucketVisitorError, ClusterId, CustomerUsage}; + +pub trait BucketManager { + fn inc_total_customer_usage( + cluster_id: &ClusterId, + bucket_id: BucketId, + content_owner: T::AccountId, + customer_usage: &CustomerUsage, + ) -> DispatchResult; +} + +pub trait BucketVisitor { + fn get_total_customer_usage( + cluster_id: &ClusterId, + bucket_id: BucketId, + content_owner: &T::AccountId, + ) -> Result, BucketVisitorError>; +} diff --git a/primitives/src/traits/cluster.rs b/primitives/src/traits/cluster.rs index df3024a1d..7bd0c64b8 100644 --- a/primitives/src/traits/cluster.rs +++ b/primitives/src/traits/cluster.rs @@ -1,5 +1,6 @@ use frame_system::{pallet_prelude::BlockNumberFor, Config}; use sp_runtime::{DispatchError, DispatchResult}; +use 
sp_std::prelude::*; use crate::{ ClusterBondingParams, ClusterFeesParams, ClusterId, ClusterNodeKind, ClusterNodeState, @@ -71,6 +72,8 @@ pub trait ClusterManager: ClusterQuery { validation_status: Option, ) -> bool; + fn get_nodes(cluster_id: &ClusterId) -> Result, DispatchError>; + fn add_node( cluster_id: &ClusterId, node_pub_key: &NodePubKey, diff --git a/primitives/src/traits/customer.rs b/primitives/src/traits/customer.rs index ac428137f..f4bf29cde 100644 --- a/primitives/src/traits/customer.rs +++ b/primitives/src/traits/customer.rs @@ -2,10 +2,15 @@ use core::u128; use sp_runtime::DispatchError; +use crate::{BucketId, ClusterId, CustomerUsage}; + pub trait CustomerCharger { fn charge_content_owner( + cluster_id: &ClusterId, + bucket_id: BucketId, content_owner: T::AccountId, billing_vault: T::AccountId, + customer_usage: &CustomerUsage, amount: u128, ) -> Result; } diff --git a/primitives/src/traits/mod.rs b/primitives/src/traits/mod.rs index 0ca1d74f9..31bce3b53 100644 --- a/primitives/src/traits/mod.rs +++ b/primitives/src/traits/mod.rs @@ -1,15 +1,19 @@ +pub mod bucket; pub mod cluster; pub mod cluster_gov; pub mod customer; pub mod node; pub mod pallet; +pub mod payout; pub mod staking; pub mod validator; +pub use bucket::*; pub use cluster::*; pub use cluster_gov::*; pub use customer::*; pub use node::*; pub use pallet::*; +pub use payout::*; pub use staking::*; pub use validator::*; diff --git a/primitives/src/traits/node.rs b/primitives/src/traits/node.rs index 03976bfe3..fe51dd6fb 100644 --- a/primitives/src/traits/node.rs +++ b/primitives/src/traits/node.rs @@ -8,6 +8,7 @@ pub trait NodeVisitor { fn get_cluster_id(node_pub_key: &NodePubKey) -> Result, DispatchError>; fn exists(node_pub_key: &NodePubKey) -> bool; fn get_node_provider_id(node_pub_key: &NodePubKey) -> Result; + fn get_node_params(node_pub_key: &NodePubKey) -> Result; } pub trait NodeCreator { diff --git a/primitives/src/traits/payout.rs b/primitives/src/traits/payout.rs new file mode 
100644 index 000000000..e8c6a6351 --- /dev/null +++ b/primitives/src/traits/payout.rs @@ -0,0 +1,93 @@ +use sp_runtime::DispatchResult; + +use crate::{ + BatchIndex, BucketId, ClusterId, CustomerUsage, DdcEra, MMRProof, NodeUsage, PayoutError, + PayoutState, +}; + +pub trait PayoutProcessor {} + +pub trait PayoutVisitor { + // todo! factor out into PayoutProcessor + fn begin_billing_report( + origin: T::AccountId, + cluster_id: ClusterId, + era_id: DdcEra, + start_era: i64, + end_era: i64, + ) -> DispatchResult; + + // todo! factor out into PayoutProcessor + fn begin_charging_customers( + origin: T::AccountId, + cluster_id: ClusterId, + era_id: DdcEra, + max_batch_index: BatchIndex, + ) -> DispatchResult; + + // todo! factor out into PayoutProcessor + fn send_charging_customers_batch( + origin: T::AccountId, + cluster_id: ClusterId, + era_id: DdcEra, + batch_index: BatchIndex, + payers: &[(T::AccountId, BucketId, CustomerUsage)], + batch_proof: MMRProof, + ) -> DispatchResult; + + // todo! factor out into PayoutProcessor + fn end_charging_customers( + origin: T::AccountId, + cluster_id: ClusterId, + era_id: DdcEra, + ) -> DispatchResult; + + // todo! factor out into PayoutProcessor + fn begin_rewarding_providers( + origin: T::AccountId, + cluster_id: ClusterId, + era_id: DdcEra, + max_batch_index: BatchIndex, + total_node_usage: NodeUsage, + ) -> DispatchResult; + + // todo! factor out into PayoutProcessor + fn send_rewarding_providers_batch( + origin: T::AccountId, + cluster_id: ClusterId, + era_id: DdcEra, + batch_index: BatchIndex, + payees: &[(T::AccountId, NodeUsage)], + batch_proof: MMRProof, + ) -> DispatchResult; + + // todo! factor out into PayoutProcessor + fn end_rewarding_providers( + origin: T::AccountId, + cluster_id: ClusterId, + era_id: DdcEra, + ) -> DispatchResult; + + // todo! 
factor out into PayoutProcessor + fn end_billing_report( + origin: T::AccountId, + cluster_id: ClusterId, + era_id: DdcEra, + ) -> DispatchResult; + + fn get_billing_report_status(cluster_id: &ClusterId, era: DdcEra) -> PayoutState; + + fn all_customer_batches_processed(cluster_id: &ClusterId, era_id: DdcEra) -> bool; + + fn all_provider_batches_processed(cluster_id: &ClusterId, era_id: DdcEra) -> bool; + + fn get_next_customer_batch_for_payment( + cluster_id: &ClusterId, + era_id: DdcEra, + ) -> Result, PayoutError>; + + fn get_next_provider_batch_for_payment( + cluster_id: &ClusterId, + era_id: DdcEra, + ) -> Result, PayoutError>; +} diff --git a/primitives/src/traits/staking.rs b/primitives/src/traits/staking.rs index 68a2dd668..62d81ea4a 100644 --- a/primitives/src/traits/staking.rs +++ b/primitives/src/traits/staking.rs @@ -12,6 +12,8 @@ pub trait StakingVisitor { fn has_stake(node_pub_key: &NodePubKey) -> bool; fn has_chilling_attempt(node_pub_key: &NodePubKey) -> Result; + + fn stash_by_ctrl(controller: &T::AccountId) -> Result; } pub trait StakerCreator { @@ -33,4 +35,5 @@ pub trait StakerCreator { pub enum StakingVisitorError { NodeStakeDoesNotExist, NodeStakeIsInBadState, + ControllerDoesNotExist, } diff --git a/primitives/src/traits/validator.rs b/primitives/src/traits/validator.rs index af7e29dc1..0dbc2a73a 100644 --- a/primitives/src/traits/validator.rs +++ b/primitives/src/traits/validator.rs @@ -1,6 +1,23 @@ use frame_system::Config; use sp_std::prelude::*; +use crate::{BatchIndex, BucketId, ClusterId, CustomerUsage, DdcEra, MMRProof, NodeUsage}; + pub trait ValidatorVisitor { - fn get_active_validators() -> Vec; + fn setup_validators(validators: Vec); + fn is_ocw_validator(caller: T::AccountId) -> bool; + fn is_customers_batch_valid( + cluster_id: ClusterId, + era: DdcEra, + batch_index: BatchIndex, + payers: &[(T::AccountId, BucketId, CustomerUsage)], + batch_proof: &MMRProof, + ) -> bool; + fn is_providers_batch_valid( + cluster_id: ClusterId, + 
era: DdcEra, + batch_index: BatchIndex, + payees: &[(T::AccountId, NodeUsage)], + batch_proof: &MMRProof, + ) -> bool; } diff --git a/runtime/cere-dev/Cargo.toml b/runtime/cere-dev/Cargo.toml index 7cbd06a64..f33bc325d 100644 --- a/runtime/cere-dev/Cargo.toml +++ b/runtime/cere-dev/Cargo.toml @@ -102,6 +102,7 @@ pallet-ddc-customers = { workspace = true } pallet-ddc-nodes = { workspace = true } pallet-ddc-payouts = { workspace = true } pallet-ddc-staking = { workspace = true } +pallet-ddc-verification = { workspace = true } pallet-erc20 = { workspace = true } pallet-erc721 = { workspace = true } pallet-origins = { workspace = true } @@ -186,6 +187,7 @@ std = [ "pallet-ddc-staking/std", "pallet-ddc-customers/std", "pallet-ddc-clusters/std", + "pallet-ddc-verification/std", "cere-runtime-common/std", "pallet-conviction-voting/std", "pallet-referenda/std", @@ -245,6 +247,7 @@ runtime-benchmarks = [ "pallet-preimage/runtime-benchmarks", "pallet-ddc-clusters-gov/runtime-benchmarks", "pallet-origins/runtime-benchmarks", + "pallet-ddc-verification/runtime-benchmarks", ] try-runtime = [ "frame-executive/try-runtime", @@ -299,4 +302,5 @@ try-runtime = [ "pallet-preimage/try-runtime", "pallet-origins/try-runtime", "pallet-ddc-clusters-gov/try-runtime", + "pallet-ddc-verification/try-runtime", ] diff --git a/runtime/cere-dev/src/lib.rs b/runtime/cere-dev/src/lib.rs index 49ed42cdd..11f672d2e 100644 --- a/runtime/cere-dev/src/lib.rs +++ b/runtime/cere-dev/src/lib.rs @@ -23,7 +23,10 @@ #![recursion_limit = "256"] use codec::{Decode, Encode, MaxEncodedLen}; -use ddc_primitives::traits::pallet::{GetDdcOrigin, PalletVisitor}; +use ddc_primitives::{ + traits::pallet::{GetDdcOrigin, PalletVisitor}, + MAX_PAYOUT_BATCH_COUNT, MAX_PAYOUT_BATCH_SIZE, +}; use frame_election_provider_support::{ bounds::ElectionBoundsBuilder, onchain, BalancingConfig, SequentialPhragmen, VoteWeight, }; @@ -76,7 +79,7 @@ pub use pallet_transaction_payment::{CurrencyAdapter, Multiplier, TargetedFeeAdj use 
pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo}; use sp_api::impl_runtime_apis; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; -use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata, H256}; use sp_inherents::{CheckInherentsResult, InherentData}; use sp_io::hashing::blake2_128; #[cfg(any(feature = "std", test))] @@ -115,6 +118,7 @@ use governance::{ ClusterProtocolActivator, ClusterProtocolUpdater, GeneralAdmin, StakingAdmin, Treasurer, TreasurySpender, }; + /// Generated voter bag information. mod voter_bags; @@ -142,7 +146,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. - spec_version: 54004, + spec_version: 54100, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 18, @@ -491,15 +495,41 @@ impl pallet_authorship::Config for Runtime { type EventHandler = (Staking, ImOnline); } +impl_opaque_keys! { + pub struct OldSessionKeys { + pub grandpa: Grandpa, + pub babe: Babe, + pub im_online: ImOnline, + pub authority_discovery: AuthorityDiscovery, + } +} + impl_opaque_keys! 
{ pub struct SessionKeys { pub grandpa: Grandpa, pub babe: Babe, pub im_online: ImOnline, pub authority_discovery: AuthorityDiscovery, + pub ddc_verification: DdcVerification, } } +fn transform_session_keys(v: AccountId, old: OldSessionKeys) -> SessionKeys { + SessionKeys { + grandpa: old.grandpa, + babe: old.babe, + im_online: old.im_online, + authority_discovery: old.authority_discovery, + ddc_verification: { + let mut id: ddc_primitives::sr25519::AuthorityId = + sp_core::sr25519::Public::from_raw([0u8; 32]).into(); + let id_raw: &mut [u8] = id.as_mut(); + id_raw[0..32].copy_from_slice(v.as_ref()); + id_raw[0..4].copy_from_slice(b"cer!"); + id + }, + } +} impl pallet_session::Config for Runtime { type RuntimeEvent = RuntimeEvent; type ValidatorId = ::AccountId; @@ -1176,6 +1206,7 @@ impl pallet_ddc_payouts::Config for Runtime { type PalletId = PayoutsPalletId; type Currency = Balances; type CustomerCharger = DdcCustomers; + type BucketVisitor = DdcCustomers; type CustomerDepositor = DdcCustomers; type ClusterProtocol = DdcClusters; type TreasuryVisitor = TreasuryWrapper; @@ -1183,6 +1214,7 @@ impl pallet_ddc_payouts::Config for Runtime { type ClusterCreator = DdcClusters; type WeightInfo = pallet_ddc_payouts::weights::SubstrateWeight; type VoteScoreToU64 = IdentityConvert; // used for UseNominatorsAndValidatorsMap + type ValidatorVisitor = pallet_ddc_verification::Pallet; } parameter_types! { @@ -1255,6 +1287,28 @@ impl, T: frame_system::Config> GetDdcOrigin } } +parameter_types! 
{ + pub const VerificationPalletId: PalletId = PalletId(*b"verifypa"); +} +impl pallet_ddc_verification::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type PalletId = VerificationPalletId; + type WeightInfo = pallet_ddc_verification::weights::SubstrateWeight; + type ClusterManager = pallet_ddc_clusters::Pallet; + type NodeVisitor = pallet_ddc_nodes::Pallet; + type PayoutVisitor = pallet_ddc_payouts::Pallet; + type AuthorityId = ddc_primitives::sr25519::AuthorityId; + type OffchainIdentifierId = ddc_primitives::crypto::OffchainIdentifierId; + type ActivityHasher = BlakeTwo256; + const MAJORITY: u8 = 67; + const BLOCK_TO_START: u16 = 100; // every 100 blocks + const MIN_DAC_NODES_FOR_CONSENSUS: u16 = 3; + const MAX_PAYOUT_BATCH_SIZE: u16 = MAX_PAYOUT_BATCH_SIZE; + const MAX_PAYOUT_BATCH_COUNT: u16 = MAX_PAYOUT_BATCH_COUNT; + type ActivityHash = H256; + type StakingVisitor = pallet_staking::Pallet; +} + construct_runtime!( pub struct Runtime { @@ -1300,6 +1354,7 @@ construct_runtime!( DdcNodes: pallet_ddc_nodes, DdcClusters: pallet_ddc_clusters, DdcPayouts: pallet_ddc_payouts, + DdcVerification: pallet_ddc_verification, // Start OpenGov. ConvictionVoting: pallet_conviction_voting::{Pallet, Call, Storage, Event}, Referenda: pallet_referenda::{Pallet, Call, Storage, Event}, @@ -1346,7 +1401,7 @@ pub type SignedPayload = generic::SignedPayload; pub type CheckedExtrinsic = generic::CheckedExtrinsic; /// Runtime migrations -type Migrations = (); +type Migrations = (pallet_ddc_customers::migration::MigrateToV2, migrations::Unreleased); /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< @@ -1358,6 +1413,22 @@ pub type Executive = frame_executive::Executive< Migrations, >; +pub mod migrations { + use super::*; + + /// When this is removed, should also remove `OldSessionKeys`. 
+ pub struct UpgradeSessionKeys; + impl frame_support::traits::OnRuntimeUpgrade for UpgradeSessionKeys { + fn on_runtime_upgrade() -> Weight { + Session::upgrade_keys::(transform_session_keys); + Perbill::from_percent(50) * RuntimeBlockWeights::get().max_block + } + } + + /// Unreleased migrations. Add new ones here: + pub type Unreleased = (UpgradeSessionKeys,); +} + type EventRecord = frame_system::EventRecord< ::RuntimeEvent, ::Hash, diff --git a/runtime/cere/Cargo.toml b/runtime/cere/Cargo.toml index e8f295a15..cd536a347 100644 --- a/runtime/cere/Cargo.toml +++ b/runtime/cere/Cargo.toml @@ -100,6 +100,7 @@ pallet-ddc-customers = { workspace = true } pallet-ddc-nodes = { workspace = true } pallet-ddc-payouts = { workspace = true } pallet-ddc-staking = { workspace = true } +pallet-ddc-verification = { workspace = true } pallet-erc20 = { workspace = true } pallet-erc721 = { workspace = true } pallet-origins = { workspace = true } @@ -184,6 +185,7 @@ std = [ "pallet-ddc-nodes/std", "pallet-ddc-payouts/std", "pallet-ddc-staking/std", + "pallet-ddc-verification/std", "cere-runtime-common/std", "sp-arithmetic/std", "pallet-conviction-voting/std", @@ -243,6 +245,7 @@ runtime-benchmarks = [ "pallet-preimage/runtime-benchmarks", "pallet-ddc-clusters-gov/runtime-benchmarks", "pallet-origins/runtime-benchmarks", + "pallet-ddc-verification/runtime-benchmarks", ] try-runtime = [ "frame-executive/try-runtime", @@ -297,4 +300,5 @@ try-runtime = [ "pallet-preimage/try-runtime", "pallet-origins/try-runtime", "pallet-ddc-clusters-gov/try-runtime", + "pallet-ddc-verification/try-runtime", ] diff --git a/runtime/cere/constants/src/lib.rs b/runtime/cere/constants/src/lib.rs new file mode 100644 index 000000000..21a3936b4 --- /dev/null +++ b/runtime/cere/constants/src/lib.rs @@ -0,0 +1,78 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2022 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! A set of constant values used in substrate runtime. + +#![cfg_attr(not(feature = "std"), no_std)] + +/// Money matters. +pub mod currency { + use node_primitives::Balance; + + pub const MILLICENTS: Balance = 100_000; + pub const CENTS: Balance = 1_000 * MILLICENTS; // assume this is worth about a cent. + pub const DOLLARS: Balance = 100 * CENTS; + pub const GRAND: Balance = DOLLARS * 1_000; + + pub const fn deposit(items: u32, bytes: u32) -> Balance { + items as Balance * 15 * CENTS + (bytes as Balance) * 6 * CENTS + } +} + +/// Time. +pub mod time { + use node_primitives::{BlockNumber, Moment}; + + /// Since BABE is probabilistic this is the average expected block time that + /// we are targeting. Blocks will be produced at a minimum duration defined + /// by `SLOT_DURATION`, but some slots will not be allocated to any + /// authority and hence no block will be produced. We expect to have this + /// block time on average following the defined slot duration and the value + /// of `c` configured for BABE (where `1 - c` represents the probability of + /// a slot being empty). + /// This value is only used indirectly to define the unit constants below + /// that are expressed in blocks. The rest of the code should use + /// `SLOT_DURATION` instead (like the Timestamp pallet for calculating the + /// minimum period). 
+ /// + /// If using BABE with secondary slots (default) then all of the slots will + /// always be assigned, in which case `MILLISECS_PER_BLOCK` and + /// `SLOT_DURATION` should have the same value. + /// + /// + pub const MILLISECS_PER_BLOCK: Moment = 6000; + pub const SECS_PER_BLOCK: Moment = MILLISECS_PER_BLOCK / 1000; + + // NOTE: Currently it is not possible to change the slot duration after the chain has started. + // Attempting to do so will brick block production. + pub const SLOT_DURATION: Moment = MILLISECS_PER_BLOCK; + + // 1 in 4 blocks (on average, not counting collisions) will be primary BABE blocks. + pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4); + + pub const EPOCH_DURATION_IN_BLOCKS: BlockNumber = 4 * HOURS; + pub const EPOCH_DURATION_IN_SLOTS: u64 = { + const SLOT_FILL_RATE: f64 = MILLISECS_PER_BLOCK as f64 / SLOT_DURATION as f64; + + (EPOCH_DURATION_IN_BLOCKS as f64 * SLOT_FILL_RATE) as u64 + }; + + // These time units are defined in number of blocks. + pub const MINUTES: BlockNumber = 60 / (SECS_PER_BLOCK as BlockNumber); + pub const HOURS: BlockNumber = MINUTES * 60; + pub const DAYS: BlockNumber = HOURS * 24; +} diff --git a/runtime/cere/src/lib.rs b/runtime/cere/src/lib.rs index c7a64bb37..4b69773cd 100644 --- a/runtime/cere/src/lib.rs +++ b/runtime/cere/src/lib.rs @@ -22,7 +22,10 @@ // `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. 
#![recursion_limit = "256"] use codec::{Decode, Encode, MaxEncodedLen}; -use ddc_primitives::traits::pallet::PalletVisitor; +use ddc_primitives::{ + traits::pallet::{GetDdcOrigin, PalletVisitor}, + MAX_PAYOUT_BATCH_COUNT, MAX_PAYOUT_BATCH_SIZE, +}; use frame_election_provider_support::{ bounds::ElectionBoundsBuilder, onchain, BalancingConfig, SequentialPhragmen, VoteWeight, }; @@ -70,7 +73,7 @@ pub use pallet_transaction_payment::{CurrencyAdapter, Multiplier, TargetedFeeAdj use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo}; use sp_api::impl_runtime_apis; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; -use sp_core::{crypto::KeyTypeId, OpaqueMetadata}; +use sp_core::{crypto::KeyTypeId, OpaqueMetadata, H256}; use sp_inherents::{CheckInherentsResult, InherentData}; use sp_io::hashing::blake2_128; #[cfg(any(feature = "std", test))] @@ -100,10 +103,10 @@ use cere_runtime_common::{ constants::{currency::*, time::*}, CurrencyToVote, }; -use ddc_primitives::traits::GetDdcOrigin; use impls::Author; use sp_runtime::generic::Era; use sp_std::marker::PhantomData; + // Governance configurations. pub mod governance; use governance::{ @@ -137,7 +140,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. - spec_version: 54004, + spec_version: 54100, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 18, @@ -486,12 +489,39 @@ impl pallet_authorship::Config for Runtime { type EventHandler = (Staking, ImOnline); } +impl_opaque_keys! { + pub struct OldSessionKeys { + pub grandpa: Grandpa, + pub babe: Babe, + pub im_online: ImOnline, + pub authority_discovery: AuthorityDiscovery, + } +} + impl_opaque_keys! 
{ pub struct SessionKeys { pub grandpa: Grandpa, pub babe: Babe, pub im_online: ImOnline, pub authority_discovery: AuthorityDiscovery, + pub ddc_verification: DdcVerification, + } +} + +fn transform_session_keys(v: AccountId, old: OldSessionKeys) -> SessionKeys { + SessionKeys { + grandpa: old.grandpa, + babe: old.babe, + im_online: old.im_online, + authority_discovery: old.authority_discovery, + ddc_verification: { + let mut id: ddc_primitives::sr25519::AuthorityId = + sp_core::sr25519::Public::from_raw([0u8; 32]).into(); + let id_raw: &mut [u8] = id.as_mut(); + id_raw[0..32].copy_from_slice(v.as_ref()); + id_raw[0..4].copy_from_slice(b"cer!"); + id + }, } } @@ -1165,13 +1195,15 @@ impl pallet_ddc_payouts::Config for Runtime { type PalletId = PayoutsPalletId; type Currency = Balances; type CustomerCharger = DdcCustomers; + type BucketVisitor = DdcCustomers; type CustomerDepositor = DdcCustomers; type ClusterProtocol = DdcClusters; type TreasuryVisitor = TreasuryWrapper; type NominatorsAndValidatorsList = pallet_staking::UseNominatorsAndValidatorsMap; type ClusterCreator = DdcClusters; type WeightInfo = pallet_ddc_payouts::weights::SubstrateWeight; - type VoteScoreToU64 = IdentityConvert; // used for UseNominatorsAndValidatorsMap + type VoteScoreToU64 = IdentityConvert; + type ValidatorVisitor = pallet_ddc_verification::Pallet; } parameter_types! { @@ -1262,6 +1294,29 @@ impl, T: frame_system::Config> GetDdcOrigin } } +parameter_types! 
{ + pub const VerificationPalletId: PalletId = PalletId(*b"verifypa"); +} + +impl pallet_ddc_verification::Config for Runtime { + type RuntimeEvent = RuntimeEvent; + type PalletId = VerificationPalletId; + type WeightInfo = pallet_ddc_verification::weights::SubstrateWeight; + type ClusterManager = pallet_ddc_clusters::Pallet; + type NodeVisitor = pallet_ddc_nodes::Pallet; + type PayoutVisitor = pallet_ddc_payouts::Pallet; + type AuthorityId = ddc_primitives::sr25519::AuthorityId; + type OffchainIdentifierId = ddc_primitives::crypto::OffchainIdentifierId; + type ActivityHasher = BlakeTwo256; + const MAJORITY: u8 = 67; + const BLOCK_TO_START: u16 = 100; // every 100 blocks + const MIN_DAC_NODES_FOR_CONSENSUS: u16 = 3; + const MAX_PAYOUT_BATCH_SIZE: u16 = MAX_PAYOUT_BATCH_SIZE; + const MAX_PAYOUT_BATCH_COUNT: u16 = MAX_PAYOUT_BATCH_COUNT; + type ActivityHash = H256; + type StakingVisitor = pallet_staking::Pallet; +} + construct_runtime!( pub struct Runtime { @@ -1315,6 +1370,7 @@ construct_runtime!( // End OpenGov. TechComm: pallet_collective::, DdcClustersGov: pallet_ddc_clusters_gov, + DdcVerification: pallet_ddc_verification, } ); @@ -1356,8 +1412,25 @@ pub type CheckedExtrinsic = generic::CheckedExtrinsic, pallet_ddc_staking::migrations::v1::MigrateToV1, + pallet_ddc_customers::migration::MigrateToV2, + migrations::Unreleased, ); +pub mod migrations { + use super::*; + + /// When this is removed, should also remove `OldSessionKeys`. + pub struct UpgradeSessionKeys; + impl frame_support::traits::OnRuntimeUpgrade for UpgradeSessionKeys { + fn on_runtime_upgrade() -> Weight { + Session::upgrade_keys::(transform_session_keys); + Perbill::from_percent(50) * RuntimeBlockWeights::get().max_block + } + } + + /// Unreleased migrations. Add new ones here: + pub type Unreleased = (UpgradeSessionKeys,); +} /// Executive: handles dispatch to the various modules. 
pub type Executive = frame_executive::Executive< Runtime, diff --git a/scripts/start_validator1.sh b/scripts/start_validator1.sh new file mode 100644 index 000000000..601a97554 --- /dev/null +++ b/scripts/start_validator1.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +../target/release/cere \ +--base-path /tmp/alice \ +--chain local \ +--alice \ +--port 30333 \ +--rpc-port 9945 \ +--telemetry-url "wss://telemetry.polkadot.io/submit/ 0" \ +--validator diff --git a/scripts/start_validator2.sh b/scripts/start_validator2.sh new file mode 100644 index 000000000..439b3f86d --- /dev/null +++ b/scripts/start_validator2.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +../target/release/cere \ +--base-path /tmp/bob \ +--chain local \ +--bob \ +--port 30334 \ +--rpc-port 9946 \ +--telemetry-url "wss://telemetry.polkadot.io/submit/ 0" \ +--validator diff --git a/scripts/start_validator3.sh b/scripts/start_validator3.sh new file mode 100644 index 000000000..0c5acd01a --- /dev/null +++ b/scripts/start_validator3.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +../target/release/cere \ +--base-path /tmp/charlie \ +--chain local \ +--charlie \ +--port 30335 \ +--rpc-port 9947 \ +--telemetry-url "wss://telemetry.polkadot.io/submit/ 0" \ +--validator