From be79416b8982836e4579843df99f6df1cad96985 Mon Sep 17 00:00:00 2001 From: girazoki Date: Fri, 22 Nov 2024 15:58:12 +0100 Subject: [PATCH] Container candidates in dev service (#747) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * wip * wip * add flume for messages * wip * wip * test related to validator rewards * toml· * clippy * linting * more lint * start cleaning * work * pr comm * remove logs --- Cargo.lock | 2 + .../node/tanssi-relay-service/Cargo.toml | 2 + .../node/tanssi-relay-service/src/dev_rpcs.rs | 81 ++++ .../tanssi-relay-service/src/dev_service.rs | 448 ++++++++++++++++-- .../node/tanssi-relay-service/src/lib.rs | 2 + .../test_external_validator_rewards.ts | 67 +++ .../test_paras_candidate_inherent.ts | 41 ++ ...lashes_are_removed_after_bonding_period.ts | 4 +- test/util/relayInterface.ts | 10 +- 9 files changed, 607 insertions(+), 50 deletions(-) create mode 100644 solo-chains/node/tanssi-relay-service/src/dev_rpcs.rs create mode 100644 test/suites/dev-tanssi-relay/external-validators-rewards/test_external_validator_rewards.ts create mode 100644 test/suites/dev-tanssi-relay/paras-candidate-inherent/test_paras_candidate_inherent.ts diff --git a/Cargo.lock b/Cargo.lock index 19abd6ad5..ab1d18e3d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -17506,6 +17506,7 @@ dependencies = [ "dancelight-runtime-constants", "dp-container-chain-genesis-data", "env_logger 0.11.3", + "flume 0.10.14", "frame-benchmarking", "frame-benchmarking-cli", "frame-support", @@ -17599,6 +17600,7 @@ dependencies = [ "sp-block-builder", "sp-blockchain", "sp-consensus", + "sp-consensus-aura", "sp-consensus-babe", "sp-consensus-beefy", "sp-core", diff --git a/solo-chains/node/tanssi-relay-service/Cargo.toml b/solo-chains/node/tanssi-relay-service/Cargo.toml index f088ac744..49dbf03dd 100644 --- a/solo-chains/node/tanssi-relay-service/Cargo.toml +++ b/solo-chains/node/tanssi-relay-service/Cargo.toml @@ -47,6 +47,7 @@ sp-api = { workspace = true } sp-authority-discovery = { workspace = true } sp-block-builder = { workspace = true } sp-blockchain = { workspace = true } +sp-consensus-aura = { workspace = true } sp-consensus-babe = { workspace = true } sp-core = { workspace = true, features = [ "std" ] } sp-inherents = { workspace = true, features = [ "std" ] } @@ -81,6 +82,7 @@ async-io = { workspace = true } async-trait = { workspace = true } bitvec = { workspace = true, optional = true } codec = { workspace = true } +flume = { workspace = true } futures = { workspace = true } gum = { workspace = true } hex-literal = { workspace = true } diff --git a/solo-chains/node/tanssi-relay-service/src/dev_rpcs.rs b/solo-chains/node/tanssi-relay-service/src/dev_rpcs.rs new file mode 100644 index 000000000..264670ca8 --- /dev/null +++ b/solo-chains/node/tanssi-relay-service/src/dev_rpcs.rs @@ -0,0 +1,81 @@ +// Copyright (C) Moondance Labs Ltd. +// This file is part of Tanssi. + +// Tanssi is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Tanssi is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Tanssi. If not, see + +//! 
Development Polkadot service. Adapted from the `polkadot_service` crate,
+//! with components that are not required in a dev node removed.
+
+use codec::Encode;
+use jsonrpsee::{
+    core::RpcResult,
+    proc_macros::rpc,
+    types::{
+        error::{INTERNAL_ERROR_CODE, INTERNAL_ERROR_MSG},
+        ErrorObjectOwned,
+    },
+};
+
+/// This RPC interface exposes methods that are only available in dev mode
+#[rpc(server)]
+#[jsonrpsee::core::async_trait]
+pub trait DevApi {
+    /// Enable mock parachain candidate insertion
+    #[method(name = "mock_enableParaInherentCandidate")]
+    async fn enable_para_inherent_candidate(&self) -> RpcResult<()>;
+
+    /// Disable mock parachain candidate insertion
+    #[method(name = "mock_disableParaInherentCandidate")]
+    async fn disable_para_inherent_candidate(&self) -> RpcResult<()>;
+}
+
+pub struct DevRpc {
+    pub mock_para_inherent_channel: flume::Sender<Vec<u8>>,
+}
+
+#[jsonrpsee::core::async_trait]
+impl DevApiServer for DevRpc {
+    async fn enable_para_inherent_candidate(&self) -> RpcResult<()> {
+        let mock_para_inherent_channel = self.mock_para_inherent_channel.clone();
+        // Push the message to the shared channel where it will be queued up
+        // to be injected into an upcoming block.
+        mock_para_inherent_channel
+            .send_async(true.encode())
+            .await
+            .map_err(|err| internal_err(err.to_string()))?;
+
+        Ok(())
+    }
+
+    async fn disable_para_inherent_candidate(&self) -> RpcResult<()> {
+        let mock_para_inherent_channel = self.mock_para_inherent_channel.clone();
+        // Push the message to the shared channel where it will be queued up
+        // to be injected into an upcoming block.
+        mock_para_inherent_channel
+            .send_async(false.encode())
+            .await
+            .map_err(|err| internal_err(err.to_string()))?;
+
+        Ok(())
+    }
+}
+
+// This bit cribbed from frontier.
+pub fn internal_err<T: ToString>(message: T) -> ErrorObjectOwned {
+    ErrorObjectOwned::owned(
+        INTERNAL_ERROR_CODE,
+        INTERNAL_ERROR_MSG,
+        Some(message.to_string()),
+    )
+}
diff --git a/solo-chains/node/tanssi-relay-service/src/dev_service.rs b/solo-chains/node/tanssi-relay-service/src/dev_service.rs
index bd12767b2..b8c355b66 100644
--- a/solo-chains/node/tanssi-relay-service/src/dev_service.rs
+++ b/solo-chains/node/tanssi-relay-service/src/dev_service.rs
@@ -31,6 +31,7 @@
 //! by incrementing timestamp by slot duration.
use { + crate::dev_rpcs::{DevApiServer, DevRpc}, async_io::Timer, babe::{BabeBlockImport, BabeLink}, codec::{Decode, Encode}, @@ -42,7 +43,12 @@ use { polkadot_core_primitives::{AccountId, Balance, Block, Hash, Nonce}, polkadot_node_core_parachains_inherent::Error as InherentError, polkadot_overseer::Handle, - polkadot_primitives::InherentData as ParachainsInherentData, + polkadot_primitives::{ + runtime_api::ParachainHost, BackedCandidate, CandidateCommitments, CandidateDescriptor, + CollatorPair, CommittedCandidateReceipt, CompactStatement, EncodeAs, + InherentData as ParachainsInherentData, OccupiedCoreAssumption, SigningContext, + ValidityAttestation, + }, polkadot_rpc::{DenyUnsafe, RpcExtension}, polkadot_service::{ BlockT, Error, IdentifyVariant, NewFullParams, OverseerGen, SelectRelayChain, @@ -54,16 +60,24 @@ use { run_manual_seal, EngineCommand, ManualSealParams, }, sc_executor::{HeapAllocStrategy, WasmExecutor, DEFAULT_HEAP_ALLOC_STRATEGY}, + sc_keystore::Keystore, sc_transaction_pool_api::{OffchainTransactionPoolFactory, TransactionPool}, service::{Configuration, KeystoreContainer, RpcHandlers, TaskManager}, sp_api::ProvideRuntimeApi, sp_block_builder::BlockBuilder, sp_blockchain::{HeaderBackend, HeaderMetadata}, + sp_consensus_aura::{inherents::InherentType as AuraInherentType, AURA_ENGINE_ID}, sp_consensus_babe::SlotDuration, + sp_core::{ByteArray, Pair, H256}, + sp_keystore::KeystorePtr, + sp_runtime::{traits::BlakeTwo256, DigestItem, RuntimeAppPublic}, std::{cmp::max, ops::Add, sync::Arc, time::Duration}, telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle}, }; +// We use this key to store whether we want the para inherent mocker to be active +const PARA_INHERENT_SELECTOR_AUX_KEY: &[u8] = b"__DEV_PARA_INHERENT_SELECTOR"; + pub type FullBackend = service::TFullBackend; pub type FullClient = service::TFullClient< @@ -97,6 +111,8 @@ struct DevDeps { pub deny_unsafe: DenyUnsafe, /// Manual seal command sink pub command_sink: Option>>, + /// Channels for dev rpcs + pub dev_rpc_data: Option>>, } fn create_dev_rpc_extension( @@ -106,6 +122,7 @@ fn create_dev_rpc_extension( chain_spec, deny_unsafe, command_sink: maybe_command_sink, + dev_rpc_data: maybe_dev_rpc_data, }: DevDeps, ) -> Result> where @@ -145,15 +162,21 @@ where io.merge(ManualSeal::new(command_sink).into_rpc())?; } + if let Some(mock_para_inherent_channel) = maybe_dev_rpc_data { + io.merge( + DevRpc { + mock_para_inherent_channel, + } + .into_rpc(), + )?; + } + Ok(io) } /// We use EmptyParachainsInherentDataProvider to insert an empty parachain inherent in the block /// to satisfy runtime -struct EmptyParachainsInherentDataProvider> { - pub client: Arc, - pub parent: Hash, -} +struct EmptyParachainsInherentDataProvider; /// Copied from polkadot service just so that this code retains same structure as /// polkadot_service crate. 
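For reference, the dev RPC wired into the extension above is what the new test suites at the bottom of this patch call to switch the mocked candidates on and off: the sender half of the flume channel ends up in `DevRpc::mock_para_inherent_channel`, and the block-authoring task drains the receiver and persists the flag under `PARA_INHERENT_SELECTOR_AUX_KEY`. A minimal client-side sketch, using the `customDevRpcRequest` helper from @moonwall/cli exactly as the tests below do (the `setMockParaCandidates` wrapper itself is hypothetical):

import { customDevRpcRequest } from "@moonwall/cli";

// Flip the dev-only para-inherent mock on or off. The node buffers the flag
// through the flume channel and applies it when it builds the next block's
// paras inherent.
async function setMockParaCandidates(enable: boolean): Promise<void> {
    const method = enable
        ? "mock_enableParaInherentCandidate"
        : "mock_disableParaInherentCandidate";
    await customDevRpcRequest(method, []);
}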
@@ -165,12 +188,8 @@ struct Basics { telemetry: Option, } -impl> EmptyParachainsInherentDataProvider { - pub fn new(client: Arc, parent: Hash) -> Self { - EmptyParachainsInherentDataProvider { client, parent } - } - - pub async fn create( +impl EmptyParachainsInherentDataProvider { + pub async fn create>( client: Arc, parent: Hash, ) -> Result { @@ -189,35 +208,6 @@ impl> EmptyParachainsInherentDataProvider { } } -#[async_trait::async_trait] -impl> sp_inherents::InherentDataProvider - for EmptyParachainsInherentDataProvider -{ - async fn provide_inherent_data( - &self, - dst_inherent_data: &mut sp_inherents::InherentData, - ) -> Result<(), sp_inherents::Error> { - let inherent_data = - EmptyParachainsInherentDataProvider::create(self.client.clone(), self.parent) - .await - .map_err(|e| sp_inherents::Error::Application(Box::new(e)))?; - - dst_inherent_data.put_data( - polkadot_primitives::PARACHAINS_INHERENT_IDENTIFIER, - &inherent_data, - ) - } - - async fn try_handle_error( - &self, - _identifier: &sp_inherents::InherentIdentifier, - _error: &[u8], - ) -> Option> { - // Inherent isn't checked and can not return any error - None - } -} - /// Creates new development full node with manual seal pub fn build_full( sealing: Sealing, @@ -245,6 +235,300 @@ pub fn build_full( } } +/// We use MockParachainsInherentDataProvider to insert an parachain inherent with mocked +/// candidates +/// We detect whether any of the keys in our keystore is assigned to a core and provide +/// a mocked candidate in such core +struct MockParachainsInherentDataProvider + ProvideRuntimeApi> { + pub client: Arc, + pub parent: Hash, + pub keystore: KeystorePtr, +} + +impl + ProvideRuntimeApi> MockParachainsInherentDataProvider +where + C::Api: ParachainHost, +{ + pub fn new(client: Arc, parent: Hash, keystore: KeystorePtr) -> Self { + MockParachainsInherentDataProvider { + client, + parent, + keystore, + } + } + + pub async fn create( + client: Arc, + parent: Hash, + keystore: KeystorePtr, + ) -> Result { + let parent_header = match client.header(parent) { + Ok(Some(h)) => h, + Ok(None) => return Err(InherentError::ParentHeaderNotFound(parent)), + Err(err) => return Err(InherentError::Blockchain(err)), + }; + + // Strategy: + // we usually have 1 validator per core, and we usually run with --alice + // the idea is that at least alice will be assigned to one core + // if we find in the keystore the validator attached to a particular core, + // we generate a signature for the parachain assigned to that core + // To retrieve the validator keys, cal runtime api: + + // this following piece of code predicts whether the validator is assigned to a particular + // core where a candidate for a parachain needs to be created + let runtime_api = client.runtime_api(); + + // we get all validators + + // we get the current claim queue to know core availability + let claim_queue = runtime_api.claim_queue(parent).unwrap(); + + // we get the validator groups + let (groups, rotation_info) = runtime_api.validator_groups(parent).unwrap(); + + // we calculate rotation since start, which will define the core assignation + // to validators + let rotations_since_session_start = (parent_header.number + - rotation_info.session_start_block) + / rotation_info.group_rotation_frequency; + + // Get all the available keys in the keystore + let available_keys = keystore + .keys(polkadot_primitives::PARACHAIN_KEY_TYPE_ID) + .unwrap(); + + // create a slot number identical to the parent block num + let slot_number = 
AuraInherentType::from(u64::from(parent_header.number)); + + // create a mocked header + let parachain_mocked_header = sp_runtime::generic::Header:: { + parent_hash: Default::default(), + number: parent_header.number, + state_root: Default::default(), + extrinsics_root: Default::default(), + digest: sp_runtime::generic::Digest { + logs: vec![DigestItem::PreRuntime(AURA_ENGINE_ID, slot_number.encode())], + }, + }; + + // retrieve availability cores + let availability_cores = runtime_api.availability_cores(parent).unwrap(); + + // retrieve current session_idx + let session_idx = runtime_api.session_index_for_child(parent).unwrap(); + + // retrieve all validators + let all_validators = runtime_api.validators(parent).unwrap(); + + // construct full availability bitvec + let availability_bitvec = availability_bitvec(1, availability_cores.len()); + + let signature_ctx = SigningContext { + parent_hash: parent, + session_index: session_idx, + }; + + // we generate the availability bitfield sigs + // TODO: here we assume all validator keys are able to sign with our keystore + // we need to make sure the key is there before we try to sign + // this is mostly to indicate that the erasure coding chunks where received by all val + let bitfields: Vec> = all_validators + .iter() + .enumerate() + .map(|(i, public)| { + keystore_sign( + &keystore, + availability_bitvec.clone(), + &signature_ctx, + ValidatorIndex(i as u32), + &public, + ) + .unwrap() + .unwrap() + }) + .collect(); + + // generate a random collator pair + let collator_pair = CollatorPair::generate().0; + let mut backed_cand: Vec> = vec![]; + + // iterate over every core|para pair + for (core, para) in claim_queue { + // check which group is assigned to each core + let group_assigned_to_core = + core.0 + rotations_since_session_start % groups.len() as u32; + // check validator indices associated to the core + let indices_associated_to_core = groups.get(group_assigned_to_core as usize).unwrap(); + for index in indices_associated_to_core { + // fetch validator keys + let validator_keys_to_find = all_validators.get(index.0 as usize).unwrap(); + // Iterate keys until we find an eligible one, or run out of candidates. 
+ for type_public_pair in &available_keys { + if let Ok(validator) = + polkadot_primitives::ValidatorId::from_slice(&type_public_pair) + { + // if we find the validator in keystore, we try to create a backed cand + if validator_keys_to_find == &validator { + // we work with the previous included data + let mut persisted_validation_data = runtime_api + .persisted_validation_data( + parent, + para[0], + OccupiedCoreAssumption::Included, + ) + .unwrap() + .unwrap(); + + // if we dont do this we have a backed candidate every 2 blocks + // TODO: figure out why + persisted_validation_data.relay_parent_storage_root = + parent_header.state_root; + + let persisted_validation_data_hash = persisted_validation_data.hash(); + // retrieve the validation code hash + let validation_code_hash = runtime_api + .validation_code_hash( + parent, + para[0], + OccupiedCoreAssumption::Included, + ) + .unwrap() + .unwrap(); + let pov_hash = Default::default(); + // generate a fake collator signature + let payload = polkadot_primitives::collator_signature_payload( + &parent, + ¶[0], + &persisted_validation_data_hash, + &pov_hash, + &validation_code_hash, + ); + let collator_signature = collator_pair.sign(&payload); + // generate a candidate with most of the values mocked + let candidate = CommittedCandidateReceipt:: { + descriptor: CandidateDescriptor:: { + para_id: para[0], + relay_parent: parent, + collator: collator_pair.public(), + persisted_validation_data_hash, + pov_hash, + erasure_root: Default::default(), + signature: collator_signature, + para_head: parachain_mocked_header.clone().hash(), + validation_code_hash, + }, + commitments: CandidateCommitments:: { + upward_messages: Default::default(), + horizontal_messages: Default::default(), + new_validation_code: None, + head_data: parachain_mocked_header.clone().encode().into(), + processed_downward_messages: 0, + hrmp_watermark: parent_header.number, + }, + }; + let candidate_hash = candidate.hash(); + let payload = CompactStatement::Valid(candidate_hash); + + let signature_ctx = SigningContext { + parent_hash: parent, + session_index: session_idx, + }; + + // sign the candidate with the validator key + let signature = keystore_sign( + &keystore, + payload, + &signature_ctx, + *index, + &validator, + ) + .unwrap() + .unwrap() + .benchmark_signature(); + + // construct a validity vote + let validity_votes = vec![ValidityAttestation::Explicit(signature)]; + + // push the candidate + backed_cand.push(BackedCandidate::::new( + candidate, + validity_votes.clone(), + bitvec::bitvec![u8, bitvec::order::Lsb0; 1; indices_associated_to_core.len()], + Some(core), + )); + } + } + } + } + } + + Ok(ParachainsInherentData { + bitfields: bitfields, + backed_candidates: backed_cand, + disputes: Vec::new(), + parent_header, + }) + } +} + +#[async_trait::async_trait] +impl + ProvideRuntimeApi> sp_inherents::InherentDataProvider + for MockParachainsInherentDataProvider +where + C::Api: ParachainHost, + C: AuxStore, +{ + async fn provide_inherent_data( + &self, + dst_inherent_data: &mut sp_inherents::InherentData, + ) -> Result<(), sp_inherents::Error> { + // fetch whether the para inherent selector has been set + let maybe_para_selector = self + .client + .get_aux(PARA_INHERENT_SELECTOR_AUX_KEY) + .expect("Should be able to query aux storage; qed"); + + let inherent_data = { + if let Some(aux) = maybe_para_selector { + // if it is true, the candidates need to be mocked + // else, we output the empty parachain inherent data provider + if aux == true.encode() { + 
MockParachainsInherentDataProvider::create( + self.client.clone(), + self.parent, + self.keystore.clone(), + ) + .await + .map_err(|e| sp_inherents::Error::Application(Box::new(e)))? + } else { + EmptyParachainsInherentDataProvider::create(self.client.clone(), self.parent) + .await + .map_err(|e| sp_inherents::Error::Application(Box::new(e)))? + } + } else { + EmptyParachainsInherentDataProvider::create(self.client.clone(), self.parent) + .await + .map_err(|e| sp_inherents::Error::Application(Box::new(e)))? + } + }; + + dst_inherent_data.put_data( + polkadot_primitives::PARACHAINS_INHERENT_IDENTIFIER, + &inherent_data, + ) + } + + async fn try_handle_error( + &self, + _identifier: &sp_inherents::InherentIdentifier, + _error: &[u8], + ) -> Option> { + // Inherent isn't checked and can not return any error + None + } +} + /// We store past timestamp we created in the aux storage, which enable us to return timestamp which is increased by /// slot duration from previous timestamp or current timestamp if in reality more time is passed. fn get_next_timestamp( @@ -321,6 +605,10 @@ fn new_full< let net_config = sc_network::config::FullNetworkConfiguration::<_, _, Network>::new(&config.network); + // Create channels for mocked parachain candidates. + let (downward_mock_para_inherent_sender, downward_mock_para_inherent_receiver) = + flume::bounded::>(100); + let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) = service::build_network(service::BuildNetworkParams { config: &config, @@ -403,6 +691,7 @@ fn new_full< }, )), }; + let keystore_clone = keystore.clone(); let babe_config = babe_link.config(); let babe_consensus_provider = BabeConsensusDataProvider::new( @@ -418,6 +707,7 @@ fn new_full< // Need to clone it and store here to avoid moving of `client` // variable in closure below. 
let client_clone = client.clone(); + task_manager.spawn_essential_handle().spawn_blocking( "authorship_task", Some("block-authoring"), @@ -430,13 +720,30 @@ fn new_full< select_chain, create_inherent_data_providers: move |parent, ()| { let client_clone = client_clone.clone(); - + let keystore = keystore_clone.clone(); + let downward_mock_para_inherent_receiver = downward_mock_para_inherent_receiver.clone(); async move { - let parachain = - EmptyParachainsInherentDataProvider::new( - client_clone.clone(), - parent, - ); + + let downward_mock_para_inherent_receiver = downward_mock_para_inherent_receiver.clone(); + // here we only take the last one + let para_inherent_decider_messages: Vec> = downward_mock_para_inherent_receiver.drain().collect(); + + // If there is a value to be updated, we update it + if let Some(value) = para_inherent_decider_messages.last() { + client_clone + .insert_aux( + &[(PARA_INHERENT_SELECTOR_AUX_KEY, value.as_slice())], + &[], + ) + .expect("Should be able to write to aux storage; qed"); + + } + + let parachain = MockParachainsInherentDataProvider::new( + client_clone.clone(), + parent, + keystore + ); let timestamp = get_next_timestamp(client_clone, slot_duration); @@ -454,6 +761,13 @@ fn new_full< ); } + // We dont need the flume receiver if we are not a validator + let dev_rpc_data = if role.clone().is_authority() { + Some(downward_mock_para_inherent_sender) + } else { + None + }; + let rpc_extensions_builder = { let client = client.clone(); let transaction_pool = transaction_pool.clone(); @@ -468,6 +782,7 @@ fn new_full< chain_spec: chain_spec.cloned_box(), deny_unsafe, command_sink: command_sink.clone(), + dev_rpc_data: dev_rpc_data.clone(), }; create_dev_rpc_extension(deps).map_err(Into::into) @@ -630,3 +945,44 @@ fn new_partial_basics( telemetry, }) } + +use polkadot_primitives::{AvailabilityBitfield, UncheckedSigned, ValidatorId, ValidatorIndex}; +use sp_keystore::Error as KeystoreError; +fn keystore_sign( + keystore: &KeystorePtr, + payload: Payload, + context: &SigningContext, + validator_index: ValidatorIndex, + key: &ValidatorId, +) -> Result>, KeystoreError> { + let data = payload_data(&payload, context); + let signature = keystore + .sr25519_sign(ValidatorId::ID, key.as_ref(), &data)? + .map(|sig| UncheckedSigned::new(payload, validator_index, sig.into())); + Ok(signature) +} + +fn payload_data( + payload: &Payload, + context: &SigningContext, +) -> Vec { + // equivalent to (`real_payload`, context).encode() + let mut out = payload.encode_as(); + out.extend(context.encode()); + out +} + +/// Create an `AvailabilityBitfield` with size `total_cores`. The first `used_cores` set to true (occupied), +/// and the remaining to false (available). 
+fn availability_bitvec(used_cores: usize, total_cores: usize) -> AvailabilityBitfield {
+    let mut bitfields = bitvec::bitvec![u8, bitvec::order::Lsb0; 0; 0];
+    for i in 0..total_cores {
+        if i < used_cores {
+            bitfields.push(true);
+        } else {
+            bitfields.push(false)
+        }
+    }
+
+    bitfields.into()
+}
diff --git a/solo-chains/node/tanssi-relay-service/src/lib.rs b/solo-chains/node/tanssi-relay-service/src/lib.rs
index 0ad8f1d95..46a0e5030 100644
--- a/solo-chains/node/tanssi-relay-service/src/lib.rs
+++ b/solo-chains/node/tanssi-relay-service/src/lib.rs
@@ -17,3 +17,5 @@
 
 pub mod chain_spec;
 pub mod dev_service;
+
+pub mod dev_rpcs;
diff --git a/test/suites/dev-tanssi-relay/external-validators-rewards/test_external_validator_rewards.ts b/test/suites/dev-tanssi-relay/external-validators-rewards/test_external_validator_rewards.ts
new file mode 100644
index 000000000..dc4ef52ea
--- /dev/null
+++ b/test/suites/dev-tanssi-relay/external-validators-rewards/test_external_validator_rewards.ts
@@ -0,0 +1,67 @@
+import "@tanssi/api-augment";
+import { describeSuite, customDevRpcRequest, expect, beforeAll } from "@moonwall/cli";
+import { ApiPromise, Keyring } from "@polkadot/api";
+import { jumpToSession } from "util/block";
+
+describeSuite({
+    id: "DTR1601",
+    title: "Paras inherent tests",
+    foundationMethods: "dev",
+
+    testCases: ({ it, context }) => {
+        let polkadotJs: ApiPromise;
+
+        beforeAll(async () => {
+            polkadotJs = context.polkadotJs();
+        });
+
+        it({
+            id: "E01",
+            title: "para candidates should trigger reward info",
+            test: async function () {
+                const keyring = new Keyring({ type: "sr25519" });
+                const aliceStash = keyring.addFromUri("//Alice//stash");
+                await context.createBlock();
+                // Send RPC call to enable para inherent candidate generation
+                await customDevRpcRequest("mock_enableParaInherentCandidate", []);
+                // Since collators are not assigned until session 2, we need to go till session 2 to actually see heads being injected
+                await jumpToSession(context, 3);
+                await context.createBlock();
+
+                // we are still in era 0
+                const validatorRewards = await context
+                    .polkadotJs()
+                    .query.externalValidatorsRewards.rewardPointsForEra(0);
+                const totalRewards = validatorRewards.total.toBigInt();
+
+                expect(totalRewards).to.be.greaterThan(0n);
+                // All of them come from alice as she is the only one validating candidates
+                expect(validatorRewards.individual.toHuman()[aliceStash.address]).to.be.eq(totalRewards.toString());
+            },
+        });
+
+        it({
+            id: "E02",
+            title: "Check rewards storage clears after historyDepth",
+            test: async function () {
+                const sessionsPerEra = await polkadotJs.consts.externalValidators.sessionsPerEra;
+                const historyDepth = await polkadotJs.consts.externalValidatorsRewards.historyDepth;
+
+                const currentIndex = await polkadotJs.query.session.currentIndex();
+
+                const targetSession =
+                    currentIndex.toNumber() + sessionsPerEra.toNumber() * (historyDepth.toNumber() + 1);
+
+                await jumpToSession(context, targetSession);
+
+                const validatorRewards = await context
+                    .polkadotJs()
+                    .query.externalValidatorsRewards.rewardPointsForEra(0);
+                const totalRewards = validatorRewards.total.toBigInt();
+
+                // rewards should have expired
+                expect(totalRewards).to.be.equal(0n);
+            },
+        });
+    },
+});
diff --git a/test/suites/dev-tanssi-relay/paras-candidate-inherent/test_paras_candidate_inherent.ts b/test/suites/dev-tanssi-relay/paras-candidate-inherent/test_paras_candidate_inherent.ts
new file mode 100644
index 000000000..b1329ec6c
--- /dev/null
+++ b/test/suites/dev-tanssi-relay/paras-candidate-inherent/test_paras_candidate_inherent.ts
@@ -0,0 +1,41 @@
+import "@tanssi/api-augment";
+import { describeSuite, customDevRpcRequest, expect, beforeAll } from "@moonwall/cli";
+import { ApiPromise } from "@polkadot/api";
+import { jumpToSession } from "util/block";
+import { getHeaderFromRelay } from "util/relayInterface.ts";
+
+describeSuite({
+    id: "DTR1401",
+    title: "Paras inherent tests",
+    foundationMethods: "dev",
+
+    testCases: ({ it, context }) => {
+        let polkadotJs: ApiPromise;
+
+        beforeAll(async () => {
+            polkadotJs = context.polkadotJs();
+        });
+
+        it({
+            id: "E01",
+            title: "Paras heads should be updated every block",
+            test: async function () {
+                const parasHeadGenesis = await polkadotJs.query.paras.heads(2000);
+                await context.createBlock();
+                // Send RPC call to enable para inherent candidate generation
+                await customDevRpcRequest("mock_enableParaInherentCandidate", []);
+                // Since collators are not assigned until session 2, we need to go till session 2 to actually see heads being injected
+                await jumpToSession(context, 3);
+                await context.createBlock();
+                const parasHeadAfterOneBlock = await polkadotJs.query.paras.heads(2000);
+                expect(parasHeadAfterOneBlock).to.not.be.eq(parasHeadGenesis);
+                await context.createBlock();
+                // we create one more block to test we are persisting candidates every block
+                const parasHeadAfterTwoBlocks = await polkadotJs.query.paras.heads(2000);
+                expect(parasHeadAfterOneBlock).to.not.be.eq(parasHeadAfterTwoBlocks);
+                const header2000 = await getHeaderFromRelay(context.polkadotJs(), 2000);
+                expect(header2000.number.toBigInt()).to.be.equal(31n);
+            },
+        });
+    },
+});
diff --git a/test/suites/dev-tanssi-relay/slashes/test_slashes_are_removed_after_bonding_period.ts b/test/suites/dev-tanssi-relay/slashes/test_slashes_are_removed_after_bonding_period.ts
index 91eb73304..3d289f1e0 100644
--- a/test/suites/dev-tanssi-relay/slashes/test_slashes_are_removed_after_bonding_period.ts
+++ b/test/suites/dev-tanssi-relay/slashes/test_slashes_are_removed_after_bonding_period.ts
@@ -78,8 +78,8 @@ describeSuite({
                 .signAsync(alice);
             await context.createBlock([addAliceFromInvulnerables]);
 
-            const sessionsPerEra = await polkadotJs.consts.externalValidators.sessionsPerEra;
-            const bondingPeriod = await polkadotJs.consts.externalValidatorSlashes.bondingDuration;
+            const sessionsPerEra = (await polkadotJs.consts.externalValidators.sessionsPerEra).toNumber();
+            const bondingPeriod = (await polkadotJs.consts.externalValidatorSlashes.bondingDuration).toNumber();
 
             const currentIndex = await polkadotJs.query.session.currentIndex();
 
diff --git a/test/util/relayInterface.ts b/test/util/relayInterface.ts
index 80c92cb5f..048b3ad1a 100644
--- a/test/util/relayInterface.ts
+++ b/test/util/relayInterface.ts
@@ -1,9 +1,15 @@
 import { ApiPromise } from "@polkadot/api";
-import type { Header, ParaId } from "@polkadot/types/interfaces";
+import type { Header, ParaId, HeadData } from "@polkadot/types/interfaces";
+import { Bytes } from "@polkadot/types-codec";
+import { TypeRegistry } from "@polkadot/types";
 
 export async function getHeaderFromRelay(relayApi: ApiPromise, paraId: ParaId): Promise<Header> {
     // Get the latest header from relay storage
     const encoded = await relayApi.query.paras.heads(paraId);
-    const header = await relayApi.createType("Header", encoded);
+    const registry = new TypeRegistry();
+    const headerEncoded: HeadData = await relayApi.createType("HeadData", encoded.toHex());
+    const nonEncodedHeader = new Bytes(registry, headerEncoded.toU8a(true)).toHex();
+
+    const header = await relayApi.createType("SpRuntimeHeader", nonEncodedHeader);
     return header;
 }
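As a usage note, the reworked `getHeaderFromRelay` above decodes the opaque `HeadData` stored under `paras.heads` back into a real header, which is what lets `test_paras_candidate_inherent.ts` assert on `header2000.number`. A small sketch of that pattern, assuming a connected dev relay `ApiPromise`; the para id 2000 mirrors the test, and the `assertParaHeadAdvances` helper is hypothetical:

import { ApiPromise } from "@polkadot/api";
import { getHeaderFromRelay } from "util/relayInterface.ts";

// After mock_enableParaInherentCandidate is active and blocks are being
// produced, the para head stored on the relay should decode to a header
// whose number keeps growing.
async function assertParaHeadAdvances(relayApi: ApiPromise): Promise<void> {
    const before = await getHeaderFromRelay(relayApi, 2000);
    // ...author one more block here, e.g. via context.createBlock()...
    const after = await getHeaderFromRelay(relayApi, 2000);
    if (after.number.toBigInt() <= before.number.toBigInt()) {
        throw new Error("para head did not advance");
    }
}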