Add support for collators in Starlight
tmpolaczyk committed Sep 6, 2024
1 parent a896b54 commit 9b2d5cd
Showing 18 changed files with 1,597 additions and 501 deletions.
650 changes: 326 additions & 324 deletions Cargo.lock

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions client/consensus/Cargo.toml
@@ -37,6 +37,7 @@ sp-version = { workspace = true }
substrate-prometheus-endpoint = { workspace = true }

# Own
dc-orchestrator-chain-interface = { workspace = true }
dp-consensus = { workspace = true, features = [ "std" ] }
pallet-registrar-runtime-api = { workspace = true, features = [ "std" ] }
pallet-xcm-core-buyer-runtime-api = { workspace = true, features = [ "std" ] }
9 changes: 8 additions & 1 deletion client/consensus/src/collators/lookahead.rs
@@ -345,6 +345,7 @@ pub struct Params<
pub cancellation_token: CancellationToken,
pub orchestrator_tx_pool: Arc<TxPool>,
pub orchestrator_client: Arc<OClient>,
pub solochain: bool,
}

/// Run async-backing-friendly for Tanssi Aura.
@@ -636,7 +637,13 @@ where
let slot = inherent_providers.slot();
let container_chain_slot_duration = (params.get_current_slot_duration)(parent_header.hash());

match try_to_buy_core::<_, _, <<OBlock as BlockT>::Header as HeaderT>::Number, _, CIDP, _, _>(params.para_id, aux_data, inherent_providers, &params.keystore, params.orchestrator_client.clone(), params.orchestrator_tx_pool.clone(), parent_header, params.orchestrator_slot_duration, container_chain_slot_duration).await {
let buy_core_result = if params.solochain {
// TODO: implement parathread support for solochain
unimplemented!("Cannot buy core for parathread in solochain")
} else {
try_to_buy_core::<_, _, <<OBlock as BlockT>::Header as HeaderT>::Number, _, CIDP, _, _>(params.para_id, aux_data, inherent_providers, &params.keystore, params.orchestrator_client.clone(), params.orchestrator_tx_pool.clone(), parent_header, params.orchestrator_slot_duration, container_chain_slot_duration).await
};
match buy_core_result {
Ok(block_hash) => {
tracing::trace!(target: crate::LOG_TARGET, ?block_hash, "Sent unsigned extrinsic to buy the core");
},
59 changes: 27 additions & 32 deletions client/consensus/src/lib.rs
@@ -32,6 +32,8 @@ mod tests;
pub use {
crate::consensus_orchestrator::OrchestratorAuraWorkerAuxData,
cumulus_primitives_core::ParaId,
cumulus_relay_chain_interface::{call_remote_runtime_function, RelayChainInterface},
dc_orchestrator_chain_interface::OrchestratorChainInterface,
dp_consensus::TanssiAuthorityAssignmentApi,
manual_seal::{
get_aura_id_from_seed, ContainerManualSealAuraConsensusDataProvider,
@@ -48,6 +50,7 @@ pub use {
sp_application_crypto::AppPublic,
sp_consensus::Error as ConsensusError,
sp_core::crypto::{ByteArray, Public},
sp_core::H256,
sp_keystore::{Keystore, KeystorePtr},
sp_runtime::traits::{Block as BlockT, Header as HeaderT, Member, NumberFor},
std::hash::Hash,
@@ -145,14 +148,13 @@ use {
/// and makes no guarantees which one as that depends on the keystore's iterator behavior.
/// This is the standard way of determining which key to author with.
/// It also returns its ParaId assignment
pub fn first_eligible_key<B: BlockT, C, P>(
pub async fn first_eligible_key<C, P>(
client: &C,
parent_hash: &B::Hash,
parent_hash: &H256,
keystore: KeystorePtr,
) -> Option<(AuthorityId<P>, ParaId)>
where
C: ProvideRuntimeApi<B>,
C::Api: TanssiAuthorityAssignmentApi<B, AuthorityId<P>>,
C: OrchestratorChainInterface + ?Sized,
P: Pair + Send + Sync,
P::Public: AppPublic + Hash + Member + Encode + Decode,
P::Signature: TryFrom<Vec<u8>> + Hash + Member + Encode + Decode,
@@ -170,45 +172,41 @@ where
return None;
}

let runtime_api = client.runtime_api();

// Iterate keys until we find an eligible one, or run out of candidates.
// If we are skipping prediction, then we author with the first key we find.
// prediction skipping only really makes sense when there is a single key in the keystore.
available_keys.into_iter().find_map(|type_public_pair| {
for type_public_pair in available_keys {
if let Ok(nimbus_id) = NimbusId::from_slice(&type_public_pair) {
// If we dont find any parachain that we are assigned to, return none

if let Ok(Some(para_id)) =
runtime_api.check_para_id_assignment(*parent_hash, nimbus_id.clone().into())
if let Ok(Some(para_id)) = client
.check_para_id_assignment(*parent_hash, nimbus_id.clone())
.await
{
log::debug!("Para id found for assignment {:?}", para_id);

Some((nimbus_id.into(), para_id))
return Some((nimbus_id.into(), para_id));
} else {
log::debug!("No Para id found for assignment {:?}", nimbus_id);

None
}
} else {
None
log::debug!("Invalid nimbus id: {:?}", type_public_pair);
}
})
}

None
}

/// Grab the first eligible nimbus key from the keystore
/// If multiple keys are eligible this function still only returns one
/// and makes no guarantees which one as that depends on the keystore's iterator behavior.
/// This is the standard way of determining which key to author with.
/// It also returns its ParaId assignment
pub fn first_eligible_key_next_session<B: BlockT, C, P>(
pub async fn first_eligible_key_next_session<C, P>(
client: &C,
parent_hash: &B::Hash,
parent_hash: &H256,
keystore: KeystorePtr,
) -> Option<(AuthorityId<P>, ParaId)>
where
C: ProvideRuntimeApi<B>,
C::Api: TanssiAuthorityAssignmentApi<B, AuthorityId<P>>,
C: OrchestratorChainInterface + ?Sized,
P: Pair + Send + Sync,
P::Public: AppPublic + Hash + Member + Encode + Decode,
P::Signature: TryFrom<Vec<u8>> + Hash + Member + Encode + Decode,
@@ -226,28 +224,25 @@ where
return None;
}

let runtime_api = client.runtime_api();

// Iterate keys until we find an eligible one, or run out of candidates.
// If we are skipping prediction, then we author with the first key we find.
// prediction skipping only really makes sense when there is a single key in the keystore.
available_keys.into_iter().find_map(|type_public_pair| {
for type_public_pair in available_keys {
if let Ok(nimbus_id) = NimbusId::from_slice(&type_public_pair) {
// If we dont find any parachain that we are assigned to, return none

if let Ok(Some(para_id)) = runtime_api
.check_para_id_assignment_next_session(*parent_hash, nimbus_id.clone().into())
if let Ok(Some(para_id)) = client
.check_para_id_assignment_next_session(*parent_hash, nimbus_id.clone())
.await
{
log::debug!("Para id found for assignment {:?}", para_id);

Some((nimbus_id.into(), para_id))
return Some((nimbus_id.into(), para_id));
} else {
log::debug!("No Para id found for assignment {:?}", nimbus_id);

None
}
} else {
None
log::debug!("Invalid nimbus id: {:?}", type_public_pair);
}
})
}

None
}
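Note: with this change, first_eligible_key and first_eligible_key_next_session become async and accept any OrchestratorChainInterface implementation instead of a runtime-API client, so the same key-selection helpers work over the orchestrator RPC connection. The following is a minimal sketch of a possible call site, not code from this commit: it assumes a NimbusPair key type and that an orchestrator interface handle, an orchestrator block hash, and a keystore are already available under the illustrative names below.

// Hypothetical caller: resolve this node's current container-chain assignment.
// `first_eligible_key`, `OrchestratorChainInterface`, `ParaId`, and `H256` are
// re-exported by this consensus client crate.
use nimbus_primitives::NimbusPair;

async fn current_assignment(
    orchestrator_interface: &dyn OrchestratorChainInterface,
    orchestrator_parent: sp_core::H256,
    keystore: sp_keystore::KeystorePtr,
) -> Option<ParaId> {
    // Picks the first keystore key assigned to a container chain at this block.
    let (authority_id, para_id) = first_eligible_key::<_, NimbusPair>(
        orchestrator_interface,
        &orchestrator_parent,
        keystore,
    )
    .await?;
    log::debug!("Authoring for {:?} with key {:?}", para_id, authority_id);
    Some(para_id)
}
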
1 change: 1 addition & 0 deletions client/consensus/src/mocks.rs
@@ -987,6 +987,7 @@ impl CollatorLookaheadTestBuilder {
orchestrator_client: environ.into(),
orchestrator_slot_duration: SlotDuration::from_millis(SLOT_DURATION_MS),
orchestrator_tx_pool: orchestrator_tx_pool.clone(),
solochain: false,
};
let (fut, exit_notification_receiver) = crate::collators::lookahead::run::<
_,
30 changes: 28 additions & 2 deletions client/orchestrator-chain-rpc-interface/src/lib.rs
@@ -21,8 +21,8 @@ use {
core::pin::Pin,
dc_orchestrator_chain_interface::{
BlockNumber, ContainerChainGenesisData, DataPreserverAssignment, DataPreserverProfileId,
OrchestratorChainError, OrchestratorChainInterface, OrchestratorChainResult, PHash,
PHeader,
NimbusId, OrchestratorChainError, OrchestratorChainInterface, OrchestratorChainResult,
PHash, PHeader,
},
dp_core::ParaId,
futures::{Stream, StreamExt},
@@ -344,4 +344,30 @@ impl OrchestratorChainInterface for OrchestratorChainRpcClient {
)
.await
}

async fn check_para_id_assignment(
&self,
orchestrator_parent: PHash,
authority: NimbusId,
) -> OrchestratorChainResult<Option<ParaId>> {
self.call_remote_runtime_function(
"TanssiAuthorityAssignmentApi_check_para_id_assignment",
orchestrator_parent,
Some(authority),
)
.await
}

async fn check_para_id_assignment_next_session(
&self,
orchestrator_parent: PHash,
authority: NimbusId,
) -> OrchestratorChainResult<Option<ParaId>> {
self.call_remote_runtime_function(
"TanssiAuthorityAssignmentApi_check_para_id_assignment_next_session",
orchestrator_parent,
Some(authority),
)
.await
}
}
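Both new trait methods are thin wrappers over call_remote_runtime_function targeting the orchestrator's TanssiAuthorityAssignmentApi, so an RPC-only collator node can resolve key-to-para-id assignments without a local orchestrator client. A rough usage sketch follows; it assumes an Arc<dyn OrchestratorChainInterface> handle, a known orchestrator block hash, and a nimbus public key, and the surrounding names are illustrative rather than part of this commit.

// Hypothetical: query the current and next-session assignment for one key.
async fn log_assignment(
    interface: std::sync::Arc<dyn OrchestratorChainInterface>,
    orchestrator_parent: PHash,
    key: NimbusId,
) -> OrchestratorChainResult<()> {
    let current = interface
        .check_para_id_assignment(orchestrator_parent, key.clone())
        .await?;
    let next = interface
        .check_para_id_assignment_next_session(orchestrator_parent, key)
        .await?;
    log::info!("assignment now: {:?}, next session: {:?}", current, next);
    Ok(())
}
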
17 changes: 17 additions & 0 deletions client/service-container-chain/src/data_preservers.rs
@@ -119,6 +119,7 @@ mod tests {
},
dp_container_chain_genesis_data::ContainerChainGenesisData,
futures::Stream,
nimbus_primitives::NimbusId,
polkadot_overseer::Handle,
sc_client_api::StorageProof,
sp_core::H256,
@@ -272,6 +273,22 @@ mod tests {
.cloned()
.unwrap_or(DataPreserverAssignment::NotAssigned))
}

async fn check_para_id_assignment(
&self,
_orchestrator_parent: PHash,
_authority: NimbusId,
) -> OrchestratorChainResult<Option<ParaId>> {
unimplemented!("not used in test")
}

async fn check_para_id_assignment_next_session(
&self,
_orchestrator_parent: PHash,
_authority: NimbusId,
) -> OrchestratorChainResult<Option<ParaId>> {
unimplemented!("not used in test")
}
}

#[derive(Debug, PartialEq, Eq, Hash)]
