Skip to content

Commit

Permalink
Extended Cluster pallet by Cluster Configuration parameters (#332)
Browse files Browse the repository at this point in the history
## Description
Extended Cluster pallet by Cluster Configuration parameters

## Types of Changes
Please select the branch type you are merging and fill in the relevant
template.
<!--- Check the following box with an x if the following applies: -->
- [ ] Hotfix
- [ ] Release
- [X] Fix or Feature

## Fix or Feature
<!--- Check the following box with an x if the following applies: -->

### Types of Changes
<!--- What types of changes does your code introduce? -->
- [ ] Tech Debt (Code improvements)
- [ ] Bug fix (non-breaking change which fixes an issue)
- [X] New feature (non-breaking change which adds functionality)
- [ ] Breaking change (fix or feature that would cause existing
functionality to change)
- [ ] Dependency upgrade (A change in substrate or any 3rd party crate
version)

### Migrations and Hooks
<!--- Check the following box with an x if the following applies: -->
- [X] This change requires a runtime migration.
- [ ] Modifies `on_initialize`
- [ ] Modifies `on_finalize`

### Checklist for Fix or Feature
<!--- All boxes need to be checked. Follow this checklist before
requiring PR review -->
- [X] Change has been tested locally.
- [X] Change adds / updates tests if applicable.
- [X] Changelog doc updated.

## Checklist for Hotfix
<!--- All boxes need to be checked. Follow this checklist before
requiring PR review -->
- [ ] Change has been deployed to Testnet.
- [ ] Change has been tested in Testnet.
- [ ] Changelog has been updated.
- [ ] Crate version has been updated.
- [ ] Spec version has been updated.
- [ ] Transaction version has been updated if required.
- [ ] Pull Request to `dev` has been created.
- [ ] Pull Request to `staging` has been created.

## Checklist for Release
<!--- All boxes need to be checked. Follow this checklist before
requiring PR review -->
- [ ] Change has been deployed to Devnet.
- [ ] Change has been tested in Devnet.
- [ ] Change has been deployed to Qanet.
- [ ] Change has been tested in Qanet.
- [ ] Change has been deployed to Testnet.
- [ ] Change has been tested in Testnet.
- [X] Changelog has been updated.
- [ ] Crate version has been updated.
- [X] Spec version has been updated.
- [X] Transaction version has been updated if required.
  • Loading branch information
ayushmishra2005 authored May 10, 2024
1 parent 99feab3 commit b1afc1d
Show file tree
Hide file tree
Showing 16 changed files with 450 additions and 31 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

- [C,D] Updated Substrate to polkadot-v1.1.0
- [C,D] Introduction of the OpenGov
- [C,D] `pallet-ddc-clusters`: Added Erasure coding and Replication in cluster params

## [5.2.0]

Expand Down
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions pallets/ddc-clusters/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ repository.workspace = true
# 3rd-party dependencies
codec = { workspace = true }
hex-literal = { workspace = true }
log = { workspace = true }
scale-info = { workspace = true }
serde = { workspace = true }

Expand Down
24 changes: 21 additions & 3 deletions pallets/ddc-clusters/src/benchmarking.rs
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,12 @@ benchmarks! {
create_cluster {
let cluster_id = ClusterId::from([1; 20]);
let user = account::<T::AccountId>("user", USER_SEED, 0u32);
let cluster_params = ClusterParams { node_provider_auth_contract: Some(user.clone()) };
let cluster_params = ClusterParams {
node_provider_auth_contract: Some(user.clone()),
erasure_coding_required: 4,
erasure_coding_total: 6,
replication_total: 3
};
let cluster_gov_params: ClusterGovParams<BalanceOf<T>, BlockNumberFor<T>> = ClusterGovParams {
treasury_share: Perquintill::default(),
validators_share: Perquintill::default(),
Expand Down Expand Up @@ -78,10 +83,23 @@ benchmarks! {
let user = account::<T::AccountId>("user", USER_SEED, 0u32);
let user_2 = account::<T::AccountId>("user", USER_SEED_2, 0u32);
let _ = config_cluster::<T>(user.clone(), cluster_id);
let new_cluster_params = ClusterParams { node_provider_auth_contract: Some(user_2.clone()) };
let new_cluster_params = ClusterParams {
node_provider_auth_contract: Some(user_2.clone()),
erasure_coding_required: 4,
erasure_coding_total: 6,
replication_total: 3
};
}: _(RawOrigin::Signed(user.clone()), cluster_id, new_cluster_params)
verify {
assert_eq!(Clusters::<T>::try_get(cluster_id).unwrap().props, ClusterProps { node_provider_auth_contract: Some(user_2) });
assert_eq!(
Clusters::<T>::try_get(cluster_id).unwrap().props,
ClusterProps {
node_provider_auth_contract: Some(user_2),
erasure_coding_required: 4,
erasure_coding_total: 6,
replication_total: 3
}
);
}

set_cluster_gov_params {
Expand Down
9 changes: 9 additions & 0 deletions pallets/ddc-clusters/src/cluster.rs
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,9 @@ pub struct Cluster<AccountId> {
#[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo, PartialEq, Serialize, Deserialize)]
pub struct ClusterProps<AccountId> {
pub node_provider_auth_contract: Option<AccountId>,
pub erasure_coding_required: u32,
pub erasure_coding_total: u32,
pub replication_total: u32,
}

impl<AccountId> Cluster<AccountId> {
Expand All @@ -36,6 +39,9 @@ impl<AccountId> Cluster<AccountId> {
reserve_id,
props: ClusterProps {
node_provider_auth_contract: cluster_params.node_provider_auth_contract,
erasure_coding_required: cluster_params.erasure_coding_required,
erasure_coding_total: cluster_params.erasure_coding_total,
replication_total: cluster_params.replication_total,
},
})
}
Expand All @@ -46,6 +52,9 @@ impl<AccountId> Cluster<AccountId> {
) -> Result<(), ClusterError> {
self.props = ClusterProps {
node_provider_auth_contract: cluster_params.node_provider_auth_contract,
erasure_coding_required: cluster_params.erasure_coding_required,
erasure_coding_total: cluster_params.erasure_coding_total,
replication_total: cluster_params.replication_total,
};
Ok(())
}
Expand Down
45 changes: 42 additions & 3 deletions pallets/ddc-clusters/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,8 @@ pub(crate) mod mock;
#[cfg(test)]
mod tests;

pub mod migration;

use ddc_primitives::{
traits::{
cluster::{ClusterCreator, ClusterVisitor, ClusterVisitorError},
Expand Down Expand Up @@ -66,7 +68,7 @@ pub mod pallet {

/// The current storage version.
const STORAGE_VERSION: frame_support::traits::StorageVersion =
frame_support::traits::StorageVersion::new(0);
frame_support::traits::StorageVersion::new(1);

#[pallet::pallet]
#[pallet::storage_version(STORAGE_VERSION)]
Expand All @@ -81,6 +83,12 @@ pub mod pallet {
type StakerCreator: StakerCreator<Self, BalanceOf<Self>>;
type Currency: LockableCurrency<Self::AccountId, Moment = BlockNumberFor<Self>>;
type WeightInfo: WeightInfo;
#[pallet::constant]
type MinErasureCodingRequiredLimit: Get<u32>;
#[pallet::constant]
type MinErasureCodingTotalLimit: Get<u32>;
#[pallet::constant]
type MinReplicationTotalLimit: Get<u32>;
}

#[pallet::event]
Expand Down Expand Up @@ -111,6 +119,9 @@ pub mod pallet {
NodeAuthContractCallFailed,
NodeAuthContractDeployFailed,
NodeAuthNodeAuthorizationNotSuccessful,
ErasureCodingRequiredDidNotMeetMinimum,
ErasureCodingTotalNotMeetMinimum,
ReplicationTotalDidNotMeetMinimum,
}

#[pallet::storage]
Expand Down Expand Up @@ -171,6 +182,9 @@ pub mod pallet {
.props
.node_provider_auth_contract
.clone(),
erasure_coding_required: 4,
erasure_coding_total: 6,
replication_total: 3
},
self.clusters_gov_params
.iter()
Expand Down Expand Up @@ -297,6 +311,18 @@ pub mod pallet {
let mut cluster =
Clusters::<T>::try_get(cluster_id).map_err(|_| Error::<T>::ClusterDoesNotExist)?;
ensure!(cluster.manager_id == caller_id, Error::<T>::OnlyClusterManager);
ensure!(
cluster_params.erasure_coding_required >= T::MinErasureCodingRequiredLimit::get(),
Error::<T>::ErasureCodingRequiredDidNotMeetMinimum
);
ensure!(
cluster_params.erasure_coding_total >= T::MinErasureCodingTotalLimit::get(),
Error::<T>::ErasureCodingTotalNotMeetMinimum
);
ensure!(
cluster_params.replication_total >= T::MinReplicationTotalLimit::get(),
Error::<T>::ReplicationTotalDidNotMeetMinimum
);
cluster.set_params(cluster_params).map_err(Into::<Error<T>>::into)?;
Clusters::<T>::insert(cluster_id, cluster);
Self::deposit_event(Event::<T>::ClusterParamsSet { cluster_id });
Expand Down Expand Up @@ -330,11 +356,24 @@ pub mod pallet {
cluster_params: ClusterParams<T::AccountId>,
cluster_gov_params: ClusterGovParams<BalanceOf<T>, BlockNumberFor<T>>,
) -> DispatchResult {
ensure!(!Clusters::<T>::contains_key(cluster_id), Error::<T>::ClusterAlreadyExists);

ensure!(
cluster_params.erasure_coding_required >= T::MinErasureCodingRequiredLimit::get(),
Error::<T>::ErasureCodingRequiredDidNotMeetMinimum
);
ensure!(
cluster_params.erasure_coding_total >= T::MinErasureCodingTotalLimit::get(),
Error::<T>::ErasureCodingTotalNotMeetMinimum
);
ensure!(
cluster_params.replication_total >= T::MinReplicationTotalLimit::get(),
Error::<T>::ReplicationTotalDidNotMeetMinimum
);

let cluster =
Cluster::new(cluster_id, cluster_manager_id, cluster_reserve_id, cluster_params)
.map_err(Into::<Error<T>>::into)?;
ensure!(!Clusters::<T>::contains_key(cluster_id), Error::<T>::ClusterAlreadyExists);

Clusters::<T>::insert(cluster_id, cluster);
ClustersGovParams::<T>::insert(cluster_id, cluster_gov_params);
Self::deposit_event(Event::<T>::ClusterCreated { cluster_id });
Expand Down
198 changes: 198 additions & 0 deletions pallets/ddc-clusters/src/migration.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,198 @@
#[cfg(feature = "try-runtime")]
use frame_support::ensure;
use frame_support::{
storage_alias,
traits::{Get, GetStorageVersion, OnRuntimeUpgrade, StorageVersion},
weights::Weight,
};
use log::info;
#[cfg(feature = "try-runtime")]
use sp_runtime::DispatchError;
use sp_runtime::Saturating;

use super::*;
use crate::cluster::ClusterProps;

const LOG_TARGET: &str = "ddc-clusters";

/// Storage layout of the `ddc-clusters` pallet at version 0.
///
/// Kept only so the v0 -> v1 migration can decode the pre-upgrade on-chain
/// values; new code should use the current `Cluster` / `ClusterProps` types.
pub mod v0 {
	use frame_support::pallet_prelude::*;

	use super::*;

	/// Pre-migration cluster record (same shape as v1 except for `props`).
	#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo)]
	pub struct Cluster<AccountId> {
		pub cluster_id: ClusterId,
		pub manager_id: AccountId,
		pub reserve_id: AccountId,
		pub props: ClusterProps<AccountId>,
	}

	/// Pre-migration cluster properties: no erasure-coding / replication
	/// fields yet. Field order must not change — SCALE decoding of the
	/// stored v0 values depends on it.
	#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo)]
	pub struct ClusterProps<AccountId> {
		pub node_provider_auth_contract: Option<AccountId>,
	}

	/// Alias onto the pallet's `Clusters` map, typed with the v0 value
	/// layout so old entries can be read/iterated during migration.
	#[storage_alias]
	pub(super) type Clusters<T: Config> = StorageMap<
		crate::Pallet<T>,
		Blake2_128Concat,
		ClusterId,
		Cluster<<T as frame_system::Config>::AccountId>,
	>;
}

/// Migrates `Clusters` storage from v0 to v1, injecting default
/// erasure-coding / replication parameters into every cluster's `props`.
///
/// Runs only when the on-chain version is 0 and the code version is 1;
/// otherwise it is a no-op costing a single storage-version read.
/// Returns the weight consumed, accounting for the counting pass, the
/// per-entry translate read/write, and the version read/write.
pub fn migrate_to_v1<T: Config>() -> Weight {
	let on_chain_version = Pallet::<T>::on_chain_storage_version();
	let current_version = Pallet::<T>::current_storage_version();

	info!(
		target: LOG_TARGET,
		"Running migration with current storage version {:?} / onchain {:?}",
		current_version,
		on_chain_version
	);

	if on_chain_version == 0 && current_version == 1 {
		// Counting pass: reads every v0 entry once (charged in the weight below).
		let count = v0::Clusters::<T>::iter().count() as u64;
		info!(
			target: LOG_TARGET,
			" >>> Updating DDC Cluster storage. Migrating {} clusters...", count
		);

		let mut translated = 0u64;
		Clusters::<T>::translate::<v0::Cluster<T::AccountId>, _>(
			|cluster_id: ClusterId, cluster: v0::Cluster<T::AccountId>| {
				info!(target: LOG_TARGET, " Migrating cluster for cluster ID {:?}...", cluster_id);
				translated.saturating_inc();
				// Chain-wide defaults chosen for the new redundancy parameters.
				let props = ClusterProps {
					node_provider_auth_contract: cluster.props.node_provider_auth_contract,
					erasure_coding_required: 16,
					erasure_coding_total: 48,
					replication_total: 20,
				};

				Some(Cluster {
					cluster_id: cluster.cluster_id,
					manager_id: cluster.manager_id,
					reserve_id: cluster.reserve_id,
					props,
				})
			},
		);

		// Update storage version.
		StorageVersion::new(1).put::<Pallet<T>>();
		info!(
			target: LOG_TARGET,
			"Upgraded {} records, storage to version {:?}",
			translated,
			current_version
		);

		// Reads: version read + full counting pass + one read per translated
		// entry. Writes: one per translated entry + the version write.
		// (The original under-counted by omitting the counting pass.)
		T::DbWeight::get().reads_writes(
			count.saturating_add(translated).saturating_add(1),
			translated.saturating_add(1),
		)
	} else {
		info!(target: LOG_TARGET, " >>> Unused migration!");
		T::DbWeight::get().reads(1)
	}
}
/// Runtime-upgrade wrapper around [`migrate_to_v1`]. Under `try-runtime`
/// it additionally checks that the cluster count is preserved and that the
/// storage version ends up at 1.
pub struct MigrateToV1<T>(sp_std::marker::PhantomData<T>);

impl<T: Config> OnRuntimeUpgrade for MigrateToV1<T> {
	fn on_runtime_upgrade() -> Weight {
		migrate_to_v1::<T>()
	}

	#[cfg(feature = "try-runtime")]
	fn pre_upgrade() -> Result<Vec<u8>, DispatchError> {
		// Snapshot the pre-upgrade cluster count for post_upgrade to compare.
		let clusters_before = v0::Clusters::<T>::iter().count() as u64;
		Ok(clusters_before.encode())
	}

	#[cfg(feature = "try-runtime")]
	fn post_upgrade(state: Vec<u8>) -> Result<(), DispatchError> {
		let clusters_before: u64 =
			Decode::decode(&mut &state[..]).expect("pre_upgrade provides a valid state; qed");
		let clusters_after = Clusters::<T>::iter().count() as u64;

		ensure!(
			clusters_before == clusters_after,
			"the cluster count before and after the migration should be the same"
		);

		let on_chain_version = Pallet::<T>::on_chain_storage_version();
		let current_version = Pallet::<T>::current_storage_version();

		ensure!(current_version == 1, "must_upgrade");
		ensure!(
			current_version == on_chain_version,
			"after migration, the current_version and on_chain_version should be the same"
		);
		Ok(())
	}
}

#[cfg(test)]
#[cfg(feature = "try-runtime")]
mod test {

	use frame_support::pallet_prelude::StorageVersion;

	use super::*;
	use crate::mock::{Test as T, *};

	/// End-to-end check of the v0 -> v1 migration: seeds clusters in the v0
	/// layout, runs the upgrade hooks (including try-runtime pre/post
	/// checks), and verifies every cluster carries the migration defaults
	/// (erasure 16/48, replication 20).
	#[test]
	fn cluster_migration_works() {
		ExtBuilder.build_and_execute(|| {
			// NOTE(review): cluster_id0 is never inserted in this test, yet
			// the counts below expect 3 clusters and cluster_id0 is asserted
			// after migration — presumably the mock's genesis config
			// (ExtBuilder) creates it. TODO: confirm against mock.rs.
			let cluster_id0 = ClusterId::from([0; 20]);
			let cluster_id1 = ClusterId::from([1; 20]);
			let cluster_id2 = ClusterId::from([2; 20]);
			let cluster_manager_id = AccountId::from([1; 32]);
			let cluster_reserve_id = AccountId::from([2; 32]);
			let auth_contract = AccountId::from([3; 32]);

			// Pre-migration: pallet storage version must still be 0.
			assert_eq!(StorageVersion::get::<Pallet<T>>(), 0);

			// Seed two clusters using the v0 (pre-migration) layout.
			let cluster1 = v0::Cluster {
				cluster_id: cluster_id1,
				manager_id: cluster_manager_id.clone(),
				reserve_id: cluster_reserve_id.clone(),
				props: v0::ClusterProps {
					node_provider_auth_contract: Some(auth_contract.clone()),
				},
			};

			v0::Clusters::<T>::insert(cluster_id1, cluster1);
			let cluster2 = v0::Cluster {
				cluster_id: cluster_id2,
				manager_id: cluster_manager_id,
				reserve_id: cluster_reserve_id,
				props: v0::ClusterProps {
					node_provider_auth_contract: Some(auth_contract.clone()),
				},
			};

			v0::Clusters::<T>::insert(cluster_id2, cluster2);
			let cluster_count = v0::Clusters::<T>::iter_values().count() as u32;

			// 2 inserted above + 1 assumed from genesis (see NOTE above).
			assert_eq!(cluster_count, 3);
			// Drive the full try-runtime upgrade flow.
			let state = MigrateToV1::<T>::pre_upgrade().unwrap();
			let _weight = MigrateToV1::<T>::on_runtime_upgrade();
			MigrateToV1::<T>::post_upgrade(state).unwrap();

			let cluster_count_after_upgrade = Clusters::<T>::iter_values().count() as u32;

			// Post-migration: version bumped, count preserved, and every
			// cluster got the default redundancy parameters (16 / 48 / 20).
			assert_eq!(StorageVersion::get::<Pallet<T>>(), 1);
			assert_eq!(cluster_count_after_upgrade, 3);
			assert_eq!(Clusters::<T>::get(cluster_id0).unwrap().props.erasure_coding_required, 16);
			assert_eq!(Clusters::<T>::get(cluster_id0).unwrap().props.erasure_coding_total, 48);
			assert_eq!(Clusters::<T>::get(cluster_id0).unwrap().props.replication_total, 20);
			assert_eq!(Clusters::<T>::get(cluster_id1).unwrap().props.erasure_coding_required, 16);
			assert_eq!(Clusters::<T>::get(cluster_id1).unwrap().props.erasure_coding_total, 48);
			assert_eq!(Clusters::<T>::get(cluster_id1).unwrap().props.replication_total, 20);
			assert_eq!(Clusters::<T>::get(cluster_id2).unwrap().props.erasure_coding_required, 16);
			assert_eq!(Clusters::<T>::get(cluster_id2).unwrap().props.erasure_coding_total, 48);
			assert_eq!(Clusters::<T>::get(cluster_id2).unwrap().props.replication_total, 20);
		});
	}
}
Loading

0 comments on commit b1afc1d

Please sign in to comment.