Skip to content

Commit

Permalink
Changes to accept stored_bytes as signed input (#410)
Browse files Browse the repository at this point in the history
## Description
<!-- Describe what change this PR is implementing -->

## Types of Changes
Please select the branch type you are merging and fill in the relevant
template.
<!--- Check the following box with an x if the following applies: -->
- [ ] Hotfix
- [ ] Release
- [ ] Fix or Feature

## Fix or Feature
<!--- Check the following box with an x if the following applies: -->

### Types of Changes
<!--- What types of changes does your code introduce? -->
- [ ] Tech Debt (Code improvements)
- [ ] Bug fix (non-breaking change which fixes an issue)
- [ ] New feature (non-breaking change which adds functionality)
- [ ] Breaking change (fix or feature that would cause existing
functionality to change)
- [ ] Dependency upgrade (A change in substrate or any 3rd party crate
version)

### Migrations and Hooks
<!--- Check the following box with an x if the following applies: -->
- [ ] This change requires a runtime migration.
- [ ] Modifies `on_initialize`
- [ ] Modifies `on_finalize`

### Checklist for Fix or Feature
<!--- All boxes need to be checked. Follow this checklist before
requesting PR review -->
- [ ] Change has been tested locally.
- [ ] Change adds / updates tests if applicable.
- [ ] Changelog doc updated.
- [ ] `spec_version` has been incremented.
- [ ] `network-relayer`'s
[events](https://github.com/Cerebellum-Network/network-relayer/blob/dev-cere/shared/substrate/events.go)
have been updated according to the blockchain events if applicable.
- [ ] All CI checks have passed successfully

## Checklist for Hotfix
<!--- All boxes need to be checked. Follow this checklist before
requesting PR review -->
- [ ] Change has been deployed to Testnet.
- [ ] Change has been tested in Testnet.
- [ ] Changelog has been updated.
- [ ] Crate version has been updated.
- [ ] `spec_version` has been incremented.
- [ ] Transaction version has been updated if required.
- [ ] Pull Request to `dev` has been created.
- [ ] Pull Request to `staging` has been created.
- [ ] `network-relayer`'s
[events](https://github.com/Cerebellum-Network/network-relayer/blob/dev-cere/shared/substrate/events.go)
have been updated according to the blockchain events if applicable.
- [ ] All CI checks have passed successfully

## Checklist for Release
<!--- All boxes need to be checked. Follow this checklist before
requesting PR review -->
- [ ] Change has been deployed to Devnet.
- [ ] Change has been tested in Devnet.
- [ ] Change has been deployed to Qanet.
- [ ] Change has been tested in Qanet.
- [ ] Change has been deployed to Testnet.
- [ ] Change has been tested in Testnet.
- [ ] Changelog has been updated.
- [ ] Crate version has been updated.
- [ ] Spec version has been updated.
- [ ] Transaction version has been updated if required.
- [ ] All CI checks have passed successfully
  • Loading branch information
ayushmishra2005 authored Aug 6, 2024
1 parent 57a3876 commit 00eed38
Show file tree
Hide file tree
Showing 8 changed files with 137 additions and 59 deletions.
4 changes: 2 additions & 2 deletions pallets/ddc-customers/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -204,7 +204,7 @@ pub mod pallet {
cluster_id: ClusterId,
bucket_id: BucketId,
transferred_bytes: u64,
stored_bytes: u64,
stored_bytes: i64,
number_of_puts: u64,
number_of_gets: u64,
},
Expand All @@ -213,7 +213,7 @@ pub mod pallet {
cluster_id: ClusterId,
bucket_id: BucketId,
transferred_bytes: u64,
stored_bytes: u64,
stored_bytes: i64,
number_of_puts: u64,
number_of_gets: u64,
},
Expand Down
2 changes: 1 addition & 1 deletion pallets/ddc-payouts/src/benchmarking.rs
Original file line number Diff line number Diff line change
Expand Up @@ -364,7 +364,7 @@ benchmarks! {
let total_distributed_reward : u128 = 0;
let total_node_usage = NodeUsage {
transferred_bytes: 200000000u64.saturating_mul(b.into()), // 200 mb per provider
stored_bytes: 100000000u64.saturating_mul(b.into()), // 100 mb per provider
stored_bytes: 100000000i64.saturating_mul(b.into()), // 100 mb per provider
number_of_gets: 10u64.saturating_mul(b.into()), // 10 gets per provider
number_of_puts: 10u64.saturating_mul(b.into()), // 5 puts per provider
};
Expand Down
9 changes: 7 additions & 2 deletions pallets/ddc-payouts/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -1079,11 +1079,16 @@ pub mod pallet {
let fraction_of_month =
Perquintill::from_rational(duration_seconds as u64, seconds_in_month as u64);

let mut total_stored_bytes =
let mut total_stored_bytes: i64 =
T::BucketVisitor::get_total_customer_usage(cluster_id, bucket_id, customer_id)
.map_err(Into::<Error<T>>::into)?
.map_or(0, |customer_usage| customer_usage.stored_bytes);
total_stored_bytes += usage.stored_bytes;

ensure!(total_stored_bytes >= 0, Error::<T>::ArithmeticOverflow);

total_stored_bytes = total_stored_bytes
.checked_add(usage.stored_bytes)
.ok_or(Error::<T>::ArithmeticOverflow)?;

total.storage = fraction_of_month *
(|| -> Option<u128> {
Expand Down
69 changes: 47 additions & 22 deletions pallets/ddc-payouts/src/tests.rs
Original file line number Diff line number Diff line change
Expand Up @@ -201,7 +201,13 @@ fn send_charging_customers_batch_fails_uninitialised() {
let batch_index = 1;
let bucket_id1: BucketId = 1;
let bucket_id2: BucketId = 2;
let payers1 = vec![(user1, bucket_id1, CustomerUsage::default())];
let customer_usage = CustomerUsage {
transferred_bytes: 100,
stored_bytes: -800,
number_of_gets: 100,
number_of_puts: 200,
};
let payers1 = vec![(user1, bucket_id1, customer_usage)];
let payers2 = vec![(user2, bucket_id2, CustomerUsage::default())];
let start_date = NaiveDate::from_ymd_opt(2023, 4, 1).unwrap(); // April 1st

Expand Down Expand Up @@ -275,6 +281,19 @@ fn send_charging_customers_batch_fails_uninitialised() {
max_batch_index,
));

assert_noop!(
DdcPayouts::send_charging_customers_batch(
RuntimeOrigin::signed(dac_account),
cluster_id,
era,
batch_index,
payers1.clone(),
MMRProof::default(),
),
Error::<Test>::ArithmeticOverflow
);

let payers1 = vec![(user2, bucket_id2, CustomerUsage::default())];
assert_ok!(DdcPayouts::send_charging_customers_batch(
RuntimeOrigin::signed(dac_account),
cluster_id,
Expand Down Expand Up @@ -3390,8 +3409,10 @@ fn send_rewarding_providers_batch_works() {
);
let mut transfer_charge = ratio1_transfer * report_after.total_customer_charge.transfer;

let ratio1_storage =
Perquintill::from_rational(node_usage1.stored_bytes, total_nodes_usage.stored_bytes);
let ratio1_storage = Perquintill::from_rational(
node_usage1.stored_bytes as u64,
total_nodes_usage.stored_bytes as u64,
);
let mut storage_charge = ratio1_storage * report_after.total_customer_charge.storage;

let ratio1_puts = Perquintill::from_rational(
Expand Down Expand Up @@ -3428,8 +3449,10 @@ fn send_rewarding_providers_batch_works() {
);
transfer_charge = ratio2_transfer * report_after.total_customer_charge.transfer;

let ratio2_storage =
Perquintill::from_rational(node_usage2.stored_bytes, total_nodes_usage.stored_bytes);
let ratio2_storage = Perquintill::from_rational(
node_usage2.stored_bytes as u64,
total_nodes_usage.stored_bytes as u64,
);
storage_charge = ratio2_storage * report_after.total_customer_charge.storage;

let ratio2_puts = Perquintill::from_rational(
Expand Down Expand Up @@ -3476,8 +3499,10 @@ fn send_rewarding_providers_batch_works() {
);
transfer_charge = ratio3_transfer * report_after.total_customer_charge.transfer;

let ratio3_storage =
Perquintill::from_rational(node_usage3.stored_bytes, total_nodes_usage.stored_bytes);
let ratio3_storage = Perquintill::from_rational(
node_usage3.stored_bytes as u64,
total_nodes_usage.stored_bytes as u64,
);
storage_charge = ratio3_storage * report_after.total_customer_charge.storage;

let ratio3_puts = Perquintill::from_rational(
Expand Down Expand Up @@ -3621,7 +3646,7 @@ fn send_rewarding_providers_batch_100_nodes_small_usage_works() {

let mut user_usage = usage1.clone();
user_usage.transferred_bytes = ratio * user_usage.transferred_bytes;
user_usage.stored_bytes = ratio * user_usage.stored_bytes;
user_usage.stored_bytes = (ratio * user_usage.stored_bytes as u64) as i64;
user_usage.number_of_puts = ratio * user_usage.number_of_puts;
user_usage.number_of_gets = ratio * user_usage.number_of_gets;

Expand Down Expand Up @@ -3758,8 +3783,8 @@ fn send_rewarding_providers_batch_100_nodes_small_usage_works() {
let transfer_charge = ratio1_transfer * report_after.total_customer_charge.transfer;

let ratio1_storage = Perquintill::from_rational(
node_usage1.stored_bytes,
total_nodes_usage.stored_bytes,
node_usage1.stored_bytes as u64,
total_nodes_usage.stored_bytes as u64,
);
let storage_charge = ratio1_storage * report_after.total_customer_charge.storage;

Expand Down Expand Up @@ -3865,7 +3890,7 @@ fn send_rewarding_providers_batch_100_nodes_large_usage_works() {
_ => unreachable!(),
};
node_usage.transferred_bytes = ratio * node_usage.transferred_bytes;
node_usage.stored_bytes = ratio * node_usage.stored_bytes;
node_usage.stored_bytes = (ratio * node_usage.stored_bytes as u64) as i64;
node_usage.number_of_puts = ratio * node_usage.number_of_puts;
node_usage.number_of_gets = ratio * node_usage.number_of_gets;

Expand Down Expand Up @@ -3899,7 +3924,7 @@ fn send_rewarding_providers_batch_100_nodes_large_usage_works() {

let mut user_usage = usage1.clone();
user_usage.transferred_bytes = ratio * user_usage.transferred_bytes;
user_usage.stored_bytes = ratio * user_usage.stored_bytes;
user_usage.stored_bytes = (ratio * user_usage.stored_bytes as u64) as i64;
user_usage.number_of_puts = ratio * user_usage.number_of_puts;
user_usage.number_of_gets = ratio * user_usage.number_of_gets;

Expand Down Expand Up @@ -4036,8 +4061,8 @@ fn send_rewarding_providers_batch_100_nodes_large_usage_works() {
let transfer_charge = ratio1_transfer * report_after.total_customer_charge.transfer;

let ratio1_storage = Perquintill::from_rational(
node_usage1.stored_bytes,
total_nodes_usage.stored_bytes,
node_usage1.stored_bytes as u64,
total_nodes_usage.stored_bytes as u64,
);
let storage_charge = ratio1_storage * report_after.total_customer_charge.storage;

Expand Down Expand Up @@ -4142,7 +4167,7 @@ fn send_rewarding_providers_batch_100_nodes_small_large_usage_works() {
_ => unreachable!(),
};
node_usage.transferred_bytes = ratio * node_usage.transferred_bytes;
node_usage.stored_bytes = ratio * node_usage.stored_bytes;
node_usage.stored_bytes = (ratio * node_usage.stored_bytes as u64) as i64;
node_usage.number_of_puts = ratio * node_usage.number_of_puts;
node_usage.number_of_gets = ratio * node_usage.number_of_gets;

Expand Down Expand Up @@ -4176,7 +4201,7 @@ fn send_rewarding_providers_batch_100_nodes_small_large_usage_works() {

let mut user_usage = usage1.clone();
user_usage.transferred_bytes = ratio * user_usage.transferred_bytes;
user_usage.stored_bytes = ratio * user_usage.stored_bytes;
user_usage.stored_bytes = (ratio * user_usage.stored_bytes as u64) as i64;
user_usage.number_of_puts = ratio * user_usage.number_of_puts;
user_usage.number_of_gets = ratio * user_usage.number_of_gets;

Expand Down Expand Up @@ -4313,8 +4338,8 @@ fn send_rewarding_providers_batch_100_nodes_small_large_usage_works() {
let transfer_charge = ratio1_transfer * report_after.total_customer_charge.transfer;

let ratio1_storage = Perquintill::from_rational(
node_usage1.stored_bytes,
total_nodes_usage.stored_bytes,
node_usage1.stored_bytes as u64,
total_nodes_usage.stored_bytes as u64,
);
let storage_charge = ratio1_storage * report_after.total_customer_charge.storage;

Expand Down Expand Up @@ -4385,7 +4410,7 @@ fn send_rewarding_providers_batch_100_nodes_random_usage_works() {
for i in 10..10 + num_nodes {
let node_usage = NodeUsage {
transferred_bytes: generate_random_u64(&mock_randomness, min, max),
stored_bytes: generate_random_u64(&mock_randomness, min, max),
stored_bytes: (generate_random_u64(&mock_randomness, min, max)) as i64,
number_of_puts: generate_random_u64(&mock_randomness, min, max),
number_of_gets: generate_random_u64(&mock_randomness, min, max),
};
Expand All @@ -4411,7 +4436,7 @@ fn send_rewarding_providers_batch_100_nodes_random_usage_works() {
for user_id in 1000..1000 + num_users {
let user_usage = CustomerUsage {
transferred_bytes: generate_random_u64(&mock_randomness, min, max),
stored_bytes: generate_random_u64(&mock_randomness, min, max),
stored_bytes: (generate_random_u64(&mock_randomness, min, max)) as i64,
number_of_puts: generate_random_u64(&mock_randomness, min, max),
number_of_gets: generate_random_u64(&mock_randomness, min, max),
};
Expand Down Expand Up @@ -4549,8 +4574,8 @@ fn send_rewarding_providers_batch_100_nodes_random_usage_works() {
let transfer_charge = ratio1_transfer * report_after.total_customer_charge.transfer;

let ratio1_storage = Perquintill::from_rational(
node_usage1.stored_bytes,
total_nodes_usage.stored_bytes,
node_usage1.stored_bytes as u64,
total_nodes_usage.stored_bytes as u64,
);
let storage_charge = ratio1_storage * report_after.total_customer_charge.storage;

Expand Down
76 changes: 51 additions & 25 deletions pallets/ddc-verification/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -267,6 +267,11 @@ pub mod pallet {
ValidatorKeySet {
validator: T::AccountId,
},
TotalNodeUsageLessThanZero {
cluster_id: ClusterId,
era_id: DdcEra,
validator: T::AccountId,
},
}

/// Consensus Errors
Expand Down Expand Up @@ -366,6 +371,10 @@ pub mod pallet {
},
FailedToFetchCurrentValidator,
FailedToFetchNodeProvider,
TotalNodeUsageLessThanZero {
cluster_id: ClusterId,
era_id: DdcEra,
},
}

#[pallet::error]
Expand Down Expand Up @@ -498,7 +507,7 @@ pub mod pallet {
/// Node id.
pub(crate) node_id: String,
/// Total amount of stored bytes.
pub(crate) stored_bytes: u64,
pub(crate) stored_bytes: i64,
/// Total amount of transferred bytes.
pub(crate) transferred_bytes: u64,
/// Total number of puts.
Expand All @@ -517,7 +526,7 @@ pub mod pallet {
/// Bucket id
pub(crate) bucket_id: BucketId,
/// Total amount of stored bytes.
pub(crate) stored_bytes: u64,
pub(crate) stored_bytes: i64,
/// Total amount of transferred bytes.
pub(crate) transferred_bytes: u64,
/// Total number of puts.
Expand Down Expand Up @@ -1536,7 +1545,7 @@ pub mod pallet {
}
}

fn fetch_reward_activities(
pub(crate) fn fetch_reward_activities(
cluster_id: &ClusterId,
era_id: DdcEra,
nodes_activity_in_consensus: Vec<NodeActivity>,
Expand All @@ -1549,21 +1558,33 @@ pub mod pallet {
vec![OCWError::BatchIndexConversionFailed { cluster_id: *cluster_id, era_id }]
})?;

let total_node_usage = nodes_activity_in_consensus.into_iter().fold(
NodeUsage {
transferred_bytes: 0,
stored_bytes: 0,
number_of_puts: 0,
number_of_gets: 0,
},
|mut acc, activity| {
acc.transferred_bytes += activity.transferred_bytes;
acc.stored_bytes += activity.stored_bytes;
acc.number_of_puts += activity.number_of_puts;
acc.number_of_gets += activity.number_of_gets;
acc
},
);
let total_node_usage = nodes_activity_in_consensus
.into_iter()
.try_fold(
NodeUsage {
transferred_bytes: 0,
stored_bytes: 0,
number_of_puts: 0,
number_of_gets: 0,
},
|mut acc: NodeUsage, activity| {
let total_stored_bytes = acc.stored_bytes + activity.stored_bytes;

if total_stored_bytes < 0 {
Err(OCWError::TotalNodeUsageLessThanZero {
cluster_id: *cluster_id,
era_id,
})
} else {
acc.transferred_bytes += activity.transferred_bytes;
acc.stored_bytes = total_stored_bytes;
acc.number_of_puts += activity.number_of_puts;
acc.number_of_gets += activity.number_of_gets;
Ok(acc)
}
},
)
.map_err(|e| vec![e])?;

Ok(Some((era_id, max_batch_index, total_node_usage)))
} else {
Expand Down Expand Up @@ -2742,6 +2763,13 @@ pub mod pallet {
validator: caller.clone(),
});
},
OCWError::TotalNodeUsageLessThanZero { cluster_id, era_id } => {
Self::deposit_event(Event::TotalNodeUsageLessThanZero {
cluster_id,
era_id,
validator: caller.clone(),
});
},
}
}

Expand All @@ -2762,17 +2790,15 @@ pub mod pallet {
) -> DispatchResult {
let controller = ensure_signed(origin)?;

ensure!(
T::StakingVisitor::stash_by_ctrl(&controller).is_ok(),
Error::<T>::NotController
);
let stash = T::StakingVisitor::stash_by_ctrl(&controller)
.map_err(|_| Error::<T>::NotController)?;

ensure!(
<ValidatorSet<T>>::get().contains(&ddc_validator_pub),
Error::<T>::NotValidatorStash
);

ValidatorToStashKey::<T>::insert(&ddc_validator_pub, &ddc_validator_pub);
ValidatorToStashKey::<T>::insert(&ddc_validator_pub, &stash);
Self::deposit_event(Event::<T>::ValidatorKeySet { validator: ddc_validator_pub });
Ok(())
}
Expand Down Expand Up @@ -2955,8 +2981,8 @@ pub mod pallet {
ValidatorSet::<T>::put(validators);
}
fn is_ocw_validator(caller: T::AccountId) -> bool {
if let Some(stash) = ValidatorToStashKey::<T>::get(caller) {
<ValidatorSet<T>>::get().contains(&stash)
if ValidatorToStashKey::<T>::contains_key(caller.clone()) {
<ValidatorSet<T>>::get().contains(&caller)
} else {
false
}
Expand Down
Loading

0 comments on commit 00eed38

Please sign in to comment.