diff --git a/consensus/core/src/block_verifier.rs b/consensus/core/src/block_verifier.rs index ac94d261839..3447814092c 100644 --- a/consensus/core/src/block_verifier.rs +++ b/consensus/core/src/block_verifier.rs @@ -82,7 +82,7 @@ impl BlockVerifier for SignedBlockVerifier { }); } - // Verifiy the block's signature. + // Verify the block's signature. block.verify_signature(&self.context)?; // Verify the block's ancestor refs are consistent with the block's round, diff --git a/consensus/core/src/tests/base_committer_tests.rs b/consensus/core/src/tests/base_committer_tests.rs index 08b8e1d698d..0ed89bf7954 100644 --- a/consensus/core/src/tests/base_committer_tests.rs +++ b/consensus/core/src/tests/base_committer_tests.rs @@ -21,7 +21,7 @@ use crate::{ #[test] fn try_direct_commit() { telemetry_subscribers::init_for_testing(); - // Commitee of 4 with even stake + // Committee of 4 with even stake let context = Arc::new(Context::new_for_test(4).0); let dag_state = Arc::new(RwLock::new(DagState::new( context.clone(), @@ -77,7 +77,7 @@ fn try_direct_commit() { #[test] fn idempotence() { telemetry_subscribers::init_for_testing(); - // Commitee of 4 with even stake + // Committee of 4 with even stake let context = Arc::new(Context::new_for_test(4).0); let dag_state = Arc::new(RwLock::new(DagState::new( context.clone(), @@ -121,7 +121,7 @@ fn idempotence() { #[test] fn multiple_direct_commit() { telemetry_subscribers::init_for_testing(); - // Commitee of 4 with even stake + // Committee of 4 with even stake let context = Arc::new(Context::new_for_test(4).0); let dag_state = Arc::new(RwLock::new(DagState::new( context.clone(), @@ -162,7 +162,7 @@ fn multiple_direct_commit() { #[test] fn direct_skip() { telemetry_subscribers::init_for_testing(); - // Commitee of 4 with even stake + // Committee of 4 with even stake let context = Arc::new(Context::new_for_test(4).0); let dag_state = Arc::new(RwLock::new(DagState::new( context.clone(), @@ -214,7 +214,7 @@ fn direct_skip() { #[test] fn indirect_commit() { telemetry_subscribers::init_for_testing(); - // Commitee of 4 with even stake + // Committee of 4 with even stake let context = Arc::new(Context::new_for_test(4).0); let dag_state = Arc::new(RwLock::new(DagState::new( context.clone(), @@ -357,7 +357,7 @@ fn indirect_commit() { #[test] fn indirect_skip() { telemetry_subscribers::init_for_testing(); - // Commitee of 4 with even stake + // Committee of 4 with even stake let context = Arc::new(Context::new_for_test(4).0); let dag_state = Arc::new(RwLock::new(DagState::new( context.clone(), @@ -411,7 +411,7 @@ fn indirect_skip() { dag_state.clone(), )); - // Add enough blocks to reach the decison round of wave 3. + // Add enough blocks to reach the decision round of wave 3. 
let decision_round_wave_3 = committer.decision_round(3); build_dag( context.clone(), @@ -487,7 +487,7 @@ fn indirect_skip() { #[test] fn undecided() { telemetry_subscribers::init_for_testing(); - // Commitee of 4 with even stake + // Committee of 4 with even stake let context = Arc::new(Context::new_for_test(4).0); let dag_state = Arc::new(RwLock::new(DagState::new( context.clone(), @@ -576,7 +576,7 @@ fn undecided() { #[test] fn test_byzantine_direct_commit() { telemetry_subscribers::init_for_testing(); - // Commitee of 4 with even stake + // Committee of 4 with even stake let context = Arc::new(Context::new_for_test(4).0); let dag_state = Arc::new(RwLock::new(DagState::new( context.clone(), @@ -659,19 +659,19 @@ fn test_byzantine_direct_commit() { // non-votes C13 but there are enough good votes to prevent a skip. // Additionally only one of the non-votes per authority should be counted so // we should not skip leader A12. - let decison_block_a14 = VerifiedBlock::new_for_test( + let decision_block_a14 = VerifiedBlock::new_for_test( TestBlock::new(14, 0) .set_ancestors(good_references_voting_round_wave_4.clone()) .build(), ); - dag_state.write().accept_block(decison_block_a14.clone()); + dag_state.write().accept_block(decision_block_a14.clone()); let good_references_voting_round_wave_4_without_c13 = good_references_voting_round_wave_4 .into_iter() .filter(|r| r.author != AuthorityIndex::new_for_test(2)) .collect::<Vec<_>>(); - let decison_block_b14 = VerifiedBlock::new_for_test( + let decision_block_b14 = VerifiedBlock::new_for_test( TestBlock::new(14, 1) .set_ancestors( good_references_voting_round_wave_4_without_c13 @@ -682,9 +682,9 @@ fn test_byzantine_direct_commit() { ) .build(), ); - dag_state.write().accept_block(decison_block_b14.clone()); + dag_state.write().accept_block(decision_block_b14.clone()); - let decison_block_c14 = VerifiedBlock::new_for_test( + let decision_block_c14 = VerifiedBlock::new_for_test( TestBlock::new(14, 2) .set_ancestors( good_references_voting_round_wave_4_without_c13 @@ -695,9 +695,9 @@ fn test_byzantine_direct_commit() { ) .build(), ); - dag_state.write().accept_block(decison_block_c14.clone()); + dag_state.write().accept_block(decision_block_c14.clone()); - let decison_block_d14 = VerifiedBlock::new_for_test( + let decision_block_d14 = VerifiedBlock::new_for_test( TestBlock::new(14, 3) .set_ancestors( good_references_voting_round_wave_4_without_c13 @@ -708,7 +708,7 @@ fn test_byzantine_direct_commit() { ) .build(), ); - dag_state.write().accept_block(decison_block_d14.clone()); + dag_state.write().accept_block(decision_block_d14.clone()); // DagState Update: // - We have A13, B13, D13 & C13 as good votes in the voting round of wave 4 diff --git a/consensus/core/src/tests/pipelined_committer_tests.rs b/consensus/core/src/tests/pipelined_committer_tests.rs index 958e32b6cbb..f7bac7b021e 100644 --- a/consensus/core/src/tests/pipelined_committer_tests.rs +++ b/consensus/core/src/tests/pipelined_committer_tests.rs @@ -655,20 +655,20 @@ fn test_byzantine_validator() { // Additionally only one of the non-votes per authority should be counted so // we should not skip leader A12.
let mut references_round_14 = vec![]; - let decison_block_a14 = VerifiedBlock::new_for_test( + let decision_block_a14 = VerifiedBlock::new_for_test( TestBlock::new(14, 0) .set_ancestors(good_references_voting_round_wave_4.clone()) .build(), ); - references_round_14.push(decison_block_a14.reference()); - dag_state.write().accept_block(decison_block_a14.clone()); + references_round_14.push(decision_block_a14.reference()); + dag_state.write().accept_block(decision_block_a14.clone()); let good_references_voting_round_wave_4_without_b13 = good_references_voting_round_wave_4 .into_iter() .filter(|r| r.author != AuthorityIndex::new_for_test(1)) .collect::<Vec<_>>(); - let decison_block_b14 = VerifiedBlock::new_for_test( + let decision_block_b14 = VerifiedBlock::new_for_test( TestBlock::new(14, 1) .set_ancestors( good_references_voting_round_wave_4_without_b13 @@ -679,10 +679,10 @@ fn test_byzantine_validator() { ) .build(), ); - references_round_14.push(decison_block_b14.reference()); - dag_state.write().accept_block(decison_block_b14.clone()); + references_round_14.push(decision_block_b14.reference()); + dag_state.write().accept_block(decision_block_b14.clone()); - let decison_block_c14 = VerifiedBlock::new_for_test( + let decision_block_c14 = VerifiedBlock::new_for_test( TestBlock::new(14, 2) .set_ancestors( good_references_voting_round_wave_4_without_b13 @@ -693,10 +693,10 @@ fn test_byzantine_validator() { ) .build(), ); - references_round_14.push(decison_block_c14.reference()); - dag_state.write().accept_block(decison_block_c14.clone()); + references_round_14.push(decision_block_c14.reference()); + dag_state.write().accept_block(decision_block_c14.clone()); - let decison_block_d14 = VerifiedBlock::new_for_test( + let decision_block_d14 = VerifiedBlock::new_for_test( TestBlock::new(14, 3) .set_ancestors( good_references_voting_round_wave_4_without_b13 @@ -707,8 +707,8 @@ fn test_byzantine_validator() { ) .build(), ); - references_round_14.push(decison_block_d14.reference()); - dag_state.write().accept_block(decison_block_d14.clone()); + references_round_14.push(decision_block_d14.reference()); + dag_state.write().accept_block(decision_block_d14.clone()); // DagState Update: // - We have A13, B13, D13 & C13 as good votes in the voting round of leader A12 @@ -777,7 +777,7 @@ fn basic_test_setup() -> ( super::UniversalCommitter, ) { telemetry_subscribers::init_for_testing(); - // Commitee of 4 with even stake + // Committee of 4 with even stake let context = Arc::new(Context::new_for_test(4).0); let dag_state = Arc::new(RwLock::new(DagState::new( context.clone(), diff --git a/consensus/core/src/tests/universal_committer_tests.rs b/consensus/core/src/tests/universal_committer_tests.rs index eee399a523c..491d064ae10 100644 --- a/consensus/core/src/tests/universal_committer_tests.rs +++ b/consensus/core/src/tests/universal_committer_tests.rs @@ -664,19 +664,19 @@ fn test_byzantine_direct_commit() { // non-votes C13 but there are enough good votes to prevent a skip. // Additionally only one of the non-votes per authority should be counted so // we should not skip leader A12.
- let decison_block_a14 = VerifiedBlock::new_for_test( + let decision_block_a14 = VerifiedBlock::new_for_test( TestBlock::new(14, 0) .set_ancestors(good_references_voting_round_wave_4.clone()) .build(), ); - dag_state.write().accept_block(decison_block_a14.clone()); + dag_state.write().accept_block(decision_block_a14.clone()); let good_references_voting_round_wave_4_without_c13 = good_references_voting_round_wave_4 .into_iter() .filter(|r| r.author != AuthorityIndex::new_for_test(2)) .collect::<Vec<_>>(); - let decison_block_b14 = VerifiedBlock::new_for_test( + let decision_block_b14 = VerifiedBlock::new_for_test( TestBlock::new(14, 1) .set_ancestors( good_references_voting_round_wave_4_without_c13 @@ -687,9 +687,9 @@ fn test_byzantine_direct_commit() { ) .build(), ); - dag_state.write().accept_block(decison_block_b14.clone()); + dag_state.write().accept_block(decision_block_b14.clone()); - let decison_block_c14 = VerifiedBlock::new_for_test( + let decision_block_c14 = VerifiedBlock::new_for_test( TestBlock::new(14, 2) .set_ancestors( good_references_voting_round_wave_4_without_c13 @@ -700,9 +700,9 @@ fn test_byzantine_direct_commit() { ) .build(), ); - dag_state.write().accept_block(decison_block_c14.clone()); + dag_state.write().accept_block(decision_block_c14.clone()); - let decison_block_d14 = VerifiedBlock::new_for_test( + let decision_block_d14 = VerifiedBlock::new_for_test( TestBlock::new(14, 3) .set_ancestors( good_references_voting_round_wave_4_without_c13 @@ -713,7 +713,7 @@ fn test_byzantine_direct_commit() { ) .build(), ); - dag_state.write().accept_block(decison_block_d14.clone()); + dag_state.write().accept_block(decision_block_d14.clone()); // DagState Update: // - We have A13, B13, D13 & C13 as good votes in the voting round of wave 4 @@ -747,7 +747,7 @@ fn basic_test_setup() -> ( super::UniversalCommitter, ) { telemetry_subscribers::init_for_testing(); - // Commitee of 4 with even stake + // Committee of 4 with even stake let context = Arc::new(Context::new_for_test(4).0); let dag_state = Arc::new(RwLock::new(DagState::new( context.clone(), diff --git a/crates/data-transform/src/main.rs b/crates/data-transform/src/main.rs index e6bc766ad89..1ace90758ff 100644 --- a/crates/data-transform/src/main.rs +++ b/crates/data-transform/src/main.rs @@ -275,7 +275,7 @@ fn main() { println!("Unable to find event_json {}", target_id); } Err(_) => { - println!("An error occured while fetching event_json {}", target_id); + println!("An error occurred while fetching event_json {}", target_id); } } @@ -326,7 +326,7 @@ fn main() { exit(0); } Err(_) => { - println!("An error occured while fetching event {}", target_id); + println!("An error occurred while fetching event {}", target_id); exit(0); } } diff --git a/crates/iota-adapter-transactional-tests/tests/size_limits/identitifer_len_limits.move b/crates/iota-adapter-transactional-tests/tests/size_limits/identitifer_len_limits.move index 490f914f0b6..71139d83ad3 100644 --- a/crates/iota-adapter-transactional-tests/tests/size_limits/identitifer_len_limits.move +++ b/crates/iota-adapter-transactional-tests/tests/size_limits/identitifer_len_limits.move @@ -2,13 +2,13 @@ // Modifications Copyright (c) 2024 IOTA Stiftung // SPDX-License-Identifier: Apache-2.0 -// Test Identifer length limits +// Test identifier length limits //# init --addresses Test=0x0 //# publish -/// Test Identifer length limits enforced for module name +/// Test identifier length limits enforced for module name module
Test::M1_1234567891234567890123456789012345678912345678901234567890123456789123456789012345678908901234567891234567890123456789078912345678901234567890 { public entry fun create_n_idscreate_n_idscreate_n_() { @@ -17,7 +17,7 @@ module Test::M1_1234567891234567890123456789012345678912345678901234567890123456 //# publish -/// Test Identifer length limits enforced for function name +/// Test identifier length limits enforced for function name module Test::M1_12345678912345678901234567890 { public entry fun create_n_idscreate_n_idscreate_n_idscreate_n_idscreate_n_idscreate_n_idscreate_n_idscreate_n_idscreate_n_idscreate_n_idscreate_n_idscreate_n_idscreate_n_idscreate_n_ids() { } @@ -26,7 +26,7 @@ module Test::M1_12345678912345678901234567890 { //# publish -/// Test normal Identifer lengths +/// Test normal identifier lengths module Test::M1_1234567891234567890123456789012345678912345678901234567 { public entry fun create_n_(n: u64, ctx: &mut TxContext) { diff --git a/crates/iota-bridge/src/action_executor.rs b/crates/iota-bridge/src/action_executor.rs index b6576e9bda3..3b4638fb11e 100644 --- a/crates/iota-bridge/src/action_executor.rs +++ b/crates/iota-bridge/src/action_executor.rs @@ -812,7 +812,7 @@ mod tests { let action_digest = action.digest(); // Wait for 1 second. It should still in the process of retrying requesting sigs - // becaues we mock errors above. + // because we mock errors above. tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; tx_subscription.try_recv().unwrap_err(); // And the action is still in WAL diff --git a/crates/iota-bridge/src/iota_client.rs b/crates/iota-bridge/src/iota_client.rs index dd06ba4dc79..c1ec1f83510 100644 --- a/crates/iota-bridge/src/iota_client.rs +++ b/crates/iota-bridge/src/iota_client.rs @@ -350,7 +350,7 @@ impl IotaClientInner for IotaSdkClient { } }; - // get_dynamic_field_object does not return bcs, so we have to issue anothe + // get_dynamic_field_object does not return bcs, so we have to issue another // query let bcs_bytes = self .read_api() diff --git a/crates/iota-bridge/src/server/governance_verifier.rs b/crates/iota-bridge/src/server/governance_verifier.rs index bb2443aeb64..735481286d9 100644 --- a/crates/iota-bridge/src/server/governance_verifier.rs +++ b/crates/iota-bridge/src/server/governance_verifier.rs @@ -17,7 +17,7 @@ pub struct GovernanceVerifier { impl GovernanceVerifier { pub fn new(approved_actions: Vec<BridgeAction>) -> BridgeResult<Self> { - // TOOD(audit-blocking): verify chain ids + // TODO(audit-blocking): verify chain ids let mut approved_goverance_actions = HashMap::new(); for action in approved_actions { if !action.is_governace_action() { diff --git a/crates/iota-bridge/src/test_utils.rs b/crates/iota-bridge/src/test_utils.rs index 11d96f83b1f..940f122c55a 100644 --- a/crates/iota-bridge/src/test_utils.rs +++ b/crates/iota-bridge/src/test_utils.rs @@ -188,7 +188,7 @@ pub fn mock_get_logs( } /// Returns a test Log and corresponding BridgeAction -// Refernece: https://github.com/rust-ethereum/ethabi/blob/master/ethabi/src/event.rs#L192 +// Reference: https://github.com/rust-ethereum/ethabi/blob/master/ethabi/src/event.rs#L192 pub fn get_test_log_and_action( contract_address: EthAddress, tx_hash: TxHash, @@ -199,7 +199,7 @@ pub fn get_test_log_and_action( let source_address = EthAddress::random(); let iota_address: IotaAddress = IotaAddress::random_for_testing_only(); let target_address = Hex::decode(&iota_address.to_string()).unwrap(); - // Note: must use `encode` rather than `encode_packged` + // Note: must use `encode`
rather than `encode_packed` let encoded = ethers::abi::encode(&[ // u8 is encoded as u256 in abi standard ethers::abi::Token::Uint(ethers::types::U256::from(token_code)), diff --git a/crates/iota-config/src/node.rs b/crates/iota-config/src/node.rs index b3ecef25370..1c6de892c63 100644 --- a/crates/iota-config/src/node.rs +++ b/crates/iota-config/src/node.rs @@ -82,7 +82,7 @@ pub struct NodeConfig { #[serde(default = "default_enable_index_processing")] pub enable_index_processing: bool, - // only alow websocket connections for jsonrpc traffic + // only allow websocket connections for jsonrpc traffic #[serde(default)] pub websocket_only: bool, diff --git a/crates/iota-core/src/transaction_input_loader.rs b/crates/iota-core/src/transaction_input_loader.rs index 6eb60e3d3e8..40448da4f28 100644 --- a/crates/iota-core/src/transaction_input_loader.rs +++ b/crates/iota-core/src/transaction_input_loader.rs @@ -45,7 +45,7 @@ impl TransactionInputLoader { receiving_objects: &[ObjectRef], epoch_id: EpochId, ) -> IotaResult<(InputObjects, ReceivingObjects)> { - // Length of input_object_kinds have beeen checked via validity_check() for + // Length of input_object_kinds has been checked via validity_check() for // ProgrammableTransaction. let mut input_results = vec![None; input_object_kinds.len()]; let mut object_refs = Vec::with_capacity(input_object_kinds.len()); @@ -293,7 +293,7 @@ impl TransactionInputLoader { _protocol_config: &ProtocolConfig, ) -> IotaResult<(InputObjects, ReceivingObjects)> { let mut results = Vec::with_capacity(input_object_kinds.len()); - // Length of input_object_kinds have beeen checked via validity_check() for + // Length of input_object_kinds has been checked via validity_check() for // ProgrammableTransaction. for kind in input_object_kinds { let obj = match kind { diff --git a/crates/iota-framework/docs/deepbook/clob.md b/crates/iota-framework/docs/deepbook/clob.md index f0dc3afb83b..2265cfc8f30 100644 --- a/crates/iota-framework/docs/deepbook/clob.md +++ b/crates/iota-framework/docs/deepbook/clob.md @@ -1641,11 +1641,11 @@ Place a market order to the order book. // We first retrieve the PriceLevel with the lowest price by calling min_leaf on the asks Critbit Tree. // We then match the market order by iterating through open orders on that price level in ascending order of the order id. // Open orders that are being filled are removed from the order book. - // We stop the iteration untill all quantities are filled. + // We stop the iteration once all quantities are filled. // If the total quantity of open orders at the lowest price level is not large enough to fully fill the market order, // we move on to the next price level by calling next_leaf on the asks Critbit Tree and repeat the same procedure. // Continue iterating over the price levels in ascending order until the market order is completely filled. - // If ther market order cannot be completely filled even after consuming all the open ask orders, + // If the market order cannot be completely filled even after consuming all the open ask orders, // the unfilled quantity will be cancelled. // Market ask order follows similar procedure. // The difference is that market ask order is matched against the open bid orders.
diff --git a/crates/iota-framework/docs/deepbook/clob_v2.md b/crates/iota-framework/docs/deepbook/clob_v2.md index c690d12ea97..d5337549d4d 100644 --- a/crates/iota-framework/docs/deepbook/clob_v2.md +++ b/crates/iota-framework/docs/deepbook/clob_v2.md @@ -2782,11 +2782,11 @@ Place a market order to the order book. // We first retrieve the PriceLevel with the lowest price by calling min_leaf on the asks Critbit Tree. // We then match the market order by iterating through open orders on that price level in ascending order of the order id. // Open orders that are being filled are removed from the order book. - // We stop the iteration untill all quantities are filled. + // We stop the iteration once all quantities are filled. // If the total quantity of open orders at the lowest price level is not large enough to fully fill the market order, // we move on to the next price level by calling next_leaf on the asks Critbit Tree and repeat the same procedure. // Continue iterating over the price levels in ascending order until the market order is completely filled. - // If ther market order cannot be completely filled even after consuming all the open ask orders, + // If the market order cannot be completely filled even after consuming all the open ask orders, // the unfilled quantity will be cancelled. // Market ask order follows similar procedure. // The difference is that market ask order is matched against the open bid orders. @@ -3822,7 +3822,7 @@ The latter is the corresponding depth list let (price_low_, _) = critbit::min_leaf(&pool.bids); let (price_high_, _) = critbit::max_leaf(&pool.bids); - // If price_low is greater than the higest element in the tree, we return empty + // If price_low is greater than the highest element in the tree, we return empty if (price_low > price_high_) { return (price_vec, depth_vec) }; diff --git a/crates/iota-framework/docs/iota-framework/transfer_policy.md b/crates/iota-framework/docs/iota-framework/transfer_policy.md index 9a29b18e940..8a6277ba248 100644 --- a/crates/iota-framework/docs/iota-framework/transfer_policy.md +++ b/crates/iota-framework/docs/iota-framework/transfer_policy.md @@ -16,7 +16,7 @@ hot potato or transaction will fail. - Type owner (creator) can set any Rules as long as the ecosystem supports them. All of the Rules need to be resolved within a single transaction (eg pay royalty and pay fixed commission). Once required actions are performed, -the TransferRequest can be "confimed" via confirm_request call. +the TransferRequest can be "confirmed" via confirm_request call. - TransferPolicy aims to be the main interface for creators to control trades of their types and collect profits if a fee is required on sales. Custom @@ -428,7 +428,7 @@ available for use, the type can not be traded in kiosks. ## Function `default` -Initialize the Tranfer Policy in the default scenario: Create and share +Initialize the Transfer Policy in the default scenario: Create and share the TransferPolicy, transfer TransferPolicyCap to the transaction sender. diff --git a/crates/iota-framework/docs/stardust/nft.md b/crates/iota-framework/docs/stardust/nft.md index 97779e5e400..39f10200fa8 100644 --- a/crates/iota-framework/docs/stardust/nft.md +++ b/crates/iota-framework/docs/stardust/nft.md @@ -143,7 +143,7 @@ The Nft module initializer. string::utf8(b"description"), string::utf8(b"creator"), - // The extra IRC27-nested fileds. + // The extra IRC27-nested fields.
string::utf8(b"version"), string::utf8(b"media_type"), string::utf8(b"collection_name"), @@ -159,7 +159,7 @@ The Nft module initializer. string::utf8(b"{immutable_metadata.description}"), string::utf8(b"{immutable_metadata.issuer_name}"), - // The extra IRC27-nested fileds. + // The extra IRC27-nested fields. string::utf8(b"{immutable_metadata.version}"), string::utf8(b"{immutable_metadata.media_type}"), string::utf8(b"{immutable_metadata.collection_name}"), diff --git a/crates/iota-framework/packages/deepbook/sources/clob.move b/crates/iota-framework/packages/deepbook/sources/clob.move index 3cba68d3b63..0c4fb46c59d 100644 --- a/crates/iota-framework/packages/deepbook/sources/clob.move +++ b/crates/iota-framework/packages/deepbook/sources/clob.move @@ -133,7 +133,7 @@ module deepbook::clob { // Orders that are submitted earlier has lower order ids. // 64 bits are sufficient for order ids whereas 32 bits are not. // Assuming a maximum TPS of 100K/s of Iota chain, it would take (1<<63) / 100000 / 3600 / 24 / 365 = 2924712 years to reach the full capacity. - // The highest bit of the order id is used to denote the order tyep, 0 for bid, 1 for ask. + // The highest bit of the order id is used to denote the order type, 0 for bid, 1 for ask. order_id: u64, // Only used for limit orders. price: u64, @@ -768,11 +768,11 @@ module deepbook::clob { // We first retrieve the PriceLevel with the lowest price by calling min_leaf on the asks Critbit Tree. // We then match the market order by iterating through open orders on that price level in ascending order of the order id. // Open orders that are being filled are removed from the order book. - // We stop the iteration untill all quantities are filled. + // We stop the iteration until all quantities are filled. // If the total quantity of open orders at the lowest price level is not large enough to fully fill the market order, // we move on to the next price level by calling next_leaf on the asks Critbit Tree and repeat the same procedure. // Continue iterating over the price levels in ascending order until the market order is completely filled. - // If ther market order cannot be completely filled even after consuming all the open ask orders, + // If their market order cannot be completely filled even after consuming all the open ask orders, // the unfilled quantity will be cancelled. // Market ask order follows similar procedure. // The difference is that market ask order is matched against the open bid orders. diff --git a/crates/iota-framework/packages/deepbook/sources/clob_v2.move b/crates/iota-framework/packages/deepbook/sources/clob_v2.move index 68a9dd5a17d..e95e159f000 100644 --- a/crates/iota-framework/packages/deepbook/sources/clob_v2.move +++ b/crates/iota-framework/packages/deepbook/sources/clob_v2.move @@ -1289,11 +1289,11 @@ module deepbook::clob_v2 { // We first retrieve the PriceLevel with the lowest price by calling min_leaf on the asks Critbit Tree. // We then match the market order by iterating through open orders on that price level in ascending order of the order id. // Open orders that are being filled are removed from the order book. - // We stop the iteration untill all quantities are filled. + // We stop the iteration until all quantities are filled. // If the total quantity of open orders at the lowest price level is not large enough to fully fill the market order, // we move on to the next price level by calling next_leaf on the asks Critbit Tree and repeat the same procedure. 
// Continue iterating over the price levels in ascending order until the market order is completely filled. - // If ther market order cannot be completely filled even after consuming all the open ask orders, + // If the market order cannot be completely filled even after consuming all the open ask orders, // the unfilled quantity will be cancelled. // Market ask order follows similar procedure. // The difference is that market ask order is matched against the open bid orders. @@ -2010,7 +2010,7 @@ module deepbook::clob_v2 { let (price_low_, _) = critbit::min_leaf(&pool.bids); let (price_high_, _) = critbit::max_leaf(&pool.bids); - // If price_low is greater than the higest element in the tree, we return empty + // If price_low is greater than the highest element in the tree, we return empty if (price_low > price_high_) { return (price_vec, depth_vec) }; diff --git a/crates/iota-framework/packages/iota-framework/sources/crypto/group_ops.move b/crates/iota-framework/packages/iota-framework/sources/crypto/group_ops.move index 8bb462cc5dc..048fdac5aae 100644 --- a/crates/iota-framework/packages/iota-framework/sources/crypto/group_ops.move +++ b/crates/iota-framework/packages/iota-framework/sources/crypto/group_ops.move @@ -19,7 +19,7 @@ module iota::group_ops { ///////////////////////////////////////////////////// ////// Generic functions for group operations. ////// - // The caller provides a type identifer that should match the types of enum [Groups] in group_ops.rs. + // The caller provides a type identifier that should match the types of enum [Groups] in group_ops.rs. // General wrapper for all group elements. public struct Element<T> has store, copy, drop { diff --git a/crates/iota-framework/packages/iota-framework/sources/kiosk/transfer_policy.move b/crates/iota-framework/packages/iota-framework/sources/kiosk/transfer_policy.move index 36a5a875230..5abe500e2c6 100644 --- a/crates/iota-framework/packages/iota-framework/sources/kiosk/transfer_policy.move +++ b/crates/iota-framework/packages/iota-framework/sources/kiosk/transfer_policy.move @@ -16,7 +16,7 @@ /// - Type owner (creator) can set any Rules as long as the ecosystem supports /// them. All of the Rules need to be resolved within a single transaction (eg /// pay royalty and pay fixed commission). Once required actions are performed, -/// the `TransferRequest` can be "confimed" via `confirm_request` call. +/// the `TransferRequest` can be "confirmed" via `confirm_request` call. /// /// - `TransferPolicy` aims to be the main interface for creators to control trades /// of their types and collect profits if a fee is required on sales. Custom @@ -130,7 +130,7 @@ module iota::transfer_policy { } #[allow(lint(self_transfer, share_owned))] - /// Initialize the Tranfer Policy in the default scenario: Create and share + /// Initialize the Transfer Policy in the default scenario: Create and share /// the `TransferPolicy`, transfer `TransferPolicyCap` to the transaction /// sender. entry fun default(pub: &Publisher, ctx: &mut TxContext) { diff --git a/crates/iota-framework/packages/stardust/sources/nft/nft.move b/crates/iota-framework/packages/stardust/sources/nft/nft.move index 9a753319a17..5ffbd7677d1 100644 --- a/crates/iota-framework/packages/stardust/sources/nft/nft.move +++ b/crates/iota-framework/packages/stardust/sources/nft/nft.move @@ -45,7 +45,7 @@ module stardust::nft { string::utf8(b"description"), string::utf8(b"creator"), - // The extra IRC27-nested fileds. + // The extra IRC27-nested fields.
string::utf8(b"version"), string::utf8(b"media_type"), string::utf8(b"collection_name"), @@ -61,7 +61,7 @@ module stardust::nft { string::utf8(b"{immutable_metadata.description}"), string::utf8(b"{immutable_metadata.issuer_name}"), - // The extra IRC27-nested fileds. + // The extra IRC27-nested fields. string::utf8(b"{immutable_metadata.version}"), string::utf8(b"{immutable_metadata.media_type}"), string::utf8(b"{immutable_metadata.collection_name}"), diff --git a/crates/iota-genesis-builder/src/stardust/migration/migration.rs b/crates/iota-genesis-builder/src/stardust/migration/migration.rs index b02f8dc286f..1579235185b 100644 --- a/crates/iota-genesis-builder/src/stardust/migration/migration.rs +++ b/crates/iota-genesis-builder/src/stardust/migration/migration.rs @@ -118,7 +118,7 @@ impl Migration { // a certain milestone timestamp remains the same between runs. // // This guarantees that fresh ids created through the transaction - // context will also map to the same objects betwen runs. + // context will also map to the same objects between runs. outputs.sort_by_key(|(header, _)| (header.ms_timestamp(), header.output_id())); foundries.sort_by_key(|(header, _)| (header.ms_timestamp(), header.output_id())); info!("Migrating foundries..."); diff --git a/crates/iota-graphql-rpc/src/types/coin.rs b/crates/iota-graphql-rpc/src/types/coin.rs index f49391e675c..ca966ce37c5 100644 --- a/crates/iota-graphql-rpc/src/types/coin.rs +++ b/crates/iota-graphql-rpc/src/types/coin.rs @@ -363,7 +363,7 @@ impl Coin { })?; let coin = Coin::try_from(&move_).map_err(|_| { - Error::Internal(format!("Faild to deserialize as Coin: {}", object.address)) + Error::Internal(format!("Failed to deserialize as Coin: {}", object.address)) })?; conn.edges.push(Edge::new(cursor, coin)); diff --git a/crates/iota-indexer/src/metrics.rs b/crates/iota-indexer/src/metrics.rs index 69e0c2fdd06..48d30947d52 100644 --- a/crates/iota-indexer/src/metrics.rs +++ b/crates/iota-indexer/src/metrics.rs @@ -341,7 +341,7 @@ impl IndexerMetrics { .unwrap(), checkpoint_db_commit_latency: register_histogram_with_registry!( "checkpoint_db_commit_latency", - "Time spent commiting a checkpoint to the db", + "Time spent committing a checkpoint to the db", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) @@ -349,21 +349,21 @@ impl IndexerMetrics { checkpoint_db_commit_latency_step_1: register_histogram_with_registry!( "checkpoint_db_commit_latency_step_1", - "Time spent commiting a checkpoint to the db, step 1", + "Time spent committing a checkpoint to the db, step 1", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) .unwrap(), checkpoint_db_commit_latency_transactions: register_histogram_with_registry!( "checkpoint_db_commit_latency_transactions", - "Time spent commiting transactions", + "Time spent committing transactions", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) .unwrap(), checkpoint_db_commit_latency_transactions_chunks: register_histogram_with_registry!( "checkpoint_db_commit_latency_transactions_chunks", - "Time spent commiting transactions chunks", + "Time spent committing transactions chunks", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) @@ -377,40 +377,40 @@ impl IndexerMetrics { .unwrap(), checkpoint_db_commit_latency_objects: register_histogram_with_registry!( "checkpoint_db_commit_latency_objects", - "Time spent commiting objects", + "Time spent committing objects", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) .unwrap(), checkpoint_db_commit_latency_objects_history: register_histogram_with_registry!( 
"checkpoint_db_commit_latency_objects_history", - "Time spent commiting objects history", + "Time spent committing objects history", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ).unwrap(), checkpoint_db_commit_latency_objects_chunks: register_histogram_with_registry!( "checkpoint_db_commit_latency_objects_chunks", - "Time spent commiting objects chunks", + "Time spent committing objects chunks", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) .unwrap(), checkpoint_db_commit_latency_objects_history_chunks: register_histogram_with_registry!( "checkpoint_db_commit_latency_objects_history_chunks", - "Time spent commiting objects history chunks", + "Time spent committing objects history chunks", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ).unwrap(), checkpoint_db_commit_latency_events: register_histogram_with_registry!( "checkpoint_db_commit_latency_events", - "Time spent commiting events", + "Time spent committing events", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) .unwrap(), checkpoint_db_commit_latency_events_chunks: register_histogram_with_registry!( "checkpoint_db_commit_latency_events_chunks", - "Time spent commiting events chunks", + "Time spent committing events chunks", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) @@ -418,35 +418,35 @@ impl IndexerMetrics { checkpoint_db_commit_latency_packages: register_histogram_with_registry!( "checkpoint_db_commit_latency_packages", - "Time spent commiting packages", + "Time spent committing packages", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) .unwrap(), checkpoint_db_commit_latency_tx_indices: register_histogram_with_registry!( "checkpoint_db_commit_latency_tx_indices", - "Time spent commiting tx indices", + "Time spent committing tx indices", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) .unwrap(), checkpoint_db_commit_latency_tx_indices_chunks: register_histogram_with_registry!( "checkpoint_db_commit_latency_tx_indices_chunks", - "Time spent commiting tx_indices chunks", + "Time spent committing tx_indices chunks", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) .unwrap(), checkpoint_db_commit_latency_checkpoints: register_histogram_with_registry!( "checkpoint_db_commit_latency_checkpoints", - "Time spent commiting checkpoints", + "Time spent committing checkpoints", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) .unwrap(), checkpoint_db_commit_latency_epoch: register_histogram_with_registry!( "checkpoint_db_commit_latency_epochs", - "Time spent commiting epochs", + "Time spent committing epochs", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) @@ -471,35 +471,35 @@ impl IndexerMetrics { ).unwrap(), thousand_transaction_avg_db_commit_latency: register_histogram_with_registry!( "transaction_db_commit_latency", - "Average time spent commiting 1000 transactions to the db", + "Average time spent committing 1000 transactions to the db", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) .unwrap(), object_db_commit_latency: register_histogram_with_registry!( "object_db_commit_latency", - "Time spent commiting a object to the db", + "Time spent committing a object to the db", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) .unwrap(), object_mutation_db_commit_latency: register_histogram_with_registry!( "object_mutation_db_commit_latency", - "Time spent commiting a object mutation to the db", + "Time spent committing a object mutation to the db", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) .unwrap(), object_deletion_db_commit_latency: register_histogram_with_registry!( 
"object_deletion_db_commit_latency", - "Time spent commiting a object deletion to the db", + "Time spent committing a object deletion to the db", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) .unwrap(), epoch_db_commit_latency: register_histogram_with_registry!( "epoch_db_commit_latency", - "Time spent commiting a epoch to the db", + "Time spent committing a epoch to the db", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) diff --git a/crates/iota-indexer/src/store/pg_indexer_store.rs b/crates/iota-indexer/src/store/pg_indexer_store.rs index 246507639cc..9a4c29e4ecf 100644 --- a/crates/iota-indexer/src/store/pg_indexer_store.rs +++ b/crates/iota-indexer/src/store/pg_indexer_store.rs @@ -175,7 +175,7 @@ impl PgIndexerStore { object_id: ObjectID, version: Option, ) -> Result { - // TOOD: read remote object_history kv store + // TODO: read remote object_history kv store read_only_blocking!(&self.blocking_cp, |conn| { let query = objects::dsl::objects.filter(objects::dsl::object_id.eq(object_id.to_vec())); diff --git a/crates/iota-move/src/manage_package.rs b/crates/iota-move/src/manage_package.rs index c80eea9aaa1..6949729be69 100644 --- a/crates/iota-move/src/manage_package.rs +++ b/crates/iota-move/src/manage_package.rs @@ -14,7 +14,7 @@ use move_package::BuildConfig; #[group(id = "iota-move-manage-package")] pub struct ManagePackage { #[clap(long)] - /// The network chain identifer. Use '35834a8a' for mainnet. + /// The network chain identifier. Use '35834a8a' for mainnet. pub network: String, #[clap(long = "original-id", value_parser = ObjectID::from_hex_literal)] /// The original address (Object ID) where this package is published. diff --git a/crates/iota-proxy/src/prom_to_mimir.rs b/crates/iota-proxy/src/prom_to_mimir.rs index fd8db7ceb11..49db3ab1c8d 100644 --- a/crates/iota-proxy/src/prom_to_mimir.rs +++ b/crates/iota-proxy/src/prom_to_mimir.rs @@ -92,7 +92,7 @@ impl From> for Mimir> { Vec::with_capacity(metric_families.len()); for mf in metric_families { - // TOOD add From impl + // TODO add From impl let mt = match mf.get_field_type() { MetricType::COUNTER => remote_write::metric_metadata::MetricType::Counter, MetricType::GAUGE => remote_write::metric_metadata::MetricType::Gauge, diff --git a/crates/iota-replay/src/replay.rs b/crates/iota-replay/src/replay.rs index e6a4e88f970..638c2cb5e0b 100644 --- a/crates/iota-replay/src/replay.rs +++ b/crates/iota-replay/src/replay.rs @@ -79,7 +79,7 @@ use crate::{ pub struct ExecutionSandboxState { /// Information describing the transaction pub transaction_info: OnChainTransactionInfo, - /// All the obejcts that are required for the execution of the transaction + /// All the objects that are required for the execution of the transaction pub required_objects: Vec, /// Temporary store from executing this locally in /// `execute_transaction_to_effects` diff --git a/crates/iota/genesis.md b/crates/iota/genesis.md index 26251a4af13..b23ea7b3630 100644 --- a/crates/iota/genesis.md +++ b/crates/iota/genesis.md @@ -2,22 +2,23 @@ This document lays out the step-by-step process for orchestrating a Iota Genesis Ceremony. 
-## Prerequisites +## Prerequisites Each validator participating in the ceremony will need the following: -- Ed25519 Public key -- Iota network address // WAN -- Narwhal_primary_to_primary network address // WAN -- Narwhal_worker_to_primary network address // LAN -- Narwhal_primary_to_worker network address // LAN -- Narwhal_worker_to_worker network address // WAN -- Narwhal_consensus_address network address // LAN +- Ed25519 Public key +- Iota network address // WAN +- Narwhal_primary_to_primary network address // WAN +- Narwhal_worker_to_primary network address // LAN +- Narwhal_primary_to_worker network address // LAN +- Narwhal_worker_to_worker network address // WAN +- Narwhal_consensus_address network address // LAN Note: -- Network addresses should be Multiaddrs in the form of `/dns/{dns name}/tcp/{port}/http` and -only the addresses marked WAN need to be publicly accessible by the wider internet. -- An Ed25519 key can be created using `iota keytool generate` + +- Network addresses should be Multiaddrs in the form of `/dns/{dns name}/tcp/{port}/http` and + only the addresses marked WAN need to be publicly accessible by the wider internet. +- An Ed25519 key can be created using `iota keytool generate` ## Ceremony @@ -31,7 +32,7 @@ The MC (Master of Ceremony) will create a new git repository and initialize the ``` $ git init genesis && cd genesis -$ iota genesis-ceremony +$ iota genesis-ceremony $ git add . $ git commit -m "init genesis" $ git push @@ -66,7 +67,7 @@ Add configuration for any initial gas objects that should be created at genesis. $ iota genesis-ceremony add-gas-object \ --address \ --object-id \ - --valud <# of iota coins> + --value <# of iota coins> $ git add . $ git commit -m "add gas object" $ git push diff --git a/crates/iota/src/client_ptb/builder.rs b/crates/iota/src/client_ptb/builder.rs index 6a2a5eacbb8..d56802a41ca 100644 --- a/crates/iota/src/client_ptb/builder.rs +++ b/crates/iota/src/client_ptb/builder.rs @@ -757,7 +757,7 @@ impl<'a> PTBBuilder<'a> { } } } - // Unable to resolve an identifer to anything at this point -- error and see if we can + // Unable to resolve an identifier to anything at this point -- error and see if we can // find a similar identifier to suggest. PTBArg::Identifier(i) => match self.did_you_mean_identifier(&i) { Some(similars) => { diff --git a/crates/iota/src/key_identity.rs b/crates/iota/src/key_identity.rs index 3b7d7ba2863..daeed5e5457 100644 --- a/crates/iota/src/key_identity.rs +++ b/crates/iota/src/key_identity.rs @@ -41,7 +41,7 @@ impl Display for KeyIdentity { } /// Get the IotaAddress corresponding to this key identity. -/// If no string is provided, then the curernt active address is returned. +/// If no string is provided, then the current active address is returned. 
pub fn get_identity_address( input: Option, ctx: &mut WalletContext, diff --git a/crates/iotaop-cli/src/cli/pulumi/setup.rs b/crates/iotaop-cli/src/cli/pulumi/setup.rs index 1281df5d1b2..dfbbbc84329 100644 --- a/crates/iotaop-cli/src/cli/pulumi/setup.rs +++ b/crates/iotaop-cli/src/cli/pulumi/setup.rs @@ -103,7 +103,7 @@ fn ensure_gcloud_logged_in() -> Result<()> { "Please select your @mystenlabs.com profile: {}", "gcloud config set account `ACCOUNT`".bright_yellow() ); - return Err(anyhow!("Incorret account selected.")); + return Err(anyhow!("Incorrect account selected.")); } } } diff --git a/crates/typed-store/src/rocks/mod.rs b/crates/typed-store/src/rocks/mod.rs index e01cfc14e30..9435583c997 100644 --- a/crates/typed-store/src/rocks/mod.rs +++ b/crates/typed-store/src/rocks/mod.rs @@ -1422,7 +1422,7 @@ impl DBBatch { /// with ignore_range_deletions set to true, the old value are visible until /// compaction actually deletes them which will happen sometime after. By /// default ignore_range_deletions is set to true on a DBMap (unless it is - /// overriden in the config), so please use this function with caution + /// overridden in the config), so please use this function with caution pub fn schedule_delete_range( &mut self, db: &DBMap, @@ -1954,7 +1954,7 @@ where /// with ignore_range_deletions set to true, the old value are visible until /// compaction actually deletes them which will happen sometime after. By /// default ignore_range_deletions is set to true on a DBMap (unless it is - /// overriden in the config), so please use this function with caution + /// overridden in the config), so please use this function with caution #[instrument(level = "trace", skip_all, err)] fn schedule_delete_all(&self) -> Result<(), TypedStoreError> { let mut iter = self.unbounded_iter().seek_to_first(); diff --git a/iota-execution/latest/iota-verifier/src/lib.rs b/iota-execution/latest/iota-verifier/src/lib.rs index 35fc98bfafc..e52af6f9a07 100644 --- a/iota-execution/latest/iota-verifier/src/lib.rs +++ b/iota-execution/latest/iota-verifier/src/lib.rs @@ -80,7 +80,7 @@ pub fn default_verifier_config( max_basic_blocks_in_script: None, max_per_fun_meter_units, max_per_mod_meter_units, - max_idenfitier_len: protocol_config.max_move_identifier_len_as_option(), /* Before protocol version 9, there was no limit */ + max_identifier_len: protocol_config.max_move_identifier_len_as_option(), /* Before protocol version 9, there was no limit */ allow_receiving_object_id: protocol_config.allow_receiving_object_id(), reject_mutable_random_on_entry_functions: protocol_config .reject_mutable_random_on_entry_functions(), diff --git a/iota-execution/v0/iota-verifier/src/lib.rs b/iota-execution/v0/iota-verifier/src/lib.rs index 35fc98bfafc..877e3b62e21 100644 --- a/iota-execution/v0/iota-verifier/src/lib.rs +++ b/iota-execution/v0/iota-verifier/src/lib.rs @@ -80,7 +80,7 @@ pub fn default_verifier_config( max_basic_blocks_in_script: None, max_per_fun_meter_units, max_per_mod_meter_units, - max_idenfitier_len: protocol_config.max_move_identifier_len_as_option(), /* Before protocol version 9, there was no limit */ + max_identifier_len: protocol_config.max_move_identifier_len_as_option(), allow_receiving_object_id: protocol_config.allow_receiving_object_id(), reject_mutable_random_on_entry_functions: protocol_config .reject_mutable_random_on_entry_functions(), diff --git a/narwhal/config/src/committee.rs b/narwhal/config/src/committee.rs index dd251e7358b..c07a9e86167 100644 --- a/narwhal/config/src/committee.rs +++ 
b/narwhal/config/src/committee.rs @@ -48,7 +48,7 @@ impl Authority { /// CommitteeBuilder). As some internal properties of Authority are /// initialised via the Committee, to ensure that the user will not /// accidentally use stale Authority data, should always derive them via - /// the Commitee. + /// the Committee. fn new( protocol_key: PublicKey, stake: Stake,