diff --git a/.typos.toml b/.typos.toml index 8758b29205e..051931c53e7 100644 --- a/.typos.toml +++ b/.typos.toml @@ -1,7 +1,12 @@ [default] -extend-ignore-re = [ - "[a-zA-Z0-9]{44}", -] +extend-ignore-re = ["[a-zA-Z0-9]{44}", "[a-zA-Z0-9]{9}"] [default.extend-words] groth = "groth" +BA = "BA" +DAA = "DAA" +UE = "UE" +tto = "tto" +ser = "ser" +pn = "pn" +strat = "strat" diff --git a/apps/core/src/hooks/useGetValidatorsEvents.ts b/apps/core/src/hooks/useGetValidatorsEvents.ts index f8770039523..f4723d90f15 100644 --- a/apps/core/src/hooks/useGetValidatorsEvents.ts +++ b/apps/core/src/hooks/useGetValidatorsEvents.ts @@ -10,7 +10,7 @@ type GetValidatorsEvent = { order: 'ascending' | 'descending'; }; -// NOTE: This copys the query limit from our Rust JSON RPC backend, this needs to be kept in sync! +// NOTE: This copies the query limit from our Rust JSON RPC backend, this needs to be kept in sync! const QUERY_MAX_RESULT_LIMIT = 50; const VALIDATORS_EVENTS_QUERY = '0x3::validator_set::ValidatorEpochInfoEventV2'; diff --git a/consensus/core/src/block_verifier.rs b/consensus/core/src/block_verifier.rs index ac94d261839..3447814092c 100644 --- a/consensus/core/src/block_verifier.rs +++ b/consensus/core/src/block_verifier.rs @@ -82,7 +82,7 @@ impl BlockVerifier for SignedBlockVerifier { }); } - // Verifiy the block's signature. + // Verify the block's signature. 
block.verify_signature(&self.context)?; // Verify the block's ancestor refs are consistent with the block's round, diff --git a/consensus/core/src/tests/base_committer_tests.rs b/consensus/core/src/tests/base_committer_tests.rs index 08b8e1d698d..0ed89bf7954 100644 --- a/consensus/core/src/tests/base_committer_tests.rs +++ b/consensus/core/src/tests/base_committer_tests.rs @@ -21,7 +21,7 @@ use crate::{ #[test] fn try_direct_commit() { telemetry_subscribers::init_for_testing(); - // Commitee of 4 with even stake + // Committee of 4 with even stake let context = Arc::new(Context::new_for_test(4).0); let dag_state = Arc::new(RwLock::new(DagState::new( context.clone(), @@ -77,7 +77,7 @@ fn try_direct_commit() { #[test] fn idempotence() { telemetry_subscribers::init_for_testing(); - // Commitee of 4 with even stake + // Committee of 4 with even stake let context = Arc::new(Context::new_for_test(4).0); let dag_state = Arc::new(RwLock::new(DagState::new( context.clone(), @@ -121,7 +121,7 @@ fn idempotence() { #[test] fn multiple_direct_commit() { telemetry_subscribers::init_for_testing(); - // Commitee of 4 with even stake + // Committee of 4 with even stake let context = Arc::new(Context::new_for_test(4).0); let dag_state = Arc::new(RwLock::new(DagState::new( context.clone(), @@ -162,7 +162,7 @@ fn multiple_direct_commit() { #[test] fn direct_skip() { telemetry_subscribers::init_for_testing(); - // Commitee of 4 with even stake + // Committee of 4 with even stake let context = Arc::new(Context::new_for_test(4).0); let dag_state = Arc::new(RwLock::new(DagState::new( context.clone(), @@ -214,7 +214,7 @@ fn direct_skip() { #[test] fn indirect_commit() { telemetry_subscribers::init_for_testing(); - // Commitee of 4 with even stake + // Committee of 4 with even stake let context = Arc::new(Context::new_for_test(4).0); let dag_state = Arc::new(RwLock::new(DagState::new( context.clone(), @@ -357,7 +357,7 @@ fn indirect_commit() { #[test] fn indirect_skip() { 
telemetry_subscribers::init_for_testing(); - // Commitee of 4 with even stake + // Committee of 4 with even stake let context = Arc::new(Context::new_for_test(4).0); let dag_state = Arc::new(RwLock::new(DagState::new( context.clone(), @@ -411,7 +411,7 @@ fn indirect_skip() { dag_state.clone(), )); - // Add enough blocks to reach the decison round of wave 3. + // Add enough blocks to reach the decision round of wave 3. let decision_round_wave_3 = committer.decision_round(3); build_dag( context.clone(), @@ -487,7 +487,7 @@ fn indirect_skip() { #[test] fn undecided() { telemetry_subscribers::init_for_testing(); - // Commitee of 4 with even stake + // Committee of 4 with even stake let context = Arc::new(Context::new_for_test(4).0); let dag_state = Arc::new(RwLock::new(DagState::new( context.clone(), @@ -576,7 +576,7 @@ fn undecided() { #[test] fn test_byzantine_direct_commit() { telemetry_subscribers::init_for_testing(); - // Commitee of 4 with even stake + // Committee of 4 with even stake let context = Arc::new(Context::new_for_test(4).0); let dag_state = Arc::new(RwLock::new(DagState::new( context.clone(), @@ -659,19 +659,19 @@ fn test_byzantine_direct_commit() { // non-votes C13 but there are enough good votes to prevent a skip. // Additionally only one of the non-votes per authority should be counted so // we should not skip leader A12. 
- let decison_block_a14 = VerifiedBlock::new_for_test( + let decision_block_a14 = VerifiedBlock::new_for_test( TestBlock::new(14, 0) .set_ancestors(good_references_voting_round_wave_4.clone()) .build(), ); - dag_state.write().accept_block(decison_block_a14.clone()); + dag_state.write().accept_block(decision_block_a14.clone()); let good_references_voting_round_wave_4_without_c13 = good_references_voting_round_wave_4 .into_iter() .filter(|r| r.author != AuthorityIndex::new_for_test(2)) .collect::>(); - let decison_block_b14 = VerifiedBlock::new_for_test( + let decision_block_b14 = VerifiedBlock::new_for_test( TestBlock::new(14, 1) .set_ancestors( good_references_voting_round_wave_4_without_c13 @@ -682,9 +682,9 @@ fn test_byzantine_direct_commit() { ) .build(), ); - dag_state.write().accept_block(decison_block_b14.clone()); + dag_state.write().accept_block(decision_block_b14.clone()); - let decison_block_c14 = VerifiedBlock::new_for_test( + let decision_block_c14 = VerifiedBlock::new_for_test( TestBlock::new(14, 2) .set_ancestors( good_references_voting_round_wave_4_without_c13 @@ -695,9 +695,9 @@ fn test_byzantine_direct_commit() { ) .build(), ); - dag_state.write().accept_block(decison_block_c14.clone()); + dag_state.write().accept_block(decision_block_c14.clone()); - let decison_block_d14 = VerifiedBlock::new_for_test( + let decision_block_d14 = VerifiedBlock::new_for_test( TestBlock::new(14, 3) .set_ancestors( good_references_voting_round_wave_4_without_c13 @@ -708,7 +708,7 @@ fn test_byzantine_direct_commit() { ) .build(), ); - dag_state.write().accept_block(decison_block_d14.clone()); + dag_state.write().accept_block(decision_block_d14.clone()); // DagState Update: // - We have A13, B13, D13 & C13 as good votes in the voting round of wave 4 diff --git a/consensus/core/src/tests/pipelined_committer_tests.rs b/consensus/core/src/tests/pipelined_committer_tests.rs index 958e32b6cbb..f7bac7b021e 100644 --- a/consensus/core/src/tests/pipelined_committer_tests.rs +++ 
b/consensus/core/src/tests/pipelined_committer_tests.rs @@ -655,20 +655,20 @@ fn test_byzantine_validator() { // Additionally only one of the non-votes per authority should be counted so // we should not skip leader A12. let mut references_round_14 = vec![]; - let decison_block_a14 = VerifiedBlock::new_for_test( + let decision_block_a14 = VerifiedBlock::new_for_test( TestBlock::new(14, 0) .set_ancestors(good_references_voting_round_wave_4.clone()) .build(), ); - references_round_14.push(decison_block_a14.reference()); - dag_state.write().accept_block(decison_block_a14.clone()); + references_round_14.push(decision_block_a14.reference()); + dag_state.write().accept_block(decision_block_a14.clone()); let good_references_voting_round_wave_4_without_b13 = good_references_voting_round_wave_4 .into_iter() .filter(|r| r.author != AuthorityIndex::new_for_test(1)) .collect::>(); - let decison_block_b14 = VerifiedBlock::new_for_test( + let decision_block_b14 = VerifiedBlock::new_for_test( TestBlock::new(14, 1) .set_ancestors( good_references_voting_round_wave_4_without_b13 @@ -679,10 +679,10 @@ fn test_byzantine_validator() { ) .build(), ); - references_round_14.push(decison_block_b14.reference()); - dag_state.write().accept_block(decison_block_b14.clone()); + references_round_14.push(decision_block_b14.reference()); + dag_state.write().accept_block(decision_block_b14.clone()); - let decison_block_c14 = VerifiedBlock::new_for_test( + let decision_block_c14 = VerifiedBlock::new_for_test( TestBlock::new(14, 2) .set_ancestors( good_references_voting_round_wave_4_without_b13 @@ -693,10 +693,10 @@ fn test_byzantine_validator() { ) .build(), ); - references_round_14.push(decison_block_c14.reference()); - dag_state.write().accept_block(decison_block_c14.clone()); + references_round_14.push(decision_block_c14.reference()); + dag_state.write().accept_block(decision_block_c14.clone()); - let decison_block_d14 = VerifiedBlock::new_for_test( + let decision_block_d14 = 
VerifiedBlock::new_for_test( TestBlock::new(14, 3) .set_ancestors( good_references_voting_round_wave_4_without_b13 @@ -707,8 +707,8 @@ fn test_byzantine_validator() { ) .build(), ); - references_round_14.push(decison_block_d14.reference()); - dag_state.write().accept_block(decison_block_d14.clone()); + references_round_14.push(decision_block_d14.reference()); + dag_state.write().accept_block(decision_block_d14.clone()); // DagState Update: // - We have A13, B13, D13 & C13 as good votes in the voting round of leader A12 @@ -777,7 +777,7 @@ fn basic_test_setup() -> ( super::UniversalCommitter, ) { telemetry_subscribers::init_for_testing(); - // Commitee of 4 with even stake + // Committee of 4 with even stake let context = Arc::new(Context::new_for_test(4).0); let dag_state = Arc::new(RwLock::new(DagState::new( context.clone(), diff --git a/consensus/core/src/tests/universal_committer_tests.rs b/consensus/core/src/tests/universal_committer_tests.rs index eee399a523c..491d064ae10 100644 --- a/consensus/core/src/tests/universal_committer_tests.rs +++ b/consensus/core/src/tests/universal_committer_tests.rs @@ -664,19 +664,19 @@ fn test_byzantine_direct_commit() { // non-votes C13 but there are enough good votes to prevent a skip. // Additionally only one of the non-votes per authority should be counted so // we should not skip leader A12. 
- let decison_block_a14 = VerifiedBlock::new_for_test( + let decision_block_a14 = VerifiedBlock::new_for_test( TestBlock::new(14, 0) .set_ancestors(good_references_voting_round_wave_4.clone()) .build(), ); - dag_state.write().accept_block(decison_block_a14.clone()); + dag_state.write().accept_block(decision_block_a14.clone()); let good_references_voting_round_wave_4_without_c13 = good_references_voting_round_wave_4 .into_iter() .filter(|r| r.author != AuthorityIndex::new_for_test(2)) .collect::>(); - let decison_block_b14 = VerifiedBlock::new_for_test( + let decision_block_b14 = VerifiedBlock::new_for_test( TestBlock::new(14, 1) .set_ancestors( good_references_voting_round_wave_4_without_c13 @@ -687,9 +687,9 @@ fn test_byzantine_direct_commit() { ) .build(), ); - dag_state.write().accept_block(decison_block_b14.clone()); + dag_state.write().accept_block(decision_block_b14.clone()); - let decison_block_c14 = VerifiedBlock::new_for_test( + let decision_block_c14 = VerifiedBlock::new_for_test( TestBlock::new(14, 2) .set_ancestors( good_references_voting_round_wave_4_without_c13 @@ -700,9 +700,9 @@ fn test_byzantine_direct_commit() { ) .build(), ); - dag_state.write().accept_block(decison_block_c14.clone()); + dag_state.write().accept_block(decision_block_c14.clone()); - let decison_block_d14 = VerifiedBlock::new_for_test( + let decision_block_d14 = VerifiedBlock::new_for_test( TestBlock::new(14, 3) .set_ancestors( good_references_voting_round_wave_4_without_c13 @@ -713,7 +713,7 @@ fn test_byzantine_direct_commit() { ) .build(), ); - dag_state.write().accept_block(decison_block_d14.clone()); + dag_state.write().accept_block(decision_block_d14.clone()); // DagState Update: // - We have A13, B13, D13 & C13 as good votes in the voting round of wave 4 @@ -747,7 +747,7 @@ fn basic_test_setup() -> ( super::UniversalCommitter, ) { telemetry_subscribers::init_for_testing(); - // Commitee of 4 with even stake + // Committee of 4 with even stake let context = 
Arc::new(Context::new_for_test(4).0); let dag_state = Arc::new(RwLock::new(DagState::new( context.clone(), diff --git a/crates/data-transform/src/main.rs b/crates/data-transform/src/main.rs index e6bc766ad89..1ace90758ff 100644 --- a/crates/data-transform/src/main.rs +++ b/crates/data-transform/src/main.rs @@ -275,7 +275,7 @@ fn main() { println!("Unable to find event_json {}", target_id); } Err(_) => { - println!("An error occured while fetching event_json {}", target_id); + println!("An error occurred while fetching event_json {}", target_id); } } @@ -326,7 +326,7 @@ fn main() { exit(0); } Err(_) => { - println!("An error occured while fetching event {}", target_id); + println!("An error occurred while fetching event {}", target_id); exit(0); } } diff --git a/crates/iota-adapter-transactional-tests/tests/size_limits/identitifer_len_limits.move b/crates/iota-adapter-transactional-tests/tests/size_limits/identitifer_len_limits.move index 490f914f0b6..71139d83ad3 100644 --- a/crates/iota-adapter-transactional-tests/tests/size_limits/identitifer_len_limits.move +++ b/crates/iota-adapter-transactional-tests/tests/size_limits/identitifer_len_limits.move @@ -2,13 +2,13 @@ // Modifications Copyright (c) 2024 IOTA Stiftung // SPDX-License-Identifier: Apache-2.0 -// Test Identifer length limits +// Test identifier length limits //# init --addresses Test=0x0 //# publish -/// Test Identifer length limits enforced for module name +/// Test identifier length limits enforced for module name module Test::M1_1234567891234567890123456789012345678912345678901234567890123456789123456789012345678908901234567891234567890123456789078912345678901234567890 { public entry fun create_n_idscreate_n_idscreate_n_() { @@ -17,7 +17,7 @@ module Test::M1_1234567891234567890123456789012345678912345678901234567890123456 //# publish -/// Test Identifer length limits enforced for function name +/// Test identifier length limits enforced for function name module Test::M1_12345678912345678901234567890 
{ public entry fun create_n_idscreate_n_idscreate_n_idscreate_n_idscreate_n_idscreate_n_idscreate_n_idscreate_n_idscreate_n_idscreate_n_idscreate_n_idscreate_n_idscreate_n_idscreate_n_ids() { } @@ -26,7 +26,7 @@ module Test::M1_12345678912345678901234567890 { //# publish -/// Test normal Identifer lengths +/// Test normal identifier lengths module Test::M1_1234567891234567890123456789012345678912345678901234567 { public entry fun create_n_(n: u64, ctx: &mut TxContext) { diff --git a/crates/iota-bridge/src/action_executor.rs b/crates/iota-bridge/src/action_executor.rs index b6576e9bda3..3b4638fb11e 100644 --- a/crates/iota-bridge/src/action_executor.rs +++ b/crates/iota-bridge/src/action_executor.rs @@ -812,7 +812,7 @@ mod tests { let action_digest = action.digest(); // Wait for 1 second. It should still in the process of retrying requesting sigs - // becaues we mock errors above. + // because we mock errors above. tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; tx_subscription.try_recv().unwrap_err(); // And the action is still in WAL diff --git a/crates/iota-bridge/src/iota_client.rs b/crates/iota-bridge/src/iota_client.rs index dd06ba4dc79..c1ec1f83510 100644 --- a/crates/iota-bridge/src/iota_client.rs +++ b/crates/iota-bridge/src/iota_client.rs @@ -350,7 +350,7 @@ impl IotaClientInner for IotaSdkClient { } }; - // get_dynamic_field_object does not return bcs, so we have to issue anothe + // get_dynamic_field_object does not return bcs, so we have to issue another // query let bcs_bytes = self .read_api() diff --git a/crates/iota-bridge/src/server/governance_verifier.rs b/crates/iota-bridge/src/server/governance_verifier.rs index bb2443aeb64..735481286d9 100644 --- a/crates/iota-bridge/src/server/governance_verifier.rs +++ b/crates/iota-bridge/src/server/governance_verifier.rs @@ -17,7 +17,7 @@ pub struct GovernanceVerifier { impl GovernanceVerifier { pub fn new(approved_actions: Vec) -> BridgeResult { - // TOOD(audit-blocking): verify chain ids + 
// TODO(audit-blocking): verify chain ids let mut approved_goverance_actions = HashMap::new(); for action in approved_actions { if !action.is_governace_action() { diff --git a/crates/iota-bridge/src/test_utils.rs b/crates/iota-bridge/src/test_utils.rs index 11d96f83b1f..940f122c55a 100644 --- a/crates/iota-bridge/src/test_utils.rs +++ b/crates/iota-bridge/src/test_utils.rs @@ -188,7 +188,7 @@ pub fn mock_get_logs( } /// Returns a test Log and corresponding BridgeAction -// Refernece: https://github.com/rust-ethereum/ethabi/blob/master/ethabi/src/event.rs#L192 +// Reference: https://github.com/rust-ethereum/ethabi/blob/master/ethabi/src/event.rs#L192 pub fn get_test_log_and_action( contract_address: EthAddress, tx_hash: TxHash, @@ -199,7 +199,7 @@ pub fn get_test_log_and_action( let source_address = EthAddress::random(); let iota_address: IotaAddress = IotaAddress::random_for_testing_only(); let target_address = Hex::decode(&iota_address.to_string()).unwrap(); - // Note: must use `encode` rather than `encode_packged` + // Note: must use `encode` rather than `encode_packed` let encoded = ethers::abi::encode(&[ // u8 is encoded as u256 in abi standard ethers::abi::Token::Uint(ethers::types::U256::from(token_code)), diff --git a/crates/iota-config/src/node.rs b/crates/iota-config/src/node.rs index b3ecef25370..1c6de892c63 100644 --- a/crates/iota-config/src/node.rs +++ b/crates/iota-config/src/node.rs @@ -82,7 +82,7 @@ pub struct NodeConfig { #[serde(default = "default_enable_index_processing")] pub enable_index_processing: bool, - // only alow websocket connections for jsonrpc traffic + // only allow websocket connections for jsonrpc traffic #[serde(default)] pub websocket_only: bool, diff --git a/crates/iota-core/src/transaction_input_loader.rs b/crates/iota-core/src/transaction_input_loader.rs index 6eb60e3d3e8..40448da4f28 100644 --- a/crates/iota-core/src/transaction_input_loader.rs +++ b/crates/iota-core/src/transaction_input_loader.rs @@ -45,7 +45,7 @@ impl
TransactionInputLoader { receiving_objects: &[ObjectRef], epoch_id: EpochId, ) -> IotaResult<(InputObjects, ReceivingObjects)> { - // Length of input_object_kinds have beeen checked via validity_check() for + // Length of input_object_kinds have been checked via validity_check() for // ProgrammableTransaction. let mut input_results = vec![None; input_object_kinds.len()]; let mut object_refs = Vec::with_capacity(input_object_kinds.len()); @@ -293,7 +293,7 @@ impl TransactionInputLoader { _protocol_config: &ProtocolConfig, ) -> IotaResult<(InputObjects, ReceivingObjects)> { let mut results = Vec::with_capacity(input_object_kinds.len()); - // Length of input_object_kinds have beeen checked via validity_check() for + // Length of input_object_kinds have been checked via validity_check() for // ProgrammableTransaction. for kind in input_object_kinds { let obj = match kind { diff --git a/crates/iota-framework/docs/deepbook/clob.md b/crates/iota-framework/docs/deepbook/clob.md index f0dc3afb83b..2265cfc8f30 100644 --- a/crates/iota-framework/docs/deepbook/clob.md +++ b/crates/iota-framework/docs/deepbook/clob.md @@ -1641,11 +1641,11 @@ Place a market order to the order book. // We first retrieve the PriceLevel with the lowest price by calling min_leaf on the asks Critbit Tree. // We then match the market order by iterating through open orders on that price level in ascending order of the order id. // Open orders that are being filled are removed from the order book. - // We stop the iteration untill all quantities are filled. + // We stop the iteration until all quantities are filled. // If the total quantity of open orders at the lowest price level is not large enough to fully fill the market order, // we move on to the next price level by calling next_leaf on the asks Critbit Tree and repeat the same procedure. // Continue iterating over the price levels in ascending order until the market order is completely filled. 
- // If ther market order cannot be completely filled even after consuming all the open ask orders, + // If the market order cannot be completely filled even after consuming all the open ask orders, // the unfilled quantity will be cancelled. // Market ask order follows similar procedure. // The difference is that market ask order is matched against the open bid orders. diff --git a/crates/iota-framework/docs/deepbook/clob_v2.md b/crates/iota-framework/docs/deepbook/clob_v2.md index c690d12ea97..d5337549d4d 100644 --- a/crates/iota-framework/docs/deepbook/clob_v2.md +++ b/crates/iota-framework/docs/deepbook/clob_v2.md @@ -2782,11 +2782,11 @@ Place a market order to the order book. // We first retrieve the PriceLevel with the lowest price by calling min_leaf on the asks Critbit Tree. // We then match the market order by iterating through open orders on that price level in ascending order of the order id. // Open orders that are being filled are removed from the order book. - // We stop the iteration untill all quantities are filled. + // We stop the iteration until all quantities are filled. // If the total quantity of open orders at the lowest price level is not large enough to fully fill the market order, // we move on to the next price level by calling next_leaf on the asks Critbit Tree and repeat the same procedure. // Continue iterating over the price levels in ascending order until the market order is completely filled. - // If ther market order cannot be completely filled even after consuming all the open ask orders, + // If the market order cannot be completely filled even after consuming all the open ask orders, // the unfilled quantity will be cancelled. // Market ask order follows similar procedure. // The difference is that market ask order is matched against the open bid orders.
@@ -3822,7 +3822,7 @@ The latter is the corresponding depth list let (price_low_, _) = critbit::min_leaf(&pool.bids); let (price_high_, _) = critbit::max_leaf(&pool.bids); - // If price_low is greater than the higest element in the tree, we return empty + // If price_low is greater than the highest element in the tree, we return empty if (price_low > price_high_) { return (price_vec, depth_vec) }; diff --git a/crates/iota-framework/docs/iota-framework/transfer_policy.md b/crates/iota-framework/docs/iota-framework/transfer_policy.md index 9a29b18e940..8a6277ba248 100644 --- a/crates/iota-framework/docs/iota-framework/transfer_policy.md +++ b/crates/iota-framework/docs/iota-framework/transfer_policy.md @@ -16,7 +16,7 @@ hot potato or transaction will fail. - Type owner (creator) can set any Rules as long as the ecosystem supports them. All of the Rules need to be resolved within a single transaction (eg pay royalty and pay fixed commission). Once required actions are performed, -the TransferRequest can be "confimed" via confirm_request call. +the TransferRequest can be "confirmed" via confirm_request call. - TransferPolicy aims to be the main interface for creators to control trades of their types and collect profits if a fee is required on sales. Custom @@ -428,7 +428,7 @@ available for use, the type can not be traded in kiosks. ## Function `default` -Initialize the Tranfer Policy in the default scenario: Create and share +Initialize the Transfer Policy in the default scenario: Create and share the TransferPolicy, transfer TransferPolicyCap to the transaction sender. diff --git a/crates/iota-framework/docs/stardust/nft.md b/crates/iota-framework/docs/stardust/nft.md index 97779e5e400..39f10200fa8 100644 --- a/crates/iota-framework/docs/stardust/nft.md +++ b/crates/iota-framework/docs/stardust/nft.md @@ -143,7 +143,7 @@ The Nft module initializer. string::utf8(b"description"), string::utf8(b"creator"), - // The extra IRC27-nested fileds. 
+ // The extra IRC27-nested fields. string::utf8(b"version"), string::utf8(b"media_type"), string::utf8(b"collection_name"), @@ -159,7 +159,7 @@ The Nft module initializer. string::utf8(b"{immutable_metadata.description}"), string::utf8(b"{immutable_metadata.issuer_name}"), - // The extra IRC27-nested fileds. + // The extra IRC27-nested fields. string::utf8(b"{immutable_metadata.version}"), string::utf8(b"{immutable_metadata.media_type}"), string::utf8(b"{immutable_metadata.collection_name}"), diff --git a/crates/iota-framework/packages/deepbook/sources/clob.move b/crates/iota-framework/packages/deepbook/sources/clob.move index 3cba68d3b63..0c4fb46c59d 100644 --- a/crates/iota-framework/packages/deepbook/sources/clob.move +++ b/crates/iota-framework/packages/deepbook/sources/clob.move @@ -133,7 +133,7 @@ module deepbook::clob { // Orders that are submitted earlier has lower order ids. // 64 bits are sufficient for order ids whereas 32 bits are not. // Assuming a maximum TPS of 100K/s of Iota chain, it would take (1<<63) / 100000 / 3600 / 24 / 365 = 2924712 years to reach the full capacity. - // The highest bit of the order id is used to denote the order tyep, 0 for bid, 1 for ask. + // The highest bit of the order id is used to denote the order type, 0 for bid, 1 for ask. order_id: u64, // Only used for limit orders. price: u64, @@ -768,11 +768,11 @@ module deepbook::clob { // We first retrieve the PriceLevel with the lowest price by calling min_leaf on the asks Critbit Tree. // We then match the market order by iterating through open orders on that price level in ascending order of the order id. // Open orders that are being filled are removed from the order book. - // We stop the iteration untill all quantities are filled. + // We stop the iteration until all quantities are filled. 
// If the total quantity of open orders at the lowest price level is not large enough to fully fill the market order, // we move on to the next price level by calling next_leaf on the asks Critbit Tree and repeat the same procedure. // Continue iterating over the price levels in ascending order until the market order is completely filled. - // If ther market order cannot be completely filled even after consuming all the open ask orders, + // If the market order cannot be completely filled even after consuming all the open ask orders, // the unfilled quantity will be cancelled. // Market ask order follows similar procedure. // The difference is that market ask order is matched against the open bid orders. diff --git a/crates/iota-framework/packages/deepbook/sources/clob_v2.move b/crates/iota-framework/packages/deepbook/sources/clob_v2.move index 68a9dd5a17d..e95e159f000 100644 --- a/crates/iota-framework/packages/deepbook/sources/clob_v2.move +++ b/crates/iota-framework/packages/deepbook/sources/clob_v2.move @@ -1289,11 +1289,11 @@ module deepbook::clob_v2 { // We first retrieve the PriceLevel with the lowest price by calling min_leaf on the asks Critbit Tree. // We then match the market order by iterating through open orders on that price level in ascending order of the order id. // Open orders that are being filled are removed from the order book. - // We stop the iteration untill all quantities are filled. + // We stop the iteration until all quantities are filled. // If the total quantity of open orders at the lowest price level is not large enough to fully fill the market order, // we move on to the next price level by calling next_leaf on the asks Critbit Tree and repeat the same procedure. // Continue iterating over the price levels in ascending order until the market order is completely filled.
- // If ther market order cannot be completely filled even after consuming all the open ask orders, + // If the market order cannot be completely filled even after consuming all the open ask orders, // the unfilled quantity will be cancelled. // Market ask order follows similar procedure. // The difference is that market ask order is matched against the open bid orders. @@ -2010,7 +2010,7 @@ module deepbook::clob_v2 { let (price_low_, _) = critbit::min_leaf(&pool.bids); let (price_high_, _) = critbit::max_leaf(&pool.bids); - // If price_low is greater than the higest element in the tree, we return empty + // If price_low is greater than the highest element in the tree, we return empty if (price_low > price_high_) { return (price_vec, depth_vec) }; diff --git a/crates/iota-framework/packages/iota-framework/sources/crypto/group_ops.move b/crates/iota-framework/packages/iota-framework/sources/crypto/group_ops.move index 8bb462cc5dc..048fdac5aae 100644 --- a/crates/iota-framework/packages/iota-framework/sources/crypto/group_ops.move +++ b/crates/iota-framework/packages/iota-framework/sources/crypto/group_ops.move @@ -19,7 +19,7 @@ module iota::group_ops { ///////////////////////////////////////////////////// ////// Generic functions for group operations. ////// - // The caller provides a type identifer that should match the types of enum [Groups] in group_ops.rs. + // The caller provides a type identifier that should match the types of enum [Groups] in group_ops.rs. // General wrapper for all group elements.
public struct Element has store, copy, drop { diff --git a/crates/iota-framework/packages/iota-framework/sources/kiosk/transfer_policy.move b/crates/iota-framework/packages/iota-framework/sources/kiosk/transfer_policy.move index 36a5a875230..5abe500e2c6 100644 --- a/crates/iota-framework/packages/iota-framework/sources/kiosk/transfer_policy.move +++ b/crates/iota-framework/packages/iota-framework/sources/kiosk/transfer_policy.move @@ -16,7 +16,7 @@ /// - Type owner (creator) can set any Rules as long as the ecosystem supports /// them. All of the Rules need to be resolved within a single transaction (eg /// pay royalty and pay fixed commission). Once required actions are performed, -/// the `TransferRequest` can be "confimed" via `confirm_request` call. +/// the `TransferRequest` can be "confirmed" via `confirm_request` call. /// /// - `TransferPolicy` aims to be the main interface for creators to control trades /// of their types and collect profits if a fee is required on sales. Custom @@ -130,7 +130,7 @@ module iota::transfer_policy { } #[allow(lint(self_transfer, share_owned))] - /// Initialize the Tranfer Policy in the default scenario: Create and share + /// Initialize the Transfer Policy in the default scenario: Create and share /// the `TransferPolicy`, transfer `TransferPolicyCap` to the transaction /// sender. entry fun default(pub: &Publisher, ctx: &mut TxContext) { diff --git a/crates/iota-framework/packages/stardust/sources/nft/nft.move b/crates/iota-framework/packages/stardust/sources/nft/nft.move index 9a753319a17..5ffbd7677d1 100644 --- a/crates/iota-framework/packages/stardust/sources/nft/nft.move +++ b/crates/iota-framework/packages/stardust/sources/nft/nft.move @@ -45,7 +45,7 @@ module stardust::nft { string::utf8(b"description"), string::utf8(b"creator"), - // The extra IRC27-nested fileds. + // The extra IRC27-nested fields. 
string::utf8(b"version"), string::utf8(b"media_type"), string::utf8(b"collection_name"), @@ -61,7 +61,7 @@ module stardust::nft { string::utf8(b"{immutable_metadata.description}"), string::utf8(b"{immutable_metadata.issuer_name}"), - // The extra IRC27-nested fileds. + // The extra IRC27-nested fields. string::utf8(b"{immutable_metadata.version}"), string::utf8(b"{immutable_metadata.media_type}"), string::utf8(b"{immutable_metadata.collection_name}"), diff --git a/crates/iota-genesis-builder/src/stardust/migration/migration.rs b/crates/iota-genesis-builder/src/stardust/migration/migration.rs index b02f8dc286f..1579235185b 100644 --- a/crates/iota-genesis-builder/src/stardust/migration/migration.rs +++ b/crates/iota-genesis-builder/src/stardust/migration/migration.rs @@ -118,7 +118,7 @@ impl Migration { // a certain milestone timestamp remains the same between runs. // // This guarantees that fresh ids created through the transaction - // context will also map to the same objects betwen runs. + // context will also map to the same objects between runs. 
outputs.sort_by_key(|(header, _)| (header.ms_timestamp(), header.output_id())); foundries.sort_by_key(|(header, _)| (header.ms_timestamp(), header.output_id())); info!("Migrating foundries..."); diff --git a/crates/iota-graphql-rpc/src/types/coin.rs b/crates/iota-graphql-rpc/src/types/coin.rs index f49391e675c..ca966ce37c5 100644 --- a/crates/iota-graphql-rpc/src/types/coin.rs +++ b/crates/iota-graphql-rpc/src/types/coin.rs @@ -363,7 +363,7 @@ impl Coin { })?; let coin = Coin::try_from(&move_).map_err(|_| { - Error::Internal(format!("Faild to deserialize as Coin: {}", object.address)) + Error::Internal(format!("Failed to deserialize as Coin: {}", object.address)) })?; conn.edges.push(Edge::new(cursor, coin)); diff --git a/crates/iota-indexer/src/metrics.rs b/crates/iota-indexer/src/metrics.rs index 69e0c2fdd06..48d30947d52 100644 --- a/crates/iota-indexer/src/metrics.rs +++ b/crates/iota-indexer/src/metrics.rs @@ -341,7 +341,7 @@ impl IndexerMetrics { .unwrap(), checkpoint_db_commit_latency: register_histogram_with_registry!( "checkpoint_db_commit_latency", - "Time spent commiting a checkpoint to the db", + "Time spent committing a checkpoint to the db", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) @@ -349,21 +349,21 @@ impl IndexerMetrics { checkpoint_db_commit_latency_step_1: register_histogram_with_registry!( "checkpoint_db_commit_latency_step_1", - "Time spent commiting a checkpoint to the db, step 1", + "Time spent committing a checkpoint to the db, step 1", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) .unwrap(), checkpoint_db_commit_latency_transactions: register_histogram_with_registry!( "checkpoint_db_commit_latency_transactions", - "Time spent commiting transactions", + "Time spent committing transactions", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) .unwrap(), checkpoint_db_commit_latency_transactions_chunks: register_histogram_with_registry!( "checkpoint_db_commit_latency_transactions_chunks", - "Time spent commiting transactions 
chunks", + "Time spent committing transactions chunks", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) @@ -377,40 +377,40 @@ impl IndexerMetrics { .unwrap(), checkpoint_db_commit_latency_objects: register_histogram_with_registry!( "checkpoint_db_commit_latency_objects", - "Time spent commiting objects", + "Time spent committing objects", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) .unwrap(), checkpoint_db_commit_latency_objects_history: register_histogram_with_registry!( "checkpoint_db_commit_latency_objects_history", - "Time spent commiting objects history", + "Time spent committing objects history", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ).unwrap(), checkpoint_db_commit_latency_objects_chunks: register_histogram_with_registry!( "checkpoint_db_commit_latency_objects_chunks", - "Time spent commiting objects chunks", + "Time spent committing objects chunks", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) .unwrap(), checkpoint_db_commit_latency_objects_history_chunks: register_histogram_with_registry!( "checkpoint_db_commit_latency_objects_history_chunks", - "Time spent commiting objects history chunks", + "Time spent committing objects history chunks", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ).unwrap(), checkpoint_db_commit_latency_events: register_histogram_with_registry!( "checkpoint_db_commit_latency_events", - "Time spent commiting events", + "Time spent committing events", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) .unwrap(), checkpoint_db_commit_latency_events_chunks: register_histogram_with_registry!( "checkpoint_db_commit_latency_events_chunks", - "Time spent commiting events chunks", + "Time spent committing events chunks", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) @@ -418,35 +418,35 @@ impl IndexerMetrics { checkpoint_db_commit_latency_packages: register_histogram_with_registry!( "checkpoint_db_commit_latency_packages", - "Time spent commiting packages", + "Time spent committing packages", 
DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) .unwrap(), checkpoint_db_commit_latency_tx_indices: register_histogram_with_registry!( "checkpoint_db_commit_latency_tx_indices", - "Time spent commiting tx indices", + "Time spent committing tx indices", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) .unwrap(), checkpoint_db_commit_latency_tx_indices_chunks: register_histogram_with_registry!( "checkpoint_db_commit_latency_tx_indices_chunks", - "Time spent commiting tx_indices chunks", + "Time spent committing tx_indices chunks", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) .unwrap(), checkpoint_db_commit_latency_checkpoints: register_histogram_with_registry!( "checkpoint_db_commit_latency_checkpoints", - "Time spent commiting checkpoints", + "Time spent committing checkpoints", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) .unwrap(), checkpoint_db_commit_latency_epoch: register_histogram_with_registry!( "checkpoint_db_commit_latency_epochs", - "Time spent commiting epochs", + "Time spent committing epochs", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) @@ -471,35 +471,35 @@ impl IndexerMetrics { ).unwrap(), thousand_transaction_avg_db_commit_latency: register_histogram_with_registry!( "transaction_db_commit_latency", - "Average time spent commiting 1000 transactions to the db", + "Average time spent committing 1000 transactions to the db", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) .unwrap(), object_db_commit_latency: register_histogram_with_registry!( "object_db_commit_latency", - "Time spent commiting a object to the db", + "Time spent committing an object to the db", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) .unwrap(), object_mutation_db_commit_latency: register_histogram_with_registry!( "object_mutation_db_commit_latency", - "Time spent commiting a object mutation to the db", + "Time spent committing an object mutation to the db", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) .unwrap(), 
object_deletion_db_commit_latency: register_histogram_with_registry!( "object_deletion_db_commit_latency", - "Time spent commiting a object deletion to the db", + "Time spent committing an object deletion to the db", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) .unwrap(), epoch_db_commit_latency: register_histogram_with_registry!( "epoch_db_commit_latency", - "Time spent commiting a epoch to the db", + "Time spent committing an epoch to the db", DB_COMMIT_LATENCY_SEC_BUCKETS.to_vec(), registry, ) diff --git a/crates/iota-indexer/src/store/pg_indexer_store.rs b/crates/iota-indexer/src/store/pg_indexer_store.rs index 246507639cc..9a4c29e4ecf 100644 --- a/crates/iota-indexer/src/store/pg_indexer_store.rs +++ b/crates/iota-indexer/src/store/pg_indexer_store.rs @@ -175,7 +175,7 @@ impl PgIndexerStore { object_id: ObjectID, version: Option, ) -> Result { - // TOOD: read remote object_history kv store + // TODO: read remote object_history kv store read_only_blocking!(&self.blocking_cp, |conn| { let query = objects::dsl::objects.filter(objects::dsl::object_id.eq(object_id.to_vec())); diff --git a/crates/iota-move/src/manage_package.rs b/crates/iota-move/src/manage_package.rs index c80eea9aaa1..6949729be69 100644 --- a/crates/iota-move/src/manage_package.rs +++ b/crates/iota-move/src/manage_package.rs @@ -14,7 +14,7 @@ use move_package::BuildConfig; #[group(id = "iota-move-manage-package")] pub struct ManagePackage { #[clap(long)] - /// The network chain identifer. Use '35834a8a' for mainnet. + /// The network chain identifier. Use '35834a8a' for mainnet. pub network: String, #[clap(long = "original-id", value_parser = ObjectID::from_hex_literal)] /// The original address (Object ID) where this package is published. 
diff --git a/crates/iota-proxy/src/prom_to_mimir.rs b/crates/iota-proxy/src/prom_to_mimir.rs index fd8db7ceb11..49db3ab1c8d 100644 --- a/crates/iota-proxy/src/prom_to_mimir.rs +++ b/crates/iota-proxy/src/prom_to_mimir.rs @@ -92,7 +92,7 @@ impl From> for Mimir> { Vec::with_capacity(metric_families.len()); for mf in metric_families { - // TOOD add From impl + // TODO add From impl let mt = match mf.get_field_type() { MetricType::COUNTER => remote_write::metric_metadata::MetricType::Counter, MetricType::GAUGE => remote_write::metric_metadata::MetricType::Gauge, diff --git a/crates/iota-replay/src/replay.rs b/crates/iota-replay/src/replay.rs index e6a4e88f970..638c2cb5e0b 100644 --- a/crates/iota-replay/src/replay.rs +++ b/crates/iota-replay/src/replay.rs @@ -79,7 +79,7 @@ use crate::{ pub struct ExecutionSandboxState { /// Information describing the transaction pub transaction_info: OnChainTransactionInfo, - /// All the obejcts that are required for the execution of the transaction + /// All the objects that are required for the execution of the transaction pub required_objects: Vec, /// Temporary store from executing this locally in /// `execute_transaction_to_effects` diff --git a/crates/iota/genesis.md b/crates/iota/genesis.md index 26251a4af13..b23ea7b3630 100644 --- a/crates/iota/genesis.md +++ b/crates/iota/genesis.md @@ -2,22 +2,23 @@ This document lays out the step-by-step process for orchestrating a Iota Genesis Ceremony. 
-## Prerequisites +## Prerequisites Each validator participating in the ceremony will need the following: -- Ed25519 Public key -- Iota network address // WAN -- Narwhal_primary_to_primary network address // WAN -- Narwhal_worker_to_primary network address // LAN -- Narwhal_primary_to_worker network address // LAN -- Narwhal_worker_to_worker network address // WAN -- Narwhal_consensus_address network address // LAN +- Ed25519 Public key +- Iota network address // WAN +- Narwhal_primary_to_primary network address // WAN +- Narwhal_worker_to_primary network address // LAN +- Narwhal_primary_to_worker network address // LAN +- Narwhal_worker_to_worker network address // WAN +- Narwhal_consensus_address network address // LAN Note: -- Network addresses should be Multiaddrs in the form of `/dns/{dns name}/tcp/{port}/http` and -only the addresses marked WAN need to be publicly accessible by the wider internet. -- An Ed25519 key can be created using `iota keytool generate` + +- Network addresses should be Multiaddrs in the form of `/dns/{dns name}/tcp/{port}/http` and + only the addresses marked WAN need to be publicly accessible by the wider internet. +- An Ed25519 key can be created using `iota keytool generate` ## Ceremony @@ -31,7 +32,7 @@ The MC (Master of Ceremony) will create a new git repository and initialize the ``` $ git init genesis && cd genesis -$ iota genesis-ceremony +$ iota genesis-ceremony $ git add . $ git commit -m "init genesis" $ git push @@ -66,7 +67,7 @@ Add configuration for any initial gas objects that should be created at genesis. $ iota genesis-ceremony add-gas-object \ --address \ --object-id \ - --valud <# of iota coins> + --value <# of iota coins> $ git add . 
$ git commit -m "add gas object" $ git push diff --git a/crates/iota/src/client_ptb/builder.rs b/crates/iota/src/client_ptb/builder.rs index 6a2a5eacbb8..d56802a41ca 100644 --- a/crates/iota/src/client_ptb/builder.rs +++ b/crates/iota/src/client_ptb/builder.rs @@ -757,7 +757,7 @@ impl<'a> PTBBuilder<'a> { } } } - // Unable to resolve an identifer to anything at this point -- error and see if we can + // Unable to resolve an identifier to anything at this point -- error and see if we can // find a similar identifier to suggest. PTBArg::Identifier(i) => match self.did_you_mean_identifier(&i) { Some(similars) => { diff --git a/crates/iota/src/key_identity.rs b/crates/iota/src/key_identity.rs index 3b7d7ba2863..daeed5e5457 100644 --- a/crates/iota/src/key_identity.rs +++ b/crates/iota/src/key_identity.rs @@ -41,7 +41,7 @@ impl Display for KeyIdentity { } /// Get the IotaAddress corresponding to this key identity. -/// If no string is provided, then the curernt active address is returned. +/// If no string is provided, then the current active address is returned. 
pub fn get_identity_address( input: Option, ctx: &mut WalletContext, diff --git a/crates/iotaop-cli/src/cli/pulumi/setup.rs b/crates/iotaop-cli/src/cli/pulumi/setup.rs index 1281df5d1b2..dfbbbc84329 100644 --- a/crates/iotaop-cli/src/cli/pulumi/setup.rs +++ b/crates/iotaop-cli/src/cli/pulumi/setup.rs @@ -103,7 +103,7 @@ fn ensure_gcloud_logged_in() -> Result<()> { "Please select your @mystenlabs.com profile: {}", "gcloud config set account `ACCOUNT`".bright_yellow() ); - return Err(anyhow!("Incorret account selected.")); + return Err(anyhow!("Incorrect account selected.")); } } } diff --git a/crates/typed-store/src/rocks/mod.rs b/crates/typed-store/src/rocks/mod.rs index e01cfc14e30..9435583c997 100644 --- a/crates/typed-store/src/rocks/mod.rs +++ b/crates/typed-store/src/rocks/mod.rs @@ -1422,7 +1422,7 @@ impl DBBatch { /// with ignore_range_deletions set to true, the old value are visible until /// compaction actually deletes them which will happen sometime after. By /// default ignore_range_deletions is set to true on a DBMap (unless it is - /// overriden in the config), so please use this function with caution + /// overridden in the config), so please use this function with caution pub fn schedule_delete_range( &mut self, db: &DBMap, @@ -1954,7 +1954,7 @@ where /// with ignore_range_deletions set to true, the old value are visible until /// compaction actually deletes them which will happen sometime after. 
By /// default ignore_range_deletions is set to true on a DBMap (unless it is - /// overriden in the config), so please use this function with caution + /// overridden in the config), so please use this function with caution #[instrument(level = "trace", skip_all, err)] fn schedule_delete_all(&self) -> Result<(), TypedStoreError> { let mut iter = self.unbounded_iter().seek_to_first(); diff --git a/examples/move/bridge/sources/committee.move b/examples/move/bridge/sources/committee.move index 1c57a808c63..99d5d0c7b8b 100644 --- a/examples/move/bridge/sources/committee.move +++ b/examples/move/bridge/sources/committee.move @@ -26,7 +26,7 @@ module bridge::committee { const IOTA_MESSAGE_PREFIX: vector = b"IOTA_BRIDGE_MESSAGE"; struct BridgeCommittee has store { - // commitee pub key and weight + // committee pub key and weight members: VecMap, CommitteeMember>, // threshold for each message type thresholds: VecMap diff --git a/examples/move/transfer-to-object/shared-no-tto/sources/shared_cash_register.move b/examples/move/transfer-to-object/shared-no-tto/sources/shared_cash_register.move index 6b4dcf0e38e..d0564a36a9a 100644 --- a/examples/move/transfer-to-object/shared-no-tto/sources/shared_cash_register.move +++ b/examples/move/transfer-to-object/shared-no-tto/sources/shared_cash_register.move @@ -87,7 +87,7 @@ module shared_no_tto::shared_cash_register { /// Process a payment that has been made, removing it from the register and /// returning the coin that can then be combined or sent elsewhere by the authorized individual. - /// Payments can ony be processed by either an account in the / `authorized_individuals` set or by the owner of the cash register. + /// Payments can only be processed by either an account in the / `authorized_individuals` set or by the owner of the cash register. 
public fun process_payment(register: &mut CashRegister, payment_id: u64, ctx: &TxContext): Coin { let sender = tx_context::sender(ctx); assert!(vec_set::contains(®ister.authorized_individuals, &sender) || sender == register.register_owner, ENotAuthorized); diff --git a/examples/move/transfer-to-object/shared-with-tto/sources/shared_cash_register.move b/examples/move/transfer-to-object/shared-with-tto/sources/shared_cash_register.move index 33a09503fe0..f2ee6a0854f 100644 --- a/examples/move/transfer-to-object/shared-with-tto/sources/shared_cash_register.move +++ b/examples/move/transfer-to-object/shared-with-tto/sources/shared_cash_register.move @@ -89,7 +89,7 @@ module shared_with_tto::shared_cash_register { /// Process a payment that has been made, removing it from the register and /// returning the coin that can then be combined or sent elsewhere by the authorized individual. - /// Payments can ony be processed by either an account in the / `authorized_individuals` set or by the owner of the cash register. + /// Payments can only be processed by either an account in the / `authorized_individuals` set or by the owner of the cash register. 
public fun process_payment(register: &mut CashRegister, payment_ticket: Receiving, ctx: &TxContext): Coin { let sender = tx_context::sender(ctx); assert!(vec_set::contains(®ister.authorized_individuals, &sender) || sender == register.register_owner, ENotAuthorized); diff --git a/external-crates/move/crates/bytecode-verifier-tests/src/unit_tests/limit_tests.rs b/external-crates/move/crates/bytecode-verifier-tests/src/unit_tests/limit_tests.rs index 8ab68be9307..df2d2760e7f 100644 --- a/external-crates/move/crates/bytecode-verifier-tests/src/unit_tests/limit_tests.rs +++ b/external-crates/move/crates/bytecode-verifier-tests/src/unit_tests/limit_tests.rs @@ -599,7 +599,7 @@ fn max_identifier_len() { let config = production_config(); let max_ident = "z".repeat( config - .max_idenfitier_len + .max_identifier_len .unwrap_or(DEFAULT_MAX_IDENTIFIER_LENGTH) as usize, ); let good_module = leaf_module(&max_ident); @@ -610,7 +610,7 @@ fn max_identifier_len() { let config = production_config(); let max_ident = "z".repeat( (config - .max_idenfitier_len + .max_identifier_len .unwrap_or(DEFAULT_MAX_IDENTIFIER_LENGTH) as usize) / 2, ); @@ -621,7 +621,7 @@ fn max_identifier_len() { let over_max_ident = "z".repeat( 1 + config - .max_idenfitier_len + .max_identifier_len .unwrap_or(DEFAULT_MAX_IDENTIFIER_LENGTH) as usize, ); let bad_module = leaf_module(&over_max_ident); @@ -634,7 +634,7 @@ fn max_identifier_len() { let over_max_ident = "zx".repeat( 1 + config - .max_idenfitier_len + .max_identifier_len .unwrap_or(DEFAULT_MAX_IDENTIFIER_LENGTH) as usize, ); let bad_module = leaf_module(&over_max_ident); diff --git a/external-crates/move/crates/bytecode-verifier-tests/src/unit_tests/mod.rs b/external-crates/move/crates/bytecode-verifier-tests/src/unit_tests/mod.rs index 941bec50933..ca809322dac 100644 --- a/external-crates/move/crates/bytecode-verifier-tests/src/unit_tests/mod.rs +++ b/external-crates/move/crates/bytecode-verifier-tests/src/unit_tests/mod.rs @@ -52,7 +52,7 @@ pub(crate) 
fn production_config() -> VerifierConfig { max_per_fun_meter_units: Some(1000 * 8000), max_per_mod_meter_units: Some(1000 * 8000), max_constant_vector_len: Some(DEFAULT_MAX_CONSTANT_VECTOR_LEN), - max_idenfitier_len: Some(DEFAULT_MAX_IDENTIFIER_LENGTH), + max_identifier_len: Some(DEFAULT_MAX_IDENTIFIER_LENGTH), allow_receiving_object_id: true, reject_mutable_random_on_entry_functions: true, } diff --git a/external-crates/move/crates/bytecode-verifier-transactional-tests/tests/reference_safety/read_local_ref_after_assign.mvir b/external-crates/move/crates/bytecode-verifier-transactional-tests/tests/reference_safety/read_local_ref_after_assign.mvir index da9638557eb..cdf9c0881a3 100644 --- a/external-crates/move/crates/bytecode-verifier-transactional-tests/tests/reference_safety/read_local_ref_after_assign.mvir +++ b/external-crates/move/crates/bytecode-verifier-transactional-tests/tests/reference_safety/read_local_ref_after_assign.mvir @@ -12,7 +12,7 @@ label b0: assign_ref = copy(read_ref); *copy(assign_ref) = 0; assert(*copy(assign_ref) == 0, 42); - no = *move(read_ref); // valid to read reference after assiging a copy! + no = *move(read_ref); // valid to read reference after assigning a copy! 
_ = move(assign_ref); return; } diff --git a/external-crates/move/crates/move-analyzer/src/symbols.rs b/external-crates/move/crates/move-analyzer/src/symbols.rs index 7221a63d6ad..c06cf4eea27 100644 --- a/external-crates/move/crates/move-analyzer/src/symbols.rs +++ b/external-crates/move/crates/move-analyzer/src/symbols.rs @@ -3536,9 +3536,9 @@ pub fn on_document_symbol_request(context: &Context, request: &Request, symbols: /// Helper function to handle struct fields #[allow(deprecated)] fn handle_struct_fields(struct_def: StructDef, fields: &mut Vec) { - let clonded_fileds = struct_def.field_defs; + let cloned_fields = struct_def.field_defs; - for field_def in clonded_fileds { + for field_def in cloned_fields { let field_range = Range { start: field_def.start, end: field_def.start, diff --git a/external-crates/move/crates/move-binary-format/src/proptest_types.rs b/external-crates/move/crates/move-binary-format/src/proptest_types.rs index a5aebdfdf8d..b95b4515f2e 100644 --- a/external-crates/move/crates/move-binary-format/src/proptest_types.rs +++ b/external-crates/move/crates/move-binary-format/src/proptest_types.rs @@ -226,7 +226,7 @@ impl CompiledModuleStrategyGen { .prop_map( |( self_idx_gen, - (address_identifier_gens, identifier_gens, constant_pool_gen, metdata_gen), + (address_identifier_gens, identifier_gens, constant_pool_gen, metadata_gen), module_handles_gen, (struct_handle_gens, struct_def_gens), random_sigs_gens, @@ -240,7 +240,7 @@ impl CompiledModuleStrategyGen { let identifiers_len = identifiers.len(); let constant_pool = constant_pool_gen.constant_pool(); let constant_pool_len = constant_pool.len(); - let metadata = metdata_gen.metadata(); + let metadata = metadata_gen.metadata(); // module handles let mut module_handles_set = BTreeSet::new(); diff --git a/external-crates/move/crates/move-borrow-graph/src/references.rs b/external-crates/move/crates/move-borrow-graph/src/references.rs index 05fc05c1539..50b8ad7f860 100644 --- 
a/external-crates/move/crates/move-borrow-graph/src/references.rs +++ b/external-crates/move/crates/move-borrow-graph/src/references.rs @@ -71,8 +71,9 @@ pub(crate) struct Ref { pub(crate) borrowed_by: BorrowEdges, /// Child to parent /// 'self' borrows from _ - /// Needed for efficient querying, but should be in one-to-one corespondence - /// with borrowed by i.e. x is borrowed by y IFF y borrows from x + /// Needed for efficient querying, but should be in one-to-one + /// correspondence with borrowed by i.e. x is borrowed by y IFF y + /// borrows from x pub(crate) borrows_from: BTreeSet, /// true if mutable, false otherwise pub(crate) mutable: bool, diff --git a/external-crates/move/crates/move-bytecode-verifier/src/absint.rs b/external-crates/move/crates/move-bytecode-verifier/src/absint.rs index f0266f98d81..2c51d23b7b0 100644 --- a/external-crates/move/crates/move-bytecode-verifier/src/absint.rs +++ b/external-crates/move/crates/move-bytecode-verifier/src/absint.rs @@ -52,7 +52,7 @@ pub trait TransferFunctions { /// Should return an AnalysisError if executing the instruction is /// unsuccessful, and () if the effects of successfully executing /// local@instr have been reflected by mutatating local@pre. - /// Auxilary data from the analysis that is not part of the abstract state + /// Auxiliary data from the analysis that is not part of the abstract state /// can be collected by mutating local@self. /// The last instruction index in the current block is local@last_index. 
/// Knowing this information allows clients to detect the end of a basic diff --git a/external-crates/move/crates/move-bytecode-verifier/src/limits.rs b/external-crates/move/crates/move-bytecode-verifier/src/limits.rs index 6e69044e740..3f0946e4fdd 100644 --- a/external-crates/move/crates/move-bytecode-verifier/src/limits.rs +++ b/external-crates/move/crates/move-bytecode-verifier/src/limits.rs @@ -206,11 +206,11 @@ impl<'a> LimitsVerifier<'a> { Ok(()) } - /// Verifies the lengths of all identifers are valid + /// Verifies the lengths of all identifiers are valid fn verify_identifiers(&self, config: &VerifierConfig) -> PartialVMResult<()> { - if let Some(max_idenfitier_len) = config.max_idenfitier_len { + if let Some(max_identifier_len) = config.max_identifier_len { for (idx, identifier) in self.resolver.identifiers().iter().enumerate() { - if identifier.len() > (max_idenfitier_len as usize) { + if identifier.len() > (max_identifier_len as usize) { return Err(verification_error( StatusCode::IDENTIFIER_TOO_LONG, IndexKind::Identifier, diff --git a/external-crates/move/crates/move-cli/tools.md b/external-crates/move/crates/move-cli/tools.md index 60091d5414e..e1d7301edb4 100644 --- a/external-crates/move/crates/move-cli/tools.md +++ b/external-crates/move/crates/move-cli/tools.md @@ -10,12 +10,12 @@ Move has a number of tools associated with it. This directory contains all, or almost all of them. 
The following crates in this directory are libraries that are used by the [`move-cli`](./move-cli) `package` subcommand: -* `move-bytecode-viewer` -* `move-disassembler` -* `move-explain` -* `move-unit-test` -* `move-package` -* `move-coverage` +- `move-bytecode-viewer` +- `move-disassembler` +- `move-explain` +- `move-unit-test` +- `move-package` +- `move-coverage` In this sense each of these crates defines the core logic for a specific package command, e.g., how to run and report unit tests, or collect and @@ -36,7 +36,7 @@ handled for the `move-bytecode-viewer` that's defined in the `move-bytecode-viewer` crate, and not the `move-cli` crate). Some of the crates mentioned above are also binaries at the moment, however -they should all be able to be made libaries only, with the possible +they should all be able to be made libraries only, with the possible exception of the `move-coverage` crate. The primary reason for this, is that this tool can collect and report test coverage statistics across multiple packages, and multiple runs over a package. This functionality is diff --git a/external-crates/move/crates/move-compiler/README.md b/external-crates/move/crates/move-compiler/README.md index 60e10fa85ea..da3a7274f4b 100644 --- a/external-crates/move/crates/move-compiler/README.md +++ b/external-crates/move/crates/move-compiler/README.md @@ -16,8 +16,8 @@ Move source language is an expression-based language that aims to simplify writi Currently, there are command line tools for Move. -* Move Check is used for checking code, but it does not generate bytecode -* Move Build is used for checking and then compiling to bytecode +- Move Check is used for checking code, but it does not generate bytecode +- Move Build is used for checking and then compiling to bytecode In the future there should be other utilities for testing and play grounding the Move modules. 
@@ -31,33 +31,32 @@ Deliver a minimalistic, expressive, safe, and transparent language to produce--a ### Primary Principles -* **More Concise than Bytecode** Move is expression based, which lends itself to concise and composed programs without the need for extra locals or structure. The Move bytecode is stack based (with the addition of local variables), so a language without stack access would need to be more verbose than the bytecode. In the Move source language, expressions allow for programming directly on the stack in a controlled and safe mode, and in that way, the language gives the same level of functionality as the bytecode but in a more concise and readable environment. +- **More Concise than Bytecode** Move is expression based, which lends itself to concise and composed programs without the need for extra locals or structure. The Move bytecode is stack based (with the addition of local variables), so a language without stack access would need to be more verbose than the bytecode. In the Move source language, expressions allow for programming directly on the stack in a controlled and safe mode, and in that way, the language gives the same level of functionality as the bytecode but in a more concise and readable environment. -* **Move Bytecode Transparency** The Move source language tries to lift up concepts in the Move bytecode into a source language; it is not trying to hide them. The bytecode already has some strong opinions (much stronger than you might expect to find in a bytecode language), and the source language is trying to keep that programming model and line of thinking. The intention of this principle is to remove the need to write bytecode directly. Additionally, this means full interoperability with functions and types declared in published modules. +- **Move Bytecode Transparency** The Move source language tries to lift up concepts in the Move bytecode into a source language; it is not trying to hide them. 
The bytecode already has some strong opinions (much stronger than you might expect to find in a bytecode language), and the source language is trying to keep that programming model and line of thinking. The intention of this principle is to remove the need to write bytecode directly. Additionally, this means full interoperability with functions and types declared in published modules. -* **Stricter than Bytecode** The source language often adds additional levels of restrictions. At an expression level, this means no arbitrary manipulation of the stack (only can do so through expressions), and no dead code or unused effects. At a module level, this could mean additional warnings for unused types or un-invocable functions. At a conceptual/program level, this will also mean adding integration for formal verification. +- **Stricter than Bytecode** The source language often adds additional levels of restrictions. At an expression level, this means no arbitrary manipulation of the stack (only can do so through expressions), and no dead code or unused effects. At a module level, this could mean additional warnings for unused types or un-invocable functions. At a conceptual/program level, this will also mean adding integration for formal verification. ### Secondary Principles -* **Pathway of Learning** Syntax choices and error messages are intended to give a natural flow of learning. For example, some of the choices around expression syntax could be changed to be more familiar to various other languages, but they would hurt the plug-n-play feeling of the expression based syntax, which might hurt developing a deeper understanding of the Move source language. +- **Pathway of Learning** Syntax choices and error messages are intended to give a natural flow of learning. 
For example, some of the choices around expression syntax could be changed to be more familiar to various other languages, but they would hurt the plug-n-play feeling of the expression based syntax, which might hurt developing a deeper understanding of the Move source language. -* **Aiding Common Community Patterns** As Move becomes more heavily used, common patterns for modules are likely to appear. Move might add new language features to make these patterns easier, clearer, or safer. But, they will not be added if it violates some other key design goal/principle of the language. +- **Aiding Common Community Patterns** As Move becomes more heavily used, common patterns for modules are likely to appear. Move might add new language features to make these patterns easier, clearer, or safer. But, they will not be added if it violates some other key design goal/principle of the language. -* **Semantic Preserving Optimizations** Optimizations are an important developer tool, as they let a programmer write code in a more natural way. However, all of the optimizations performed must be semantic preserving, to prevent any catastrophic exploits or errors from occurring in optimized code. That being said, it is not the primary goal of the Move source language to produce *heavily* optimized code, but it is a nice feature to have. +- **Semantic Preserving Optimizations** Optimizations are an important developer tool, as they let a programmer write code in a more natural way. However, all of the optimizations performed must be semantic preserving, to prevent any catastrophic exploits or errors from occurring in optimized code. That being said, it is not the primary goal of the Move source language to produce _heavily_ optimized code, but it is a nice feature to have. ### Non-Principles -* **Heavy Abstractions** The Move source language does not intend to hide the details of the Move bytecode, this ranges from everything of references to global storage. 
There might be some abstractions that make interacting with these items easier, but they should always be available in Move at their most basic (bytecode equivalent) level. This does not mean that conveniences currently given by the source language, such as easy field access or implicit freezing, are against the core set of principles, but only that conveniences should not be ambiguous or opaque in how they interact at the bytecode level. Note though, this does not preclude the addition of features to the lanugage, such as access modifiers that translate to compiler-generated dynamic checks. It is just that it is not an active goal of the language to add on heavy abstractions just for the sake of obscuring bytecode design choices. +- **Heavy Abstractions** The Move source language does not intend to hide the details of the Move bytecode, this ranges from everything of references to global storage. There might be some abstractions that make interacting with these items easier, but they should always be available in Move at their most basic (bytecode equivalent) level. This does not mean that conveniences currently given by the source language, such as easy field access or implicit freezing, are against the core set of principles, but only that conveniences should not be ambiguous or opaque in how they interact at the bytecode level. Note though, this does not preclude the addition of features to the language, such as access modifiers that translate to compiler-generated dynamic checks. It is just that it is not an active goal of the language to add on heavy abstractions just for the sake of obscuring bytecode design choices. ## Command-line options The two available programs are Move check and Move build.
-* They can be built using `cargo build -p move-compiler` -* Or run directly with - * `cargo run -p move-compiler --bin move-check -- [ARGS]` - * `cargo run -p move-compiler --bin move-build -- [ARGS]` - +- They can be built using `cargo build -p move-compiler` +- Or run directly with + - `cargo run -p move-compiler --bin move-check -- [ARGS]` + - `cargo run -p move-compiler --bin move-build -- [ARGS]` Move check is a command line tool for checking Move programs without producing bytecode @@ -168,8 +167,8 @@ move-compiler # Main crate │ ├── errors # Errors produced by the various checks │ │ └── mod.rs # Module for Errors | | -│ ├── command_line # Utilities used by both command line binnaries -│ | └── mod.rs # Module for Command LIne +│ ├── command_line # Utilities used by both command line binaries +│ | └── mod.rs # Module for Command Line | | │ └── bin # Command line binaries │ ├── move-check.rs # Defines the move-check command line tool diff --git a/external-crates/move/crates/move-compiler/src/cfgir/absint.rs b/external-crates/move/crates/move-compiler/src/cfgir/absint.rs index 52d38348df2..38defb74796 100644 --- a/external-crates/move/crates/move-compiler/src/cfgir/absint.rs +++ b/external-crates/move/crates/move-compiler/src/cfgir/absint.rs @@ -68,7 +68,7 @@ pub trait TransferFunctions { /// Should return an AnalysisError if executing the instruction is /// unsuccessful, and () if the effects of successfully executing /// local@instr have been reflected by mutatating local@pre. - /// Auxilary data from the analysis that is not part of the abstract state + /// Auxiliary data from the analysis that is not part of the abstract state /// can be collected by mutating local@self. /// The last instruction index in the current block is local@last_index. 
/// Knowing this information allows clients to detect the end of a basic diff --git a/external-crates/move/crates/move-compiler/src/cfgir/translate.rs b/external-crates/move/crates/move-compiler/src/cfgir/translate.rs index e34dd14c9e1..a6b170dcac4 100644 --- a/external-crates/move/crates/move-compiler/src/cfgir/translate.rs +++ b/external-crates/move/crates/move-compiler/src/cfgir/translate.rs @@ -254,7 +254,7 @@ fn constants( cycle_nodes.append(&mut scc.into_iter().collect()); } } - // report any node that relies on a node in a cycle but is not iself part of + // report any node that relies on a node in a cycle but is not itself part of // that cycle for cycle_node in cycle_nodes.iter() { // petgraph retains edges for nodes that have been deleted, so we ensure the diff --git a/external-crates/move/crates/move-compiler/src/cfgir/visitor.rs b/external-crates/move/crates/move-compiler/src/cfgir/visitor.rs index c66f8ec4c0a..f356f83f98b 100644 --- a/external-crates/move/crates/move-compiler/src/cfgir/visitor.rs +++ b/external-crates/move/crates/move-compiler/src/cfgir/visitor.rs @@ -279,7 +279,7 @@ pub trait SimpleAbsInt: Sized { ls: &[LValue], values: Vec<::Value>, ) { - // pad with defautl to account for errors + // pad with default to account for errors let padded_values = values.into_iter().chain(std::iter::repeat( ::Value::default(), )); diff --git a/external-crates/move/crates/move-compiler/src/parser/lexer.rs b/external-crates/move/crates/move-compiler/src/parser/lexer.rs index 73f597876ba..ab32bb5870b 100644 --- a/external-crates/move/crates/move-compiler/src/parser/lexer.rs +++ b/external-crates/move/crates/move-compiler/src/parser/lexer.rs @@ -643,7 +643,7 @@ fn find_token( if rest_text.starts_with('\'') { let diag = maybe_diag! 
{ let loc = make_loc(file_hash, start_offset, start_offset + len + 1); - let msg = "Charater literals are not supported"; + let msg = "Character literals are not supported"; let mut diag = diag!(Syntax::UnexpectedToken, (loc, msg)); diag.add_note("String literals use double-quote (\")."); Box::new(diag) diff --git a/external-crates/move/crates/move-compiler/src/typing/infinite_instantiations.rs b/external-crates/move/crates/move-compiler/src/typing/infinite_instantiations.rs index 1e358096870..a8083264fcc 100644 --- a/external-crates/move/crates/move-compiler/src/typing/infinite_instantiations.rs +++ b/external-crates/move/crates/move-compiler/src/typing/infinite_instantiations.rs @@ -186,7 +186,7 @@ fn module<'a>( .for_each(|(_fname, fdef)| function_body(context, &fdef.body)); let graph = context.instantiation_graph(); // - get the strongly connected components - // - fitler out SCCs that do not contain a 'nested' or 'strong' edge + // - filter out SCCs that do not contain a 'nested' or 'strong' edge // - report those cycles petgraph_scc(&graph) .into_iter() diff --git a/external-crates/move/crates/move-compiler/src/typing/translate.rs b/external-crates/move/crates/move-compiler/src/typing/translate.rs index ea41c29f692..5e30f14ed66 100644 --- a/external-crates/move/crates/move-compiler/src/typing/translate.rs +++ b/external-crates/move/crates/move-compiler/src/typing/translate.rs @@ -2131,7 +2131,7 @@ fn add_field_types( N::StructFields::Native(nloc) => { let msg = format!( "Invalid {} usage for native struct '{}::{}'. 
Native structs cannot be directly \ - constructed/deconstructed, and their fields cannot be dirctly accessed", + constructed/deconstructed, and their fields cannot be directly accessed", verb, m, n ); context.env.add_diag(diag!( diff --git a/external-crates/move/crates/move-compiler/tests/iota_mode/init/must_have_txn_context.move b/external-crates/move/crates/move-compiler/tests/iota_mode/init/must_have_txn_context.move index cd06057cee2..ab9d6c79281 100644 --- a/external-crates/move/crates/move-compiler/tests/iota_mode/init/must_have_txn_context.move +++ b/external-crates/move/crates/move-compiler/tests/iota_mode/init/must_have_txn_context.move @@ -5,7 +5,7 @@ module a::m { } } -// cannot have mroe than one TxContext +// cannot have more than one TxContext module a::n { use iota::tx_context; fun init(_ctx: &mut tx_context::TxContext, _ctx2: &mut tx_context::TxContext) { diff --git a/external-crates/move/crates/move-compiler/tests/move_2024/expansion/self_leading_access.move b/external-crates/move/crates/move-compiler/tests/move_2024/expansion/self_leading_access.move index c0189f9fb71..90f42e13e3f 100644 --- a/external-crates/move/crates/move-compiler/tests/move_2024/expansion/self_leading_access.move +++ b/external-crates/move/crates/move-compiler/tests/move_2024/expansion/self_leading_access.move @@ -2,7 +2,7 @@ module a::foo { public struct S() public fun foo(): S { - // TOOD fix this, should resolve to a::m::S + // TODO fix this, should resolve to a::m::S foo::S() } } diff --git a/external-crates/move/crates/move-compiler/tests/move_2024/typing/macros_types_checked_invalid_constraints.move b/external-crates/move/crates/move-compiler/tests/move_2024/typing/macros_types_checked_invalid_constraints.move index fbc502cd2ab..4e4aece2766 100644 --- a/external-crates/move/crates/move-compiler/tests/move_2024/typing/macros_types_checked_invalid_constraints.move +++ 
b/external-crates/move/crates/move-compiler/tests/move_2024/typing/macros_types_checked_invalid_constraints.move @@ -20,7 +20,7 @@ module a::m { #[allow(dead_code)] fun t() { - // type args don't satisify constraints + // type args don't satisfy constraints foo!>(0, &mut 1, NeedsCopy {}); foo!>(0, &mut 1, NeedsCopy {}); foo!(0, &mut 1, NeedsCopy {}); diff --git a/external-crates/move/crates/move-compiler/tests/move_check/typing/native_structs_pack_unpack.exp b/external-crates/move/crates/move-compiler/tests/move_check/typing/native_structs_pack_unpack.exp index ad9ed63283e..f494f493213 100644 --- a/external-crates/move/crates/move-compiler/tests/move_check/typing/native_structs_pack_unpack.exp +++ b/external-crates/move/crates/move-compiler/tests/move_check/typing/native_structs_pack_unpack.exp @@ -5,7 +5,7 @@ error[E04015]: invalid use of native item │ ------ Struct declared 'native' here · 9 │ C::T {} - │ ^^^^^^^ Invalid argument usage for native struct '0x42::C::T'. Native structs cannot be directly constructed/deconstructed, and their fields cannot be dirctly accessed + │ ^^^^^^^ Invalid argument usage for native struct '0x42::C::T'. Native structs cannot be directly constructed/deconstructed, and their fields cannot be directly accessed error[E04001]: restricted visibility ┌─ tests/move_check/typing/native_structs_pack_unpack.move:9:9 @@ -21,7 +21,7 @@ error[E04015]: invalid use of native item │ ------ Struct declared 'native' here · 12 │ let C::T {} = c; - │ ^^^^^^^ Invalid binding usage for native struct '0x42::C::T'. Native structs cannot be directly constructed/deconstructed, and their fields cannot be dirctly accessed + │ ^^^^^^^ Invalid binding usage for native struct '0x42::C::T'. 
Native structs cannot be directly constructed/deconstructed, and their fields cannot be directly accessed error[E04001]: restricted visibility ┌─ tests/move_check/typing/native_structs_pack_unpack.move:12:13 diff --git a/external-crates/move/crates/move-compiler/tests/move_check/typing/vector_with_non_base_type.move b/external-crates/move/crates/move-compiler/tests/move_check/typing/vector_with_non_base_type.move index 3c66b726d33..93af7e54e0f 100644 --- a/external-crates/move/crates/move-compiler/tests/move_check/typing/vector_with_non_base_type.move +++ b/external-crates/move/crates/move-compiler/tests/move_check/typing/vector_with_non_base_type.move @@ -1,6 +1,6 @@ module 0x42::Test { fun t() { - // test invalid vector instatiation + // test invalid vector instantiation let _ = vector<&u64>[]; let _ = vector<&mut u64>[]; let _ = vector<()>[]; diff --git a/external-crates/move/crates/move-compiler/tests/move_check/typing/vector_with_non_base_type_inferred.move b/external-crates/move/crates/move-compiler/tests/move_check/typing/vector_with_non_base_type_inferred.move index 849cf07b56b..dd13f551766 100644 --- a/external-crates/move/crates/move-compiler/tests/move_check/typing/vector_with_non_base_type_inferred.move +++ b/external-crates/move/crates/move-compiler/tests/move_check/typing/vector_with_non_base_type_inferred.move @@ -1,6 +1,6 @@ module 0x42::Test { fun t() { - // test invalid vector instatiation, inferred type + // test invalid vector instantiation, inferred type vector[&0]; vector[&mut 0]; vector[()]; diff --git a/external-crates/move/crates/move-docgen/src/docgen.rs b/external-crates/move/crates/move-docgen/src/docgen.rs index 1e2b56807b3..43db75558f8 100644 --- a/external-crates/move/crates/move-docgen/src/docgen.rs +++ b/external-crates/move/crates/move-docgen/src/docgen.rs @@ -451,7 +451,7 @@ impl<'env> Docgen<'env> { .to_string(), ) } else { - // If it's a dependency traverse back up to finde the package name so that we + // If it's a dependency 
traverse back up to find the package name so that we // can generate the documentation in the right place. let path = PathBuf::from(module_env.get_source_path()); let package_name = path.ancestors().find_map(|dir| { diff --git a/external-crates/move/crates/move-ir-to-bytecode/src/compiler.rs b/external-crates/move/crates/move-ir-to-bytecode/src/compiler.rs index 61db01c29a8..72a28e7741c 100644 --- a/external-crates/move/crates/move-ir-to-bytecode/src/compiler.rs +++ b/external-crates/move/crates/move-ir-to-bytecode/src/compiler.rs @@ -374,7 +374,7 @@ pub fn compile_module<'a>( for ir_constant in module.constants { // If the constant is an error constant in the source, then add the error - // constant's name look up the constant's name, as a constant valeu -- + // constant's name look up the constant's name, as a constant value -- // this may be present already, e.g., in the case of something like // `const Foo: vector = b"Foo"` in which case the new index will not // be added and the previous index will be used.
diff --git a/external-crates/move/crates/move-ir-to-bytecode/src/context.rs b/external-crates/move/crates/move-ir-to-bytecode/src/context.rs index a6ae17fa811..edd8e43f471 100644 --- a/external-crates/move/crates/move-ir-to-bytecode/src/context.rs +++ b/external-crates/move/crates/move-ir-to-bytecode/src/context.rs @@ -253,7 +253,7 @@ pub(crate) struct Context<'a> { labels: HashMap, // queryable pools - // TODO: lookup for Fields is not that seemless after binary format changes + // TODO: lookup for Fields is not that seamless after binary format changes // We need multiple lookups or a better representation for fields fields: HashMap<(StructHandleIndex, Field_), (StructDefinitionIndex, SignatureToken, usize)>, function_handles: HashMap<(ModuleName, FunctionName), (FunctionHandle, FunctionHandleIndex)>, diff --git a/external-crates/move/crates/move-ir-types/src/ast.rs b/external-crates/move/crates/move-ir-types/src/ast.rs index 73839f51f0d..456dbd8a31e 100644 --- a/external-crates/move/crates/move-ir-types/src/ast.rs +++ b/external-crates/move/crates/move-ir-types/src/ast.rs @@ -74,7 +74,7 @@ pub struct ModuleDefinition { /// Explicitly given dependency #[derive(Clone, Debug, PartialEq)] pub struct ModuleDependency { - /// Qualified identifer of the dependency + /// Qualified identifier of the dependency pub name: ModuleName, /// The structs (including resources) that the dependency defines pub structs: Vec, diff --git a/external-crates/move/crates/move-model/src/lib.rs b/external-crates/move/crates/move-model/src/lib.rs index d426b1c2b62..5f4026d06e5 100644 --- a/external-crates/move/crates/move-model/src/lib.rs +++ b/external-crates/move/crates/move-model/src/lib.rs @@ -75,7 +75,7 @@ pub fn run_model_builder< } /// Build the move model with default compilation flags and custom options and a -/// set of provided named addreses. +/// set of provided named addresses. /// This collects transitive dependencies for move sources from the provided /// directory list. 
pub fn run_model_builder_with_options< diff --git a/external-crates/move/crates/move-model/src/model.rs b/external-crates/move/crates/move-model/src/model.rs index fb7eceefa54..edf973a7af3 100644 --- a/external-crates/move/crates/move-model/src/model.rs +++ b/external-crates/move/crates/move-model/src/model.rs @@ -439,7 +439,7 @@ pub struct GlobalEnv { pub module_data: Vec, /// A type-indexed container for storing extension data in the environment. extensions: RefCell>>, - /// The address of the standard and extension libaries. + /// The address of the standard and extension libraries. stdlib_address: Option, extlib_address: Option, } @@ -2668,7 +2668,7 @@ impl<'env> FunctionEnv<'env> { self.definition_view().visibility() } - /// Return true if the function is an entry fucntion + /// Return true if the function is an entry function pub fn is_entry(&self) -> bool { self.definition_view().is_entry() } @@ -2705,7 +2705,7 @@ impl<'env> FunctionEnv<'env> { /// Returns true if the function is a script function pub fn is_script(&self) -> bool { - // The main function of a scipt is a script function + // The main function of a script is a script function self.module_env.is_script_module() || self.definition_view().is_entry() } diff --git a/external-crates/move/crates/move-package/src/resolution/dependency_graph.rs b/external-crates/move/crates/move-package/src/resolution/dependency_graph.rs index 08a5a181bcb..e49a1ecadd9 100644 --- a/external-crates/move/crates/move-package/src/resolution/dependency_graph.rs +++ b/external-crates/move/crates/move-package/src/resolution/dependency_graph.rs @@ -596,9 +596,9 @@ impl DependencyGraph { &mut dev_o, mode == DependencyMode::DevOnly, )?; - self.prune_overriden_pkgs(root_package_name, mode, &o, &dev_o)?; + self.prune_overridden_pkgs(root_package_name, mode, &o, &dev_o)?; } else { - self.prune_overriden_pkgs(root_package_name, mode, overrides, dev_overrides)?; + self.prune_overridden_pkgs(root_package_name, mode, overrides, 
dev_overrides)?; } Ok(()) } @@ -679,7 +679,7 @@ impl DependencyGraph { /// Prunes packages in a sub-graph based on the overrides information from /// the outer graph. - fn prune_overriden_pkgs( + fn prune_overridden_pkgs( &mut self, root_pkg_name: PM::PackageName, mode: DependencyMode, diff --git a/external-crates/move/crates/move-package/src/resolution/resolution_graph.rs b/external-crates/move/crates/move-package/src/resolution/resolution_graph.rs index 6a90a284290..c1478dca249 100644 --- a/external-crates/move/crates/move-package/src/resolution/resolution_graph.rs +++ b/external-crates/move/crates/move-package/src/resolution/resolution_graph.rs @@ -116,10 +116,10 @@ impl ResolvedGraph { let mut resolved_pkg = Package::new(package_path, &build_options) .with_context(|| format!("Resolving package '{pkg_id}'"))?; - // Check dependencies package names from manifest are consistent with ther names - // in parent (this) manifest. We do this check only for local and git - // dependencies as we assume custom dependencies might not have a user-defined - // name. + // Check dependencies package names from manifest are consistent with their + // names in parent (this) manifest. We do this check only for local + // and git dependencies as we assume custom dependencies might not + // have a user-defined name. 
for (dep_name, dep) in &resolved_pkg.source_package.dependencies { match dep { PM::Dependency::External(_) => continue, diff --git a/external-crates/move/crates/move-stdlib/sources/signer.move b/external-crates/move/crates/move-stdlib/sources/signer.move index 55376dd3fff..d41fd3a7c87 100644 --- a/external-crates/move/crates/move-stdlib/sources/signer.move +++ b/external-crates/move/crates/move-stdlib/sources/signer.move @@ -1,6 +1,6 @@ module std::signer { // Borrows the address of the signer - // Conceptually, you can think of the `signer` as being a struct wrapper arround an + // Conceptually, you can think of the `signer` as being a struct wrapper around an // address // ``` // struct signer has drop { addr: address } diff --git a/external-crates/move/crates/move-vm-config/src/verifier.rs b/external-crates/move/crates/move-vm-config/src/verifier.rs index 77ccc5e93c1..46debc20263 100644 --- a/external-crates/move/crates/move-vm-config/src/verifier.rs +++ b/external-crates/move/crates/move-vm-config/src/verifier.rs @@ -24,7 +24,7 @@ pub struct VerifierConfig { pub max_basic_blocks_in_script: Option, pub max_per_fun_meter_units: Option, pub max_per_mod_meter_units: Option, - pub max_idenfitier_len: Option, + pub max_identifier_len: Option, pub allow_receiving_object_id: bool, pub reject_mutable_random_on_entry_functions: bool, } @@ -63,7 +63,7 @@ impl Default for VerifierConfig { max_per_fun_meter_units: Some(1000 * 8000), max_per_mod_meter_units: Some(1000 * 8000), max_constant_vector_len: Some(DEFAULT_MAX_CONSTANT_VECTOR_LEN), - max_idenfitier_len: Some(DEFAULT_MAX_IDENTIFIER_LENGTH), + max_identifier_len: Some(DEFAULT_MAX_IDENTIFIER_LENGTH), allow_receiving_object_id: true, reject_mutable_random_on_entry_functions: true, } diff --git a/external-crates/move/crates/move-vm-runtime/src/loader.rs b/external-crates/move/crates/move-vm-runtime/src/loader.rs index b09d25614e6..fd18427f177 100644 --- a/external-crates/move/crates/move-vm-runtime/src/loader.rs +++ 
b/external-crates/move/crates/move-vm-runtime/src/loader.rs @@ -564,7 +564,7 @@ impl ModuleCache { // Helpers to load/verify modules without recursion -// In order to traverse the transitive dependencies of a module (when verifing +// In order to traverse the transitive dependencies of a module (when verifying // the module), we create a stack and iterate over the dependencies to avoid // recursion. An entry on the stack is conceptually a pair (module, // dependencies) where dependencies is used to visit them and to track when a diff --git a/external-crates/move/crates/move-vm-runtime/src/move_vm.rs b/external-crates/move/crates/move-vm-runtime/src/move_vm.rs index 1b5ddebb8e6..eea4e360053 100644 --- a/external-crates/move/crates/move-vm-runtime/src/move_vm.rs +++ b/external-crates/move/crates/move-vm-runtime/src/move_vm.rs @@ -58,7 +58,7 @@ impl MoveVM { /// other words, if there is a module publishing Session it must be the /// only Session existing. /// - In general, a new Move VM needs to be created whenever the storage - /// gets modified by an outer envrionment, or otherwise the states may + /// gets modified by an outer environment, or otherwise the states may /// be out of sync. There are a few exceptional cases where this may not /// be necessary, with the most notable one being the common module /// publishing flow: you can keep using the same Move VM if you publish diff --git a/external-crates/move/crates/move-vm-types/src/natives/function.rs b/external-crates/move/crates/move-vm-types/src/natives/function.rs index 6fa4de4620b..cf7b068a716 100644 --- a/external-crates/move/crates/move-vm-types/src/natives/function.rs +++ b/external-crates/move/crates/move-vm-types/src/natives/function.rs @@ -14,7 +14,7 @@ //! ) -> PartialVMResult;` //! //! arguments are passed with first argument at position 0 and so forth. -//! Popping values from `arguments` gives the aguments in reverse order (last +//! 
Popping values from `arguments` gives the arguments in reverse order (last //! first). This module contains the declarations and utilities to implement a //! native function. diff --git a/external-crates/move/crates/move-vm-types/src/values/value_tests.rs b/external-crates/move/crates/move-vm-types/src/values/value_tests.rs index 9e690875a4e..4ce7b972427 100644 --- a/external-crates/move/crates/move-vm-types/src/values/value_tests.rs +++ b/external-crates/move/crates/move-vm-types/src/values/value_tests.rs @@ -164,7 +164,7 @@ fn leagacy_ref_abstract_memory_size_consistency() -> PartialVMResult<()> { } #[test] -fn legacy_struct_abstract_memory_size_consistenty() -> PartialVMResult<()> { +fn legacy_struct_abstract_memory_size_consistency() -> PartialVMResult<()> { let structs = [ Struct::pack([]), Struct::pack([Value::struct_(Struct::pack([Value::u8(0), Value::u64(0)]))]), diff --git a/external-crates/move/documentation/book/src/packages.md b/external-crates/move/documentation/book/src/packages.md index 101870c5869..13bb1c9f28f 100644 --- a/external-crates/move/documentation/book/src/packages.md +++ b/external-crates/move/documentation/book/src/packages.md @@ -3,16 +3,16 @@ Packages allow Move programmers to more easily re-use code and share it across projects. The Move package system allows programmers to easily: -- Define a package containing Move code; -- Parameterize a package by [named addresses](./primitive-types/address.md); -- Import and use packages in other Move code and instantiate named addresses; -- Build packages and generate associated compilation artifacts from packages; and -- Work with a common interface around compiled Move artifacts. 
+- Define a package containing Move code; +- Parameterize a package by [named addresses](./primitive-types/address.md); +- Import and use packages in other Move code and instantiate named addresses; +- Build packages and generate associated compilation artifacts from packages; and +- Work with a common interface around compiled Move artifacts. ## Package Layout and Manifest Syntax -A Move package source directory contains a `Move.toml` package manifest file, a -generated `Move.lock` file, and a set of subdirectories: +A Move package source directory contains a `Move.toml` package manifest file, a generated +`Move.lock` file, and a set of subdirectories: ``` a_move_package @@ -24,32 +24,27 @@ a_move_package └── tests (optional, test mode) ``` - The directories and files labeled "required" must be present for a directory to be - considered a Move package and built. Optional directories may be present, - and if so, they will be included in the compilation process depending on the - mode used to build the package. For instance, when built in "dev" or "test" - modes, the `tests` and `examples` directories will also be included. - - Going through each of these in turn: - -1. The `Move.toml` file is the package manifest and is required for a directory - to be considered a Move package. This file contains metadata about the - package, such as name, dependencies, and so on. -2. The `Move.lock` file is generated by the Move CLI and contains the fixed - build versions of the package and its dependencies. It is used to ensure - consistent versions are used across different builds and that changes in - dependencies are apparent as a change in this file. -3. The `sources` directory is required and contains the Move modules that make - up the package. Modules in this directory will always be included in the - compilation process. -4. The `doc_templates` directory can contain documentation templates that will - be used when generating documentation for the package. -5. 
The `examples` directory can hold additional code to be used only for - development and/or tutorials, this will not be included when compiled - outside of `test` or `dev` modes. -6. The `tests` directory can contain Move modules that are only included when - compiled in `test` mode or when [Move unit tests](./unit-testing.md) are - run. +The directories and files labeled "required" must be present for a directory to be considered a Move +package and built. Optional directories may be present, and if so, they will be included in the +compilation process depending on the mode used to build the package. For instance, when built in +"dev" or "test" modes, the `tests` and `examples` directories will also be included. + +Going through each of these in turn: + +1. The `Move.toml` file is the package manifest and is required for a directory to be considered a + Move package. This file contains metadata about the package, such as name, dependencies, and so + on. +2. The `Move.lock` file is generated by the Move CLI and contains the fixed build versions of the + package and its dependencies. It is used to ensure consistent versions are used across different + builds and that changes in dependencies are apparent as a change in this file. +3. The `sources` directory is required and contains the Move modules that make up the package. + Modules in this directory will always be included in the compilation process. +4. The `doc_templates` directory can contain documentation templates that will be used when + generating documentation for the package. +5. The `examples` directory can hold additional code to be used only for development and/or + tutorials, this will not be included when compiled outside of `test` or `dev` modes. +6. The `tests` directory can contain Move modules that are only included when compiled in `test` + mode or when [Move unit tests](./unit-testing.md) are run. 
### Move.toml @@ -67,7 +62,7 @@ authors* = [,+] # e.g., ["Joe Smith (joesmith@noemail.com)", "John Snow # Additional fields may be added to this section by external tools. E.g., on Iota the following sections are added: published-at* = "" # The address that the package is published at. Should be set after the first publication. -[dependencies] # (Optional section) Paths to dependencies +[dependencies] # (Optional section) Paths to dependencies # One or more lines declaring dependencies in the following format # ##### Local Dependencies ##### @@ -77,10 +72,10 @@ published-at* = "" # The address that the package is published at. # override you can use `override = true` # Override = { local = "../conflicting/version", override = true } # To instantiate address values in a dependency, use `addr_subst` - = { + = { local = , override* = , - addr_subst* = { ( = ( | ""))+ } + addr_subst* = { ( = ( | ""))+ } } # ##### Git Dependencies ##### @@ -88,7 +83,7 @@ published-at* = "" # The address that the package is published at. # Revision must be supplied, it can be a branch, a tag, or a commit hash. # If no `subdir` is specified, the root of the repository is used. # MyRemotePackage = { git = "https://some.remote/host.git", subdir = "remote/path", rev = "main" } - = { + = { git = , subdir=, rev=, @@ -109,12 +104,12 @@ published-at* = "" # The address that the package is published at. # The dev-dependencies section allows overriding dependencies for `--test` and # `--dev` modes. You can e.g., introduce test-only dependencies here. 
# Local = { local = "../path/to/dev-build" } - = { + = { local = , override* = , - addr_subst* = { ( = ( | ""))+ } + addr_subst* = { ( = ( | ""))+ } } - = { + = { git = , subdir=, rev=, @@ -157,48 +152,44 @@ MoveStdlib = { git = "https://github.com/iotaledger/iota.git", subdir = "crates/ address_to_be_filled_in = "0x101010101" ``` -Most of the sections in the package manifest are self explanatory, but named -addresses can be a bit difficult to understand so we examine them in more -detail in [Named Addresses During -Compilation](#named-addresses-during-compilation), but before that we'll first -take a look at the `Move.lock` file and what it contains. +Most of the sections in the package manifest are self explanatory, but named addresses can be a bit +difficult to understand so we examine them in more detail in +[Named Addresses During Compilation](#named-addresses-during-compilation), but before that we'll +first take a look at the `Move.lock` file and what it contains. ## Move.lock -The `Move.lock` file is generated at the root of the Move pacakge when the -package is built. The `Move.lock` file contains information about your package -and its build configuration, and acts as a communication layer between the Move -compiler and other tools, like chain-specific command line interfaces and -third-party package managers. +The `Move.lock` file is generated at the root of the Move package when the package is built. The +`Move.lock` file contains information about your package and its build configuration, and acts as a +communication layer between the Move compiler and other tools, like chain-specific command line +interfaces and third-party package managers. -Like the `Move.toml` file, the `Move.lock` file is a text-based TOML file. -Unlike the package manifest however, the `Move.lock` file is not intended for -you to edit directly. Processes on the toolchain, like the Move compiler, -access and edit the file to read and append relevant information to it. 
You
-also must not move the file from the root, as it needs to be at the same level
-as the `Move.toml` manifest in the package.
+Like the `Move.toml` file, the `Move.lock` file is a text-based TOML file. Unlike the package
+manifest however, the `Move.lock` file is not intended for you to edit directly. Processes on the
+toolchain, like the Move compiler, access and edit the file to read and append relevant information
+to it. You also must not move the file from the root, as it needs to be at the same level as the
+`Move.toml` manifest in the package.

-If you are using source control for your package, it's recommended practice to
-check in the `Move.lock` file that corresponds with your desired built or
-published package. This ensures that every build of your package is an exact replica
-of the original, and that changes to the build will be apparent as changes to
-the `Move.lock` file.
+If you are using source control for your package, it's recommended practice to check in the
+`Move.lock` file that corresponds with your desired built or published package. This ensures that
+every build of your package is an exact replica of the original, and that changes to the build will
+be apparent as changes to the `Move.lock` file.

-The `Move.lock` file is a TOML file that currently contains the following
-fields.
+The `Move.lock` file is a TOML file that currently contains the following fields.

-**Note**: other fields may be added to the lock file either in the future, or
-by third-party package package managers as well.
+**Note**: other fields may be added to the lock file either in the future, or by third-party
+package managers as well.

### The `[move]` Section

This section contains the core information needed in the lockfile:

-* The version of the lockfile (needed for backwards compatibility checking, and
-  versioning lockfile changes in the future).
-* The hash of the `Move.toml` file that was used to generate this lock file.
-* The hash of the `Move.lock` file of all dependencies. If no depencies are
-  present, this will be an empty string.
-* The list of dependencies.
+
+- The version of the lockfile (needed for backwards compatibility checking, and versioning
+  lockfile changes in the future).
+- The hash of the `Move.toml` file that was used to generate this lock file.
+- The hash of the `Move.lock` file of all dependencies. If no dependencies are present, this will
+  be an empty string.
+- The list of dependencies.

```
[move]
@@ -210,12 +201,11 @@ dependencies = { (name = )* } # List of dependencies. Not present if the

### The `[move.package]` Sections

-After the Move compiler resolves each of the dependencies for the package it
-writes the location of the dependency to the `Move.lock` file. If a dependency
-failed to resolve, the compiler will not write the `Move.lock` file and the
-build fails. If all dependencies resolve, the `Move.lock` file contains the
-locations (local and remote) of all of the package's transitive dependencies.
-These will be stored in the `Move.lock` file in the following format:
+After the Move compiler resolves each of the dependencies for the package it writes the location of
+the dependency to the `Move.lock` file. If a dependency failed to resolve, the compiler will not
+write the `Move.lock` file and the build fails. If all dependencies resolve, the `Move.lock` file
+contains the locations (local and remote) of all of the package's transitive dependencies. These
+will be stored in the `Move.lock` file in the following format:

```
...
@@ -231,9 +221,9 @@ source = { local = "../local-dep" }

### The `[move.toolchain-version]` Section

-As mentioned above, additional fields may be added to the lock file by external
-tools. For example, the Iota package manager adds toolchain version information
-to the lock file that can then be used for on-chain source verification:
For example, +the Iota package manager adds toolchain version information to the lock file that can then be used +for on-chain source verification: ``` ... @@ -244,19 +234,18 @@ edition = # The edition of the Move language used to build the package, flavor = # The flavor of the Move compiler used to build the package, e.g. "iota" ``` -With that, let's now turn to the compilation process and how named addresses are -resolved, and how to use them. +With that, let's now turn to the compilation process and how named addresses are resolved, and how +to use them. ## Named Addresses During Compilation -Recall that Move has [named addresses](./primitive-types/address.md) and that named addresses -cannot be declared in Move. Instead they are declared at the package level: in -the manifest file (`Move.toml`) for a Move package you declare named addresses -in the package, instantiate other named addresses, and rename named addresses -from other packages within the Move package system. +Recall that Move has [named addresses](./primitive-types/address.md) and that named addresses cannot +be declared in Move. Instead they are declared at the package level: in the manifest file +(`Move.toml`) for a Move package you declare named addresses in the package, instantiate other named +addresses, and rename named addresses from other packages within the Move package system. -Let's go through each of these actions, and how they are performed in the -package's manifest one-by-one: +Let's go through each of these actions, and how they are performed in the package's manifest +one-by-one: ### Declaring Named Addresses @@ -280,10 +269,10 @@ named_addr = "_" ``` Declares `named_addr` as a named address in the package `example_pkg` and that _this address can be -any valid address value_. In particular, an importing package can pick the value of the named address -`named_addr` to be any address it wishes. 
Intuitively you can think of this as parameterizing the -package `example_pkg` by the named address `named_addr`, and the package can then be instantiated -later on by an importing package. +any valid address value_. In particular, an importing package can pick the value of the named +address `named_addr` to be any address it wishes. Intuitively you can think of this as +parameterizing the package `example_pkg` by the named address `named_addr`, and the package can then +be instantiated later on by an importing package. `named_addr` can also be declared as: @@ -302,10 +291,10 @@ exact value assigned to it. With these two different declaration methods, there are two ways that information about named addresses can flow in the package graph: -- The former ("unassigned named addresses") allows named address values to flow from the importation - site to the declaration site. -- The latter ("assigned named addresses") allows named address values to flow from the declaration - site upwards in the package graph to usage sites. +- The former ("unassigned named addresses") allows named address values to flow from the + importation site to the declaration site. +- The latter ("assigned named addresses") allows named address values to flow from the declaration + site upwards in the package graph to usage sites. With these two methods for flowing named address information throughout the package graph the rules around scoping and renaming become important to understand. @@ -321,13 +310,13 @@ A named address `N` in a package `P` is in scope if: Additionally, every named address in a package is exported. Because of this and the above scoping rules each package can be viewed as coming with a set of named addresses that will be brought into -scope when the package is imported, e.g., if the `example_pkg` package was imported, that importation -would bring into scope the `named_addr` named address. 
Because of this, if `P` imports two packages -`P1` and `P2` both of which declare a named address `N` an issue arises in `P`: which "`N`" is meant -when `N` is referred to in `P`? The one from `P1` or `P2`? To prevent this ambiguity around which -package a named address is coming from, we enforce that the sets of scopes introduced by all -dependencies in a package are disjoint, and provide a way to _rename named addresses_ when the -package that brings them into scope is imported. +scope when the package is imported, e.g., if the `example_pkg` package was imported, that +importation would bring into scope the `named_addr` named address. Because of this, if `P` imports +two packages `P1` and `P2` both of which declare a named address `N` an issue arises in `P`: which +"`N`" is meant when `N` is referred to in `P`? The one from `P1` or `P2`? To prevent this ambiguity +around which package a named address is coming from, we enforce that the sets of scopes introduced +by all dependencies in a package are disjoint, and provide a way to _rename named addresses_ when +the package that brings them into scope is imported. Renaming a named address when importing can be done as follows in our `P`, `P1`, and `P2` example above: @@ -363,10 +352,10 @@ instantiated with differing values across the package graph. A Move package can only be compiled if all named addresses resolve to a value. This presents issues if the package wishes to expose an uninstantiated named address. This is what the `[dev-addresses]` -section solves in part. This section can set values for named addresses, but cannot introduce any named -addresses. Additionally, only the `[dev-addresses]` in the root package are included in `dev` mode. -For example a root package with the following manifest would not compile outside of `dev` mode since -`named_addr` would be uninstantiated: +section solves in part. This section can set values for named addresses, but cannot introduce any +named addresses. 
Additionally, only the `[dev-addresses]` in the root package are included in `dev` +mode. For example a root package with the following manifest would not compile outside of `dev` mode +since `named_addr` would be uninstantiated: ``` [package] @@ -382,16 +371,15 @@ named_addr = "0xC0FFEE" ## Usage and Artifacts The Move package system comes with a command line option as part of the CLI: -`iota move `. Unless a particular path is provided, all -package commands will run in the current working directory. The full list of -commands and flags for the Move CLI can be found by running `iota move --help`. +`iota move `. Unless a particular path is provided, all package commands +will run in the current working directory. The full list of commands and flags for the Move CLI can +be found by running `iota move --help`. ### Artifacts -A package can be compiled using CLI commands. -This will create a `build` -directory containing build-related artifacts (such as bytecode, source maps, and -documentation). The general layout of the `build` directory is as follows: +A package can be compiled using CLI commands. This will create a `build` directory containing +build-related artifacts (such as bytecode, source maps, and documentation). The general layout of +the `build` directory is as follows: ``` a_move_package diff --git a/external-crates/move/documentation/coding_guidelines.md b/external-crates/move/documentation/coding_guidelines.md index c44d4393e49..36ed87bde85 100644 --- a/external-crates/move/documentation/coding_guidelines.md +++ b/external-crates/move/documentation/coding_guidelines.md @@ -211,7 +211,7 @@ It forces us to think of edge-cases, and handle them explicitly. This is a brief and simplified mini guide of the different functions that exist to handle integer arithmetic: - [checked\_](https://doc.rust-lang.org/std/primitive.u32.html#method.checked_add): use this function if you want to handle overflows and underflows as a special edge-case. 
It returns `None` if an underflow or overflow has happened, and `Some(operation_result)` otherwise. -- [overflowing\_](https://doc.rust-lang.org/std/primitive.u32.html#method.overflowing_add): use this function if you want the result of an overflow to potentially wrap around (e.g. `u64::MAX.overflow_add(10) == (9, true)`). It returns the underflowed or overflowed result as well as a flag indicating if an overflow has occured or not. +- [overflowing\_](https://doc.rust-lang.org/std/primitive.u32.html#method.overflowing_add): use this function if you want the result of an overflow to potentially wrap around (e.g. `u64::MAX.overflow_add(10) == (9, true)`). It returns the underflowed or overflowed result as well as a flag indicating if an overflow has occurred or not. - [wrapping\_](https://doc.rust-lang.org/std/primitive.u32.html#method.wrapping_add): this is similar to overflowing operations, except that it returns the result directly. Use this function if you are sure that you want to handle underflows and overflows by wrapping around. - [saturating\_](https://doc.rust-lang.org/std/primitive.u32.html#method.saturating_add): if an overflow occurs, the result is kept within the boundary of the type (e.g. `u64::MAX.saturating_add(1) == u64::MAX`). diff --git a/external-crates/move/move-execution/v0/crates/move-bytecode-verifier/src/absint.rs b/external-crates/move/move-execution/v0/crates/move-bytecode-verifier/src/absint.rs index f0266f98d81..2c51d23b7b0 100644 --- a/external-crates/move/move-execution/v0/crates/move-bytecode-verifier/src/absint.rs +++ b/external-crates/move/move-execution/v0/crates/move-bytecode-verifier/src/absint.rs @@ -52,7 +52,7 @@ pub trait TransferFunctions { /// Should return an AnalysisError if executing the instruction is /// unsuccessful, and () if the effects of successfully executing /// local@instr have been reflected by mutatating local@pre. 
- /// Auxilary data from the analysis that is not part of the abstract state + /// Auxiliary data from the analysis that is not part of the abstract state /// can be collected by mutating local@self. /// The last instruction index in the current block is local@last_index. /// Knowing this information allows clients to detect the end of a basic diff --git a/external-crates/move/move-execution/v0/crates/move-bytecode-verifier/src/limits.rs b/external-crates/move/move-execution/v0/crates/move-bytecode-verifier/src/limits.rs index 6e69044e740..3f0946e4fdd 100644 --- a/external-crates/move/move-execution/v0/crates/move-bytecode-verifier/src/limits.rs +++ b/external-crates/move/move-execution/v0/crates/move-bytecode-verifier/src/limits.rs @@ -206,11 +206,11 @@ impl<'a> LimitsVerifier<'a> { Ok(()) } - /// Verifies the lengths of all identifers are valid + /// Verifies the lengths of all identifiers are valid fn verify_identifiers(&self, config: &VerifierConfig) -> PartialVMResult<()> { - if let Some(max_idenfitier_len) = config.max_idenfitier_len { + if let Some(max_identifier_len) = config.max_identifier_len { for (idx, identifier) in self.resolver.identifiers().iter().enumerate() { - if identifier.len() > (max_idenfitier_len as usize) { + if identifier.len() > (max_identifier_len as usize) { return Err(verification_error( StatusCode::IDENTIFIER_TOO_LONG, IndexKind::Identifier, diff --git a/external-crates/move/move-execution/v0/crates/move-stdlib/sources/signer.move b/external-crates/move/move-execution/v0/crates/move-stdlib/sources/signer.move index 55376dd3fff..d41fd3a7c87 100644 --- a/external-crates/move/move-execution/v0/crates/move-stdlib/sources/signer.move +++ b/external-crates/move/move-execution/v0/crates/move-stdlib/sources/signer.move @@ -1,6 +1,6 @@ module std::signer { // Borrows the address of the signer - // Conceptually, you can think of the `signer` as being a struct wrapper arround an + // Conceptually, you can think of the `signer` as being a struct 
wrapper around an // address // ``` // struct signer has drop { addr: address } diff --git a/external-crates/move/move-execution/v0/crates/move-vm-runtime/src/loader.rs b/external-crates/move/move-execution/v0/crates/move-vm-runtime/src/loader.rs index 9ad91629014..1f430be1753 100644 --- a/external-crates/move/move-execution/v0/crates/move-vm-runtime/src/loader.rs +++ b/external-crates/move/move-execution/v0/crates/move-vm-runtime/src/loader.rs @@ -564,7 +564,7 @@ impl ModuleCache { // Helpers to load/verify modules without recursion -// In order to traverse the transitive dependencies of a module (when verifing +// In order to traverse the transitive dependencies of a module (when verifying // the module), we create a stack and iterate over the dependencies to avoid // recursion. An entry on the stack is conceptually a pair (module, // dependencies) where dependencies is used to visit them and to track when a diff --git a/external-crates/move/move-execution/v0/crates/move-vm-runtime/src/move_vm.rs b/external-crates/move/move-execution/v0/crates/move-vm-runtime/src/move_vm.rs index 1b5ddebb8e6..eea4e360053 100644 --- a/external-crates/move/move-execution/v0/crates/move-vm-runtime/src/move_vm.rs +++ b/external-crates/move/move-execution/v0/crates/move-vm-runtime/src/move_vm.rs @@ -58,7 +58,7 @@ impl MoveVM { /// other words, if there is a module publishing Session it must be the /// only Session existing. /// - In general, a new Move VM needs to be created whenever the storage - /// gets modified by an outer envrionment, or otherwise the states may + /// gets modified by an outer environment, or otherwise the states may /// be out of sync. 
There are a few exceptional cases where this may not /// be necessary, with the most notable one being the common module /// publishing flow: you can keep using the same Move VM if you publish diff --git a/iota-execution/latest/iota-verifier/src/lib.rs b/iota-execution/latest/iota-verifier/src/lib.rs index 35fc98bfafc..e52af6f9a07 100644 --- a/iota-execution/latest/iota-verifier/src/lib.rs +++ b/iota-execution/latest/iota-verifier/src/lib.rs @@ -80,7 +80,7 @@ pub fn default_verifier_config( max_basic_blocks_in_script: None, max_per_fun_meter_units, max_per_mod_meter_units, - max_idenfitier_len: protocol_config.max_move_identifier_len_as_option(), /* Before protocol version 9, there was no limit */ + max_identifier_len: protocol_config.max_move_identifier_len_as_option(), /* Before protocol version 9, there was no limit */ allow_receiving_object_id: protocol_config.allow_receiving_object_id(), reject_mutable_random_on_entry_functions: protocol_config .reject_mutable_random_on_entry_functions(), diff --git a/iota-execution/v0/iota-verifier/src/lib.rs b/iota-execution/v0/iota-verifier/src/lib.rs index 35fc98bfafc..877e3b62e21 100644 --- a/iota-execution/v0/iota-verifier/src/lib.rs +++ b/iota-execution/v0/iota-verifier/src/lib.rs @@ -80,7 +80,7 @@ pub fn default_verifier_config( max_basic_blocks_in_script: None, max_per_fun_meter_units, max_per_mod_meter_units, - max_idenfitier_len: protocol_config.max_move_identifier_len_as_option(), /* Before protocol version 9, there was no limit */ + max_identifier_len: protocol_config.max_move_identifier_len_as_option(), allow_receiving_object_id: protocol_config.allow_receiving_object_id(), reject_mutable_random_on_entry_functions: protocol_config .reject_mutable_random_on_entry_functions(), diff --git a/narwhal/config/src/committee.rs b/narwhal/config/src/committee.rs index dd251e7358b..c07a9e86167 100644 --- a/narwhal/config/src/committee.rs +++ b/narwhal/config/src/committee.rs @@ -48,7 +48,7 @@ impl Authority { /// 
CommitteeBuilder). As some internal properties of Authority are /// initialised via the Committee, to ensure that the user will not /// accidentally use stale Authority data, should always derive them via - /// the Commitee. + /// the Committee. fn new( protocol_key: PublicKey, stake: Stake, diff --git a/sdk/kiosk/src/client/tp-transaction.ts b/sdk/kiosk/src/client/tp-transaction.ts index 0c4971fdcc2..85189134b4e 100644 --- a/sdk/kiosk/src/client/tp-transaction.ts +++ b/sdk/kiosk/src/client/tp-transaction.ts @@ -48,7 +48,7 @@ export class TransferPolicyTransaction { /** * A function to create a new transfer policy. * Checks if there's already an existing transfer policy to prevent - * double transfer polciy mistakes. + * double transfer policy mistakes. * There's an optional `skipCheck` flag that will just create the policy * without checking *