From c8b16d9aa3212f6ee1692ed3f9e1e8a5cef57cd4 Mon Sep 17 00:00:00 2001 From: Pana Date: Mon, 23 Sep 2024 18:11:54 +0800 Subject: [PATCH 01/31] add new async eth rpc fake impl --- crates/cfxcore/core/src/errors.rs | 2 +- crates/rpc/rpc/src/eth.rs | 362 ++++++++++++++++++++++++++++++ crates/rpc/rpc/src/lib.rs | 1 + 3 files changed, 364 insertions(+), 1 deletion(-) create mode 100644 crates/rpc/rpc/src/eth.rs diff --git a/crates/cfxcore/core/src/errors.rs b/crates/cfxcore/core/src/errors.rs index c855bbc15..6d8a92c22 100644 --- a/crates/cfxcore/core/src/errors.rs +++ b/crates/cfxcore/core/src/errors.rs @@ -3,6 +3,7 @@ // See http://www.gnu.org/licenses/ use crate::light_protocol::Error as LightProtocolError; use cfx_rpc_eth_types::Error as EthRpcError; +pub use cfx_rpc_utils::error::error_codes::EXCEPTION_ERROR; use cfx_statedb::Error as StateDbError; use cfx_storage::Error as StorageError; use jsonrpc_core::{futures::future, Error as JsonRpcError, ErrorCode}; @@ -12,7 +13,6 @@ use rlp::DecoderError; use serde_json::Value; use std::fmt::{Debug, Display}; use thiserror::Error; -pub use cfx_rpc_utils::error::error_codes::EXCEPTION_ERROR; #[derive(Debug, Error)] pub enum Error { diff --git a/crates/rpc/rpc/src/eth.rs b/crates/rpc/rpc/src/eth.rs new file mode 100644 index 000000000..cb2fbe9a1 --- /dev/null +++ b/crates/rpc/rpc/src/eth.rs @@ -0,0 +1,362 @@ +use async_trait::async_trait; +use cfx_rpc_eth_api::EthApiServer; +use cfx_rpc_eth_types::{ + Block, BlockNumber as BlockId, FeeHistory, Header, Receipt, SyncStatus, + Transaction, TransactionRequest, +}; +use cfx_rpc_primitives::{Bytes, Index}; +use cfx_types::{Address, H256, H64, U256, U64}; +use jsonrpsee::core::RpcResult; + +type BlockNumberOrTag = BlockId; + +type JsonStorageKey = U256; + +pub struct EthApi; + +#[async_trait] +impl EthApiServer for EthApi { + /// Returns the protocol version encoded as a string. + async fn protocol_version(&self) -> RpcResult { todo!() } + + /// Returns an object with data about the sync status or false. + fn syncing(&self) -> RpcResult { todo!() } + + /// Returns the client coinbase address. + async fn author(&self) -> RpcResult
{ todo!() } + + /// Returns a list of addresses owned by client. + fn accounts(&self) -> RpcResult> { todo!() } + + /// Returns the number of most recent block. + fn block_number(&self) -> RpcResult { todo!() } + + /// Returns the chain ID of the current network. + async fn chain_id(&self) -> RpcResult> { todo!() } + + /// Returns information about a block by hash. + async fn block_by_hash( + &self, hash: H256, full: bool, + ) -> RpcResult> { + todo!() + } + + /// Returns information about a block by number. + async fn block_by_number( + &self, number: BlockNumberOrTag, full: bool, + ) -> RpcResult> { + todo!() + } + + /// Returns the number of transactions in a block from a block matching the + /// given block hash. + async fn block_transaction_count_by_hash( + &self, hash: H256, + ) -> RpcResult> { + todo!() + } + + /// Returns the number of transactions in a block matching the given block + /// number. + async fn block_transaction_count_by_number( + &self, number: BlockNumberOrTag, + ) -> RpcResult> { + todo!() + } + + /// Returns the number of uncles in a block from a block matching the given + /// block hash. + async fn block_uncles_count_by_hash( + &self, hash: H256, + ) -> RpcResult> { + todo!() + } + + /// Returns the number of uncles in a block with given block number. + async fn block_uncles_count_by_number( + &self, number: BlockNumberOrTag, + ) -> RpcResult> { + todo!() + } + + /// Returns all transaction receipts for a given block. + async fn block_receipts( + &self, block_id: BlockId, + ) -> RpcResult>> { + todo!() + } + + /// Returns an uncle block of the given block and index. + async fn uncle_by_block_hash_and_index( + &self, hash: H256, index: Index, + ) -> RpcResult> { + todo!() + } + + /// Returns an uncle block of the given block and index. + async fn uncle_by_block_number_and_index( + &self, number: BlockNumberOrTag, index: Index, + ) -> RpcResult> { + todo!() + } + + /// Returns the EIP-2718 encoded transaction if it exists. + /// + /// If this is a EIP-4844 transaction that is in the pool it will include + /// the sidecar. + async fn raw_transaction_by_hash( + &self, hash: H256, + ) -> RpcResult> { + todo!() + } + + /// Returns the information about a transaction requested by transaction + /// hash. + async fn transaction_by_hash( + &self, hash: H256, + ) -> RpcResult> { + todo!() + } + + /// Returns information about a raw transaction by block hash and + /// transaction index position. + async fn raw_transaction_by_block_hash_and_index( + &self, hash: H256, index: Index, + ) -> RpcResult> { + todo!() + } + + /// Returns information about a transaction by block hash and transaction + /// index position. + async fn transaction_by_block_hash_and_index( + &self, hash: H256, index: Index, + ) -> RpcResult> { + todo!() + } + + /// Returns information about a raw transaction by block number and + /// transaction index position. + async fn raw_transaction_by_block_number_and_index( + &self, number: BlockNumberOrTag, index: Index, + ) -> RpcResult> { + todo!() + } + + /// Returns information about a transaction by block number and transaction + /// index position. + async fn transaction_by_block_number_and_index( + &self, number: BlockNumberOrTag, index: Index, + ) -> RpcResult> { + todo!() + } + + /// Returns information about a transaction by sender and nonce. + async fn transaction_by_sender_and_nonce( + &self, address: Address, nonce: U64, + ) -> RpcResult> { + todo!() + } + + /// Returns the receipt of a transaction by transaction hash. 
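Aside, not part of the diff above: the `EthApiServer` impl being added here is a placeholder, and assuming `cfx_rpc_eth_api` declares the trait with jsonrpsee's `#[rpc(server)]` macro (an assumption; the attribute lives in that crate), the macro-generated `into_rpc()` constructor is how this stub would eventually be exposed. A minimal wiring sketch under that assumption:

    use jsonrpsee::RpcModule;

    // Hypothetical wiring helper; every method body is still `todo!()`,
    // so this only reserves the eth_* method names on the new async server.
    fn eth_module() -> RpcModule<()> {
        let mut module = RpcModule::new(());
        module
            .merge(EthApi.into_rpc())
            .expect("eth namespace should register exactly once");
        module
    }
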
+ async fn transaction_receipt( + &self, hash: H256, + ) -> RpcResult> { + todo!() + } + + /// Returns the balance of the account of given address. + async fn balance( + &self, address: Address, block_number: Option, + ) -> RpcResult { + todo!() + } + + /// Returns the value from a storage position at a given address + async fn storage_at( + &self, address: Address, index: JsonStorageKey, + block_number: Option, + ) -> RpcResult { + todo!() + } + + /// Returns the number of transactions sent from an address at given block + /// number. + async fn transaction_count( + &self, address: Address, block_number: Option, + ) -> RpcResult { + todo!() + } + + /// Returns code at a given address at given block number. + async fn get_code( + &self, address: Address, block_number: Option, + ) -> RpcResult { + todo!() + } + + /// Returns the block's header at given number. + async fn header_by_number( + &self, hash: BlockNumberOrTag, + ) -> RpcResult> { + todo!() + } + + /// Returns the block's header at given hash. + async fn header_by_hash(&self, hash: H256) -> RpcResult> { + todo!() + } + + /// `eth_simulateV1` executes an arbitrary number of transactions on top of + /// the requested state. The transactions are packed into individual + /// blocks. Overrides can be provided. + // async fn simulate_v1( + // &self, + // opts: SimBlock, + // block_number: Option, + // ) -> RpcResult>; + + /// Executes a new message call immediately without creating a transaction + /// on the block chain. + async fn call( + &self, + request: TransactionRequest, + block_number: Option, + // state_overrides: Option, + // block_overrides: Option>, + ) -> RpcResult { + todo!() + } + + /// Simulate arbitrary number of transactions at an arbitrary blockchain + /// index, with the optionality of state overrides + // async fn call_many( + // &self, + // bundle: Bundle, + // state_context: Option, + // state_override: Option, + // ) -> RpcResult>; + + /// Generates an access list for a transaction. + /// + /// This method creates an [EIP2930](https://eips.ethereum.org/EIPS/eip-2930) type accessList based on a given Transaction. + /// + /// An access list contains all storage slots and addresses touched by the + /// transaction, except for the sender account and the chain's + /// precompiles. + /// + /// It returns list of addresses and storage keys used by the transaction, + /// plus the gas consumed when the access list is added. That is, it + /// gives you the list of addresses and storage keys that will be used + /// by that transaction, plus the gas consumed if the access + /// list is included. Like eth_estimateGas, this is an estimation; the list + /// could change when the transaction is actually mined. Adding an + /// accessList to your transaction does not necessary result in lower + /// gas usage compared to a transaction without an access list. + // async fn create_access_list( + // &self, + // request: TransactionRequest, + // block_number: Option, + // ) -> RpcResult; + + /// Generates and returns an estimate of how much gas is necessary to allow + /// the transaction to complete. + async fn estimate_gas( + &self, + request: TransactionRequest, + block_number: Option, + // state_override: Option, + ) -> RpcResult { + todo!() + } + + /// Returns the current price per gas in wei. 
+ async fn gas_price(&self) -> RpcResult { todo!() } + + /// Returns the account details by specifying an address and a block + /// number/tag + // async fn get_account( + // &self, + // address: Address, + // block: BlockId, + // ) -> RpcResult>; + + /// Introduced in EIP-1559, returns suggestion for the priority for dynamic + /// fee transactions. + async fn max_priority_fee_per_gas(&self) -> RpcResult { todo!() } + + /// Introduced in EIP-4844, returns the current blob base fee in wei. + // async fn blob_base_fee(&self) -> RpcResult; + + /// Returns the Transaction fee history + /// + /// Introduced in EIP-1559 for getting information on the appropriate + /// priority fee to use. + /// + /// Returns transaction base fee per gas and effective priority fee per gas + /// for the requested/supported block range. The returned Fee history + /// for the returned block range can be a subsection of the requested + /// range if not all blocks are available. + async fn fee_history( + &self, block_count: U64, newest_block: BlockNumberOrTag, + reward_percentiles: Option>, + ) -> RpcResult { + todo!() + } + + /// Returns whether the client is actively mining new blocks. + async fn is_mining(&self) -> RpcResult { todo!() } + + /// Returns the number of hashes per second that the node is mining with. + async fn hashrate(&self) -> RpcResult { todo!() } + + /// Returns the hash of the current block, the seedHash, and the boundary + /// condition to be met (“target”) + // async fn get_work(&self) -> RpcResult; + + /// Used for submitting mining hashrate. + /// + /// Can be used for remote miners to submit their hash rate. + /// It accepts the miner hash rate and an identifier which must be unique + /// between nodes. Returns `true` if the block was successfully + /// submitted, `false` otherwise. + async fn submit_hashrate( + &self, hashrate: U256, id: H256, + ) -> RpcResult { + todo!() + } + + /// Used for submitting a proof-of-work solution. + async fn submit_work( + &self, nonce: H64, pow_hash: H256, mix_digest: H256, + ) -> RpcResult { + todo!() + } + + /// Sends transaction; will block waiting for signer to return the + /// transaction hash. + async fn send_transaction( + &self, request: TransactionRequest, + ) -> RpcResult { + todo!() + } + + /// Sends signed transaction, returning its hash. + async fn send_raw_transaction(&self, bytes: Bytes) -> RpcResult { + todo!() + } + + /// Returns an Ethereum specific signature with: + /// sign(keccak256("\x19Ethereum Signed Message:\n" + /// + len(message) + message))). 
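For context, outside the patch itself: the `fee_history` endpoint above is also the data source for the `eth_maxPriorityFeePerGas` suggestion implemented later in this series (patch 04), which requests the 50th-percentile reward for each of the last 300 blocks, sums them, and scales the result by the EVM-space share of block space. A rough sketch of that shape, with `rewards_per_block` and `evm_ratio` as hypothetical stand-ins for the real lookups:

    use cfx_types::U256;

    // Average 50th-percentile tip over a 300-block window, scaled by the
    // fraction of each block reserved for EVM-space transactions.
    fn suggest_priority_fee(rewards_per_block: &[U256], evm_ratio: usize) -> U256 {
        let total = rewards_per_block
            .iter()
            .fold(U256::zero(), |acc, r| acc + *r);
        total * evm_ratio / 300
    }
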
+ async fn sign(&self, address: Address, message: Bytes) -> RpcResult { + todo!() + } + + /// Signs a transaction that can be submitted to the network at a later time + /// using with `sendRawTransaction.` + async fn sign_transaction( + &self, transaction: TransactionRequest, + ) -> RpcResult { + todo!() + } +} diff --git a/crates/rpc/rpc/src/lib.rs b/crates/rpc/rpc/src/lib.rs index d01ab68ca..cfd2257bc 100644 --- a/crates/rpc/rpc/src/lib.rs +++ b/crates/rpc/rpc/src/lib.rs @@ -1,4 +1,5 @@ mod debug; +mod eth; mod net; mod rpc; mod web3; From c35125e7e49ba7948818cfe5c20bf44c5651eb59 Mon Sep 17 00:00:00 2001 From: Pana Date: Tue, 24 Sep 2024 10:30:19 +0800 Subject: [PATCH 02/31] move type RpcImplConfiguration to cfx_rpc_cfx_types --- crates/client/src/rpc/impls.rs | 23 +------------------ .../client/src/rpc/impls/eth/eth_handler.rs | 16 ++++++------- crates/rpc/rpc-cfx-types/src/lib.rs | 2 ++ .../src/rpc_impl_configuration.rs | 21 +++++++++++++++++ 4 files changed, 32 insertions(+), 30 deletions(-) create mode 100644 crates/rpc/rpc-cfx-types/src/rpc_impl_configuration.rs diff --git a/crates/client/src/rpc/impls.rs b/crates/client/src/rpc/impls.rs index 5559a9c43..732e95123 100644 --- a/crates/client/src/rpc/impls.rs +++ b/crates/client/src/rpc/impls.rs @@ -2,31 +2,10 @@ // Conflux is free software and distributed under GNU General Public License. // See http://www.gnu.org/licenses/ -#[derive(Clone, Default)] -pub struct RpcImplConfiguration { - pub get_logs_filter_max_limit: Option, - /// If it's `true`, `DEFERRED_STATE_EPOCH_COUNT` blocks are generated after - /// receiving a new tx through RPC calling to pack and execute this - /// transaction. - pub dev_pack_tx_immediately: bool, - - // maximum response payload size allowed - // note: currently we only handle this for `cfx_getEpochReceipts`, - // other APIs will disconnect on oversized response - pub max_payload_bytes: usize, - - pub max_estimation_gas_limit: Option, - - pub enable_metrics: bool, - - pub poll_lifetime_in_seconds: Option, -} - pub mod cfx; pub mod eth; pub mod pos; pub use cfx::{cfx_filter, common, light, pool, pubsub}; -pub use cfx_rpc_cfx_types::FeeHistoryCacheEntry; -use cfx_types::U256; +pub use cfx_rpc_cfx_types::{FeeHistoryCacheEntry, RpcImplConfiguration}; pub use eth::{debug, eth_filter, eth_handler::EthHandler, eth_pubsub}; diff --git a/crates/client/src/rpc/impls/eth/eth_handler.rs b/crates/client/src/rpc/impls/eth/eth_handler.rs index 372384acf..7de0b7009 100644 --- a/crates/client/src/rpc/impls/eth/eth_handler.rs +++ b/crates/client/src/rpc/impls/eth/eth_handler.rs @@ -33,7 +33,7 @@ use cfx_types::{ }; use cfx_vm_types::Error as VmError; use cfxcore::{ - errors::{Error as CfxRpcError, Result as CfxRpcResult}, + errors::{Error as CoreError, Result as CoreResult}, ConsensusGraph, ConsensusGraphTrait, SharedConsensusGraph, SharedSynchronizationService, SharedTransactionPool, }; @@ -107,7 +107,7 @@ impl EthHandler { fn exec_transaction( &self, mut request: TransactionRequest, block_number_or_hash: Option, - ) -> CfxRpcResult<(Executed, U256)> { + ) -> CoreResult<(Executed, U256)> { let consensus_graph = self.consensus_graph(); if request.gas_price.is_some() @@ -234,7 +234,7 @@ impl EthHandler { fn send_transaction_with_signature( &self, tx: TransactionWithSignature, - ) -> CfxRpcResult { + ) -> CoreResult { if self.sync.catch_up_mode() { warn!("Ignore send_transaction request {}. 
Cannot send transaction when the node is still in catch-up mode.", tx.hash()); bail!(request_rejected_in_catch_up_mode(None)); @@ -612,7 +612,7 @@ impl Eth for EthHandler { .get_eth_state_db_by_epoch_number(epoch_num, "num")?; let acc = state_db .get_account(&address.with_evm_space()) - .map_err(|err| CfxRpcError::from(err))?; + .map_err(|err| CoreError::from(err))?; Ok(acc.map_or(U256::zero(), |acc| acc.balance).into()) } @@ -638,7 +638,7 @@ impl Eth for EthHandler { Ok( match state_db .get::(key) - .map_err(|err| CfxRpcError::from(err))? + .map_err(|err| CoreError::from(err))? { Some(entry) => H256::from_uint(&entry.value).into(), None => H256::zero(), @@ -829,11 +829,11 @@ impl Eth for EthHandler { let code = match state_db .get_account(&address) - .map_err(|err| CfxRpcError::from(err))? + .map_err(|err| CoreError::from(err))? { Some(acc) => match state_db .get_code(&address, &acc.code_hash) - .map_err(|err| CfxRpcError::from(err))? + .map_err(|err| CoreError::from(err))? { Some(code) => (*code.code).clone(), _ => vec![], @@ -1195,7 +1195,7 @@ impl Eth for EthHandler { let logs = self .consensus_graph() .logs(filter) - .map_err(|err| CfxRpcError::from(err))?; + .map_err(|err| CoreError::from(err))?; // If the results does not fit into `max_limit`, report an error if let Some(max_limit) = self.config.get_logs_filter_max_limit { diff --git a/crates/rpc/rpc-cfx-types/src/lib.rs b/crates/rpc/rpc-cfx-types/src/lib.rs index 56a3fee60..4b8fb4171 100644 --- a/crates/rpc/rpc-cfx-types/src/lib.rs +++ b/crates/rpc/rpc-cfx-types/src/lib.rs @@ -7,6 +7,7 @@ pub mod trace; pub mod trace_filter; pub mod traits; mod transaction_status; +mod rpc_impl_configuration; pub use address::RpcAddress; pub use epoch_number::EpochNumber; @@ -14,3 +15,4 @@ pub use fee_history::CfxFeeHistory; pub use fee_history_cache_entry::FeeHistoryCacheEntry; pub use phantom_block::PhantomBlock; pub use transaction_status::{PendingReason, TransactionStatus}; +pub use rpc_impl_configuration::RpcImplConfiguration; diff --git a/crates/rpc/rpc-cfx-types/src/rpc_impl_configuration.rs b/crates/rpc/rpc-cfx-types/src/rpc_impl_configuration.rs new file mode 100644 index 000000000..18ad94d54 --- /dev/null +++ b/crates/rpc/rpc-cfx-types/src/rpc_impl_configuration.rs @@ -0,0 +1,21 @@ +use cfx_types::U256; + +#[derive(Clone, Default)] +pub struct RpcImplConfiguration { + pub get_logs_filter_max_limit: Option, + /// If it's `true`, `DEFERRED_STATE_EPOCH_COUNT` blocks are generated after + /// receiving a new tx through RPC calling to pack and execute this + /// transaction. 
+ pub dev_pack_tx_immediately: bool, + + // maximum response payload size allowed + // note: currently we only handle this for `cfx_getEpochReceipts`, + // other APIs will disconnect on oversized response + pub max_payload_bytes: usize, + + pub max_estimation_gas_limit: Option, + + pub enable_metrics: bool, + + pub poll_lifetime_in_seconds: Option, +} \ No newline at end of file From 0abfe7a44a55e6ce575ee0cf389d3d2219643a66 Mon Sep 17 00:00:00 2001 From: Pana Date: Tue, 24 Sep 2024 10:36:26 +0800 Subject: [PATCH 03/31] move fee_history_cache to helper folder --- Cargo.lock | 2 ++ crates/client/src/rpc/helpers/mod.rs | 7 +++---- crates/rpc/rpc-cfx-types/src/lib.rs | 4 ++-- crates/rpc/rpc-cfx-types/src/rpc_impl_configuration.rs | 2 +- crates/rpc/rpc/Cargo.toml | 2 ++ .../src/rpc => rpc/rpc/src}/helpers/fee_history_cache.rs | 0 crates/rpc/rpc/src/helpers/mod.rs | 5 +++++ crates/rpc/rpc/src/lib.rs | 1 + 8 files changed, 16 insertions(+), 7 deletions(-) rename crates/{client/src/rpc => rpc/rpc/src}/helpers/fee_history_cache.rs (100%) create mode 100644 crates/rpc/rpc/src/helpers/mod.rs diff --git a/Cargo.lock b/Cargo.lock index 437eee534..00257776d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1318,6 +1318,7 @@ dependencies = [ "alloy-rpc-types", "alloy-rpc-types-trace", "async-trait", + "cfx-rpc-cfx-types", "cfx-rpc-eth-api", "cfx-rpc-eth-types", "cfx-rpc-primitives", @@ -1329,6 +1330,7 @@ dependencies = [ "jsonrpsee", "keccak-hash", "parity-version", + "parking_lot 0.11.2", "primitives", ] diff --git a/crates/client/src/rpc/helpers/mod.rs b/crates/client/src/rpc/helpers/mod.rs index 38b2149cb..5ac428723 100644 --- a/crates/client/src/rpc/helpers/mod.rs +++ b/crates/client/src/rpc/helpers/mod.rs @@ -3,7 +3,6 @@ // See http://www.gnu.org/licenses/ mod epoch_queue; -mod fee_history_cache; mod poll_filter; mod poll_manager; mod subscribers; @@ -14,9 +13,9 @@ pub use self::{ }, poll_manager::PollManager, }; -pub use cfx_rpc_primitives::{maybe_vec_into, VariadicValue}; -pub use epoch_queue::EpochQueue; -pub use fee_history_cache::{ +pub use cfx_rpc::helpers::{ FeeHistoryCache, MAX_FEE_HISTORY_CACHE_BLOCK_COUNT, }; +pub use cfx_rpc_primitives::{maybe_vec_into, VariadicValue}; +pub use epoch_queue::EpochQueue; pub use subscribers::{Id as SubscriberId, Subscribers}; diff --git a/crates/rpc/rpc-cfx-types/src/lib.rs b/crates/rpc/rpc-cfx-types/src/lib.rs index 4b8fb4171..0bba488be 100644 --- a/crates/rpc/rpc-cfx-types/src/lib.rs +++ b/crates/rpc/rpc-cfx-types/src/lib.rs @@ -3,16 +3,16 @@ pub mod epoch_number; mod fee_history; mod fee_history_cache_entry; mod phantom_block; +mod rpc_impl_configuration; pub mod trace; pub mod trace_filter; pub mod traits; mod transaction_status; -mod rpc_impl_configuration; pub use address::RpcAddress; pub use epoch_number::EpochNumber; pub use fee_history::CfxFeeHistory; pub use fee_history_cache_entry::FeeHistoryCacheEntry; pub use phantom_block::PhantomBlock; -pub use transaction_status::{PendingReason, TransactionStatus}; pub use rpc_impl_configuration::RpcImplConfiguration; +pub use transaction_status::{PendingReason, TransactionStatus}; diff --git a/crates/rpc/rpc-cfx-types/src/rpc_impl_configuration.rs b/crates/rpc/rpc-cfx-types/src/rpc_impl_configuration.rs index 18ad94d54..289911fad 100644 --- a/crates/rpc/rpc-cfx-types/src/rpc_impl_configuration.rs +++ b/crates/rpc/rpc-cfx-types/src/rpc_impl_configuration.rs @@ -18,4 +18,4 @@ pub struct RpcImplConfiguration { pub enable_metrics: bool, pub poll_lifetime_in_seconds: Option, -} \ No newline at end of file +} diff 
--git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 56e564cf0..54089cdd3 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -28,3 +28,5 @@ cfxcore = { workspace = true } primitives = { workspace = true } geth-tracer = { workspace = true } cfx-rpc-utils = { workspace = true } +parking_lot = { workspace = true } +cfx-rpc-cfx-types = { workspace = true } diff --git a/crates/client/src/rpc/helpers/fee_history_cache.rs b/crates/rpc/rpc/src/helpers/fee_history_cache.rs similarity index 100% rename from crates/client/src/rpc/helpers/fee_history_cache.rs rename to crates/rpc/rpc/src/helpers/fee_history_cache.rs diff --git a/crates/rpc/rpc/src/helpers/mod.rs b/crates/rpc/rpc/src/helpers/mod.rs new file mode 100644 index 000000000..e043055aa --- /dev/null +++ b/crates/rpc/rpc/src/helpers/mod.rs @@ -0,0 +1,5 @@ +mod fee_history_cache; + +pub use fee_history_cache::{ + FeeHistoryCache, MAX_FEE_HISTORY_CACHE_BLOCK_COUNT, +}; diff --git a/crates/rpc/rpc/src/lib.rs b/crates/rpc/rpc/src/lib.rs index cfd2257bc..e6bd68973 100644 --- a/crates/rpc/rpc/src/lib.rs +++ b/crates/rpc/rpc/src/lib.rs @@ -1,5 +1,6 @@ mod debug; mod eth; +pub mod helpers; mod net; mod rpc; mod web3; From fcff1cb3e7847d4c17c0dc1314b5a6da93f136e8 Mon Sep 17 00:00:00 2001 From: Pana Date: Tue, 24 Sep 2024 15:23:14 +0800 Subject: [PATCH 04/31] impl async eth traits --- Cargo.lock | 8 + crates/client/src/rpc/helpers/mod.rs | 4 +- .../client/src/rpc/impls/eth/eth_handler.rs | 989 ++------------ crates/rpc/rpc-utils/src/error/errors.rs | 8 + crates/rpc/rpc/Cargo.toml | 8 + crates/rpc/rpc/src/eth.rs | 1134 ++++++++++++++++- crates/rpc/rpc/src/lib.rs | 1 + 7 files changed, 1196 insertions(+), 956 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 00257776d..b0e75f4db 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1318,20 +1318,28 @@ dependencies = [ "alloy-rpc-types", "alloy-rpc-types-trace", "async-trait", + "cfx-execute-helper", + "cfx-executor", + "cfx-parameters", "cfx-rpc-cfx-types", "cfx-rpc-eth-api", "cfx-rpc-eth-types", "cfx-rpc-primitives", "cfx-rpc-utils", + "cfx-statedb", "cfx-types", + "cfx-vm-types", "cfxcore", "clap", + "error-chain", "geth-tracer", + "jsonrpc-core", "jsonrpsee", "keccak-hash", "parity-version", "parking_lot 0.11.2", "primitives", + "rustc-hex", ] [[package]] diff --git a/crates/client/src/rpc/helpers/mod.rs b/crates/client/src/rpc/helpers/mod.rs index 5ac428723..47756a3cd 100644 --- a/crates/client/src/rpc/helpers/mod.rs +++ b/crates/client/src/rpc/helpers/mod.rs @@ -13,9 +13,7 @@ pub use self::{ }, poll_manager::PollManager, }; -pub use cfx_rpc::helpers::{ - FeeHistoryCache, MAX_FEE_HISTORY_CACHE_BLOCK_COUNT, -}; +pub use cfx_rpc::helpers::MAX_FEE_HISTORY_CACHE_BLOCK_COUNT; pub use cfx_rpc_primitives::{maybe_vec_into, VariadicValue}; pub use epoch_queue::EpochQueue; pub use subscribers::{Id as SubscriberId, Subscribers}; diff --git a/crates/client/src/rpc/impls/eth/eth_handler.rs b/crates/client/src/rpc/impls/eth/eth_handler.rs index 7de0b7009..9154eb5e5 100644 --- a/crates/client/src/rpc/impls/eth/eth_handler.rs +++ b/crates/client/src/rpc/impls/eth/eth_handler.rs @@ -3,56 +3,29 @@ // See http://www.gnu.org/licenses/ use crate::rpc::{ - errors::{ - geth_call_execution_error, internal_error, invalid_input_rpc_err, - invalid_params, request_rejected_in_catch_up_mode, unknown_block, - EthApiError, RpcInvalidTransactionError, RpcPoolError, - }, - helpers::{FeeHistoryCache, MAX_FEE_HISTORY_CACHE_BLOCK_COUNT}, + errors::{internal_error, EthApiError}, 
impls::RpcImplConfiguration, traits::eth_space::eth::Eth, types::{ eth::{ AccountPendingTransactions, Block as RpcBlock, BlockNumber, - EthRpcLogFilter, Log, Receipt, SyncInfo, SyncStatus, Transaction, + EthRpcLogFilter, Log, Receipt, SyncStatus, Transaction, TransactionRequest, }, Bytes, FeeHistory, Index, U64 as HexU64, }, }; -use cfx_execute_helper::estimation::EstimateRequest; -use cfx_executor::executive::{ - string_revert_reason_decode, Executed, ExecutionError, ExecutionOutcome, - TxDropError, -}; -use cfx_parameters::rpc::GAS_PRICE_DEFAULT_VALUE; -use cfx_rpc_cfx_types::{traits::BlockProvider, PhantomBlock}; -use cfx_statedb::StateDbExt; -use cfx_types::{ - Address, AddressSpaceUtil, BigEndianHash, Space, H160, H256, U256, U64, -}; -use cfx_vm_types::Error as VmError; +use cfx_rpc::EthApi; +use cfx_types::{Address, AddressSpaceUtil, Space, H160, H256, U256, U64}; use cfxcore::{ - errors::{Error as CoreError, Result as CoreResult}, - ConsensusGraph, ConsensusGraphTrait, SharedConsensusGraph, - SharedSynchronizationService, SharedTransactionPool, + SharedConsensusGraph, SharedSynchronizationService, SharedTransactionPool, }; use clap::crate_version; -use jsonrpc_core::{Error as RpcError, Result as RpcResult}; -use primitives::{ - filter::LogFilter, receipt::EVM_SPACE_SUCCESS, Action, - BlockHashOrEpochNumber, EpochNumber, StorageKey, StorageValue, - TransactionStatus, TransactionWithSignature, -}; -use rustc_hex::ToHex; -use std::convert::TryInto; +use jsonrpc_core::Result as RpcResult; +use primitives::TransactionWithSignature; pub struct EthHandler { - config: RpcImplConfiguration, - consensus: SharedConsensusGraph, - sync: SharedSynchronizationService, - tx_pool: SharedTransactionPool, - fee_history_cache: FeeHistoryCache, + inner: EthApi, } impl EthHandler { @@ -61,424 +34,9 @@ impl EthHandler { sync: SharedSynchronizationService, tx_pool: SharedTransactionPool, ) -> Self { EthHandler { - config, - consensus, - sync, - tx_pool, - fee_history_cache: FeeHistoryCache::new(), - } - } - - fn consensus_graph(&self) -> &ConsensusGraph { - self.consensus - .as_any() - .downcast_ref::() - .expect("downcast should succeed") - } - - pub fn fetch_block_by_height( - &self, height: u64, - ) -> Result { - let maybe_block = self.consensus_graph().get_phantom_block_by_number( - EpochNumber::Number(height), - None, - false, - )?; - if let Some(block) = maybe_block { - Ok(block) - } else { - Err("Specified block header does not exist".into()) - } - } - - pub fn fetch_block_by_hash( - &self, hash: &H256, - ) -> Result { - let maybe_block = self - .consensus_graph() - .get_phantom_block_by_hash(hash, false)?; - if let Some(block) = maybe_block { - Ok(block) - } else { - Err("Specified block header does not exist".into()) + inner: EthApi::new(config, consensus, sync, tx_pool), } } - - fn exec_transaction( - &self, mut request: TransactionRequest, - block_number_or_hash: Option, - ) -> CoreResult<(Executed, U256)> { - let consensus_graph = self.consensus_graph(); - - if request.gas_price.is_some() - && request.max_priority_fee_per_gas.is_some() - { - return Err(RpcError::from( - EthApiError::ConflictingFeeFieldsInRequest, - ) - .into()); - } - - if request.max_fee_per_gas.is_some() - && request.max_priority_fee_per_gas.is_some() - { - if request.max_fee_per_gas.unwrap() - < request.max_priority_fee_per_gas.unwrap() - { - return Err(RpcError::from( - RpcInvalidTransactionError::TipAboveFeeCap, - ) - .into()); - } - } - - let epoch = match block_number_or_hash.unwrap_or_default() { - BlockNumber::Hash { 
hash, .. } => { - match consensus_graph.get_block_epoch_number(&hash) { - Some(e) => { - // do not expose non-pivot blocks in eth RPC - let pivot = consensus_graph - .get_block_hashes_by_epoch(EpochNumber::Number(e))? - .last() - .cloned(); - - if Some(hash) != pivot { - bail!("Block {:?} not found", hash); - } - - EpochNumber::Number(e) - } - None => bail!("Block {:?} not found", hash), - } - } - epoch => epoch.try_into()?, - }; - - // if gas_price is zero, it is considered as not set - request.unset_zero_gas_price(); - - let estimate_request = EstimateRequest { - has_sender: request.from.is_some(), - has_gas_limit: request.gas.is_some(), - has_gas_price: request.has_gas_price(), - has_nonce: request.nonce.is_some(), - has_storage_limit: false, - }; - - let chain_id = self.consensus.best_chain_id(); - - let max_gas = self.config.max_estimation_gas_limit; - let signed_tx = request.sign_call(chain_id.in_evm_space(), max_gas)?; - - trace!("call tx {:?}, request {:?}", signed_tx, estimate_request); - let (execution_outcome, estimation) = consensus_graph.call_virtual( - &signed_tx, - epoch, - estimate_request, - )?; - - let executed = match execution_outcome { - ExecutionOutcome::NotExecutedDrop(TxDropError::OldNonce( - expected, - got, - )) => bail!(invalid_input_rpc_err( - format! {"nonce is too old expected {:?} got {:?}", expected, got} - )), - ExecutionOutcome::NotExecutedDrop( - TxDropError::InvalidRecipientAddress(recipient), - ) => bail!(invalid_input_rpc_err( - format! {"invalid recipient address {:?}", recipient} - )), - ExecutionOutcome::NotExecutedDrop( - TxDropError::NotEnoughGasLimit { expected, got }, - ) => bail!(invalid_input_rpc_err( - format! {"not enough gas limit with respected to tx size: expected {:?} got {:?}", expected, got} - )), - ExecutionOutcome::NotExecutedToReconsiderPacking(e) => { - bail!(invalid_input_rpc_err(format! {"err: {:?}", e})) - } - ExecutionOutcome::ExecutionErrorBumpNonce( - e @ ExecutionError::NotEnoughCash { .. }, - _executed, - ) => { - bail!(geth_call_execution_error( - format!( - "insufficient funds for gas * price + value: {:?})", - e - ), - "".into() - )) - } - ExecutionOutcome::ExecutionErrorBumpNonce( - ExecutionError::VmError(VmError::Reverted), - executed, - ) => bail!(geth_call_execution_error( - format!( - "execution reverted: revert: {}", - string_revert_reason_decode(&executed.output) - ), - format!("0x{}", executed.output.to_hex::()) - )), - ExecutionOutcome::ExecutionErrorBumpNonce( - ExecutionError::VmError(e), - _executed, - ) => bail!(geth_call_execution_error( - format!("execution reverted: {}", e), - "".into() - )), - ExecutionOutcome::Finished(executed) => executed, - }; - - Ok((executed, estimation.estimated_gas_limit)) - } - - fn send_transaction_with_signature( - &self, tx: TransactionWithSignature, - ) -> CoreResult { - if self.sync.catch_up_mode() { - warn!("Ignore send_transaction request {}. 
Cannot send transaction when the node is still in catch-up mode.", tx.hash()); - bail!(request_rejected_in_catch_up_mode(None)); - } - let (signed_trans, failed_trans) = - self.tx_pool.insert_new_transactions(vec![tx]); - if signed_trans.len() + failed_trans.len() > 1 { - // This should never happen - error!("insert_new_transactions failed, invalid length of returned result vector {}", signed_trans.len() + failed_trans.len()); - Ok(H256::zero().into()) - } else if signed_trans.len() + failed_trans.len() == 0 { - // For tx in transactions_pubkey_cache, we simply ignore them - debug!("insert_new_transactions ignores inserted transactions"); - bail!(RpcError::from(EthApiError::PoolError( - RpcPoolError::ReplaceUnderpriced - ))); - } else if signed_trans.is_empty() { - let tx_err = failed_trans.into_iter().next().expect("Not empty").1; - bail!(RpcError::from(EthApiError::from(tx_err))) - } else { - let tx_hash = signed_trans[0].hash(); - self.sync.append_received_transactions(signed_trans); - Ok(tx_hash.into()) - } - } - - fn construct_rpc_receipt( - &self, b: &PhantomBlock, idx: usize, prior_log_index: &mut usize, - ) -> RpcResult { - if b.transactions.len() != b.receipts.len() { - return Err(internal_error( - "Inconsistent state: transactions and receipts length mismatch", - )); - } - - if b.transactions.len() != b.errors.len() { - return Err(internal_error( - "Inconsistent state: transactions and errors length mismatch", - )); - } - - if idx >= b.transactions.len() { - return Err(internal_error( - "Inconsistent state: tx index out of bound", - )); - } - - let tx = &b.transactions[idx]; - let receipt = &b.receipts[idx]; - - if receipt.logs.iter().any(|l| l.space != Space::Ethereum) { - return Err(internal_error( - "Inconsistent state: native tx in phantom block", - )); - } - - let contract_address = match receipt.outcome_status { - TransactionStatus::Success => { - Transaction::deployed_contract_address(tx) - } - _ => None, - }; - - let transaction_hash = tx.hash(); - let transaction_index: U256 = idx.into(); - let block_hash = b.pivot_header.hash(); - let block_height: U256 = b.pivot_header.height().into(); - - let logs: Vec<_> = receipt - .logs - .iter() - .cloned() - .enumerate() - .map(|(idx, log)| Log { - address: log.address, - topics: log.topics, - data: Bytes(log.data), - block_hash, - block_number: block_height, - transaction_hash, - transaction_index, - log_index: Some((*prior_log_index + idx).into()), - transaction_log_index: Some(idx.into()), - removed: false, - }) - .collect(); - - *prior_log_index += logs.len(); - - let gas_used = match idx { - 0 => receipt.accumulated_gas_used, - idx => { - receipt.accumulated_gas_used - - b.receipts[idx - 1].accumulated_gas_used - } - }; - - let tx_exec_error_msg = if b.errors[idx].is_empty() { - None - } else { - Some(b.errors[idx].clone()) - }; - - let effective_gas_price = - if let Some(base_price) = b.pivot_header.base_price() { - let base_price = base_price[tx.space()]; - if *tx.gas_price() < base_price { - *tx.gas_price() - } else { - tx.effective_gas_price(&base_price) - } - } else { - *tx.gas_price() - }; - - Ok(Receipt { - transaction_hash, - transaction_index, - block_hash, - from: tx.sender().address, - to: match tx.action() { - Action::Create => None, - Action::Call(addr) => Some(*addr), - }, - block_number: block_height, - cumulative_gas_used: receipt.accumulated_gas_used, - gas_used, - contract_address, - logs, - logs_bloom: receipt.log_bloom, - status_code: receipt - .outcome_status - .in_space(Space::Ethereum) - .into(), - 
effective_gas_price, - tx_exec_error_msg, - transaction_type: receipt - .burnt_gas_fee - .is_some() - .then_some(U64::from(tx.type_id())), - burnt_gas_fee: receipt.burnt_gas_fee, - }) - } - - fn get_tx_from_txpool(&self, hash: H256) -> Option { - let tx = self.tx_pool.get_transaction(&hash)?; - - if tx.space() == Space::Ethereum { - Some(Transaction::from_signed( - &tx, - (None, None, None), - (None, None), - )) - } else { - None - } - } - - fn get_block_receipts( - &self, block_num: BlockNumber, - ) -> RpcResult> { - let b = { - // keep read lock to ensure consistent view - let _inner = self.consensus_graph().inner.read(); - - let phantom_block = match block_num { - BlockNumber::Hash { hash, .. } => self - .consensus_graph() - .get_phantom_block_by_hash( - &hash, false, /* include_traces */ - ) - .map_err(RpcError::invalid_params)?, - _ => self - .consensus_graph() - .get_phantom_block_by_number( - block_num.try_into()?, - None, - false, /* include_traces */ - ) - .map_err(RpcError::invalid_params)?, - }; - - match phantom_block { - None => return Err(unknown_block()), - Some(b) => b, - } - }; - - let mut block_receipts = vec![]; - let mut prior_log_index = 0; - - for idx in 0..b.receipts.len() { - block_receipts.push(self.construct_rpc_receipt( - &b, - idx, - &mut prior_log_index, - )?); - } - - return Ok(block_receipts); - } - - fn block_tx_by_index( - phantom_block: Option, idx: usize, - ) -> Option { - match phantom_block { - None => None, - Some(pb) => match pb.transactions.get(idx) { - None => None, - Some(tx) => { - let block_number = Some(pb.pivot_header.height().into()); - let receipt = pb.receipts.get(idx).unwrap(); - let status = - receipt.outcome_status.in_space(Space::Ethereum); - let contract_address = match status == EVM_SPACE_SUCCESS { - true => Transaction::deployed_contract_address(&tx), - false => None, - }; - Some(Transaction::from_signed( - &tx, - ( - Some(pb.pivot_header.hash()), - block_number, - Some(idx.into()), - ), - (Some(status.into()), contract_address), - )) - } - }, - } - } -} - -impl BlockProvider for &EthHandler { - fn get_block_epoch_number(&self, hash: &H256) -> Option { - self.consensus_graph().get_block_epoch_number(hash) - } - - fn get_block_hashes_by_epoch( - &self, epoch_number: EpochNumber, - ) -> Result, String> { - self.consensus_graph() - .get_block_hashes_by_epoch(epoch_number) - } } impl Eth for EthHandler { @@ -489,7 +47,7 @@ impl Eth for EthHandler { fn net_version(&self) -> RpcResult { debug!("RPC Request: net_version()"); - Ok(format!("{}", self.consensus.best_chain_id().in_evm_space())) + Ok(format!("{}", self.inner.chain_id())) } fn protocol_version(&self) -> RpcResult { @@ -500,23 +58,7 @@ impl Eth for EthHandler { fn syncing(&self) -> RpcResult { debug!("RPC Request: eth_syncing()"); - if self.sync.catch_up_mode() { - Ok( - // Now pass some statistics of Conflux just to make the - // interface happy - SyncStatus::Info(SyncInfo { - starting_block: U256::from(self.consensus.block_count()), - current_block: U256::from(self.consensus.block_count()), - highest_block: U256::from( - self.sync.get_synchronization_graph().block_count(), - ), - warp_chunks_amount: None, - warp_chunks_processed: None, - }), - ) - } else { - Ok(SyncStatus::None) - } + Ok(self.inner.sync_status()) } fn hashrate(&self) -> RpcResult { @@ -539,46 +81,19 @@ impl Eth for EthHandler { fn chain_id(&self) -> RpcResult> { debug!("RPC Request: eth_chainId()"); - return Ok(Some(self.consensus.best_chain_id().in_evm_space().into())); + return 
Ok(Some(self.inner.chain_id().into())); } fn gas_price(&self) -> RpcResult { debug!("RPC Request: eth_gasPrice()"); - let (_, maybe_base_price) = - self.tx_pool.get_best_info_with_parent_base_price(); - if let Some(base_price) = maybe_base_price { - return Ok(base_price[Space::Ethereum]); - } - - let consensus_gas_price = self - .consensus_graph() - .gas_price(Space::Ethereum) - .unwrap_or(GAS_PRICE_DEFAULT_VALUE.into()); - Ok(std::cmp::max( - consensus_gas_price, - self.tx_pool.config.min_eth_tx_price.into(), - )) + Ok(self.inner.gas_price()) } fn max_priority_fee_per_gas(&self) -> RpcResult { debug!("RPC Request: eth_maxPriorityFeePerGas()"); - let evm_ratio = - self.tx_pool.machine().params().evm_transaction_block_ratio - as usize; - - let fee_history = self.fee_history( - HexU64::from(300), - BlockNumber::Latest, - Some(vec![50f64]), - )?; - - let total_reward: U256 = fee_history - .reward() - .iter() - .map(|x| x.first().unwrap()) - .fold(U256::zero(), |x, y| x + *y); - - Ok(total_reward * evm_ratio / 300) + self.inner + .max_priority_fee_per_gas() + .map_err(|err| err.into()) } fn accounts(&self) -> RpcResult> { @@ -589,61 +104,33 @@ impl Eth for EthHandler { fn block_number(&self) -> RpcResult { debug!("RPC Request: eth_blockNumber()"); - - let consensus_graph = self.consensus_graph(); - let epoch_num = EpochNumber::LatestState; - match consensus_graph.get_height_from_epoch_number(epoch_num.into()) { - Ok(height) => Ok(height.into()), - Err(e) => Err(RpcError::invalid_params(e)), - } + self.inner.latest_block_number().map_err(|err| err.into()) } fn balance( &self, address: H160, num: Option, ) -> RpcResult { - let epoch_num = num.unwrap_or_default().try_into()?; debug!( "RPC Request: eth_getBalance(address={:?}, epoch_num={:?})", - address, epoch_num + address, num ); - let state_db = self - .consensus - .get_eth_state_db_by_epoch_number(epoch_num, "num")?; - let acc = state_db - .get_account(&address.with_evm_space()) - .map_err(|err| CoreError::from(err))?; - - Ok(acc.map_or(U256::zero(), |acc| acc.balance).into()) + self.inner + .user_balance(address, num) + .map_err(|err| err.into()) } fn storage_at( &self, address: H160, position: U256, block_num: Option, ) -> RpcResult { - let epoch_num = block_num.unwrap_or_default().try_into()?; debug!( "RPC Request: eth_getStorageAt(address={:?}, position={:?}, block_num={:?})", - address, position, epoch_num + address, position, block_num ); - let state_db = self - .consensus - .get_eth_state_db_by_epoch_number(epoch_num, "epoch_number")?; - - let position: H256 = H256::from_uint(&position); - - let key = StorageKey::new_storage_key(&address, position.as_ref()) - .with_evm_space(); - - Ok( - match state_db - .get::(key) - .map_err(|err| CoreError::from(err))? - { - Some(entry) => H256::from_uint(&entry.value).into(), - None => H256::zero(), - }, - ) + self.inner + .storage_at(address, position, block_num) + .map_err(|err| err.into()) } fn block_by_hash( @@ -654,21 +141,9 @@ impl Eth for EthHandler { hash, include_txs ); - let phantom_block = { - // keep read lock to ensure consistent view - let _inner = self.consensus_graph().inner.read(); - - self.consensus_graph() - .get_phantom_block_by_hash( - &hash, false, /* include_traces */ - ) - .map_err(RpcError::invalid_params)? 
- }; - - match phantom_block { - None => Ok(None), - Some(pb) => Ok(Some(RpcBlock::from_phantom(&pb, include_txs))), - } + self.inner + .block_by_hash(hash, include_txs) + .map_err(|err| err.into()) } fn block_by_number( @@ -676,23 +151,9 @@ impl Eth for EthHandler { ) -> RpcResult> { debug!("RPC Request: eth_getBlockByNumber(block_number={:?}, include_txs={:?})", block_num, include_txs); - let phantom_block = { - // keep read lock to ensure consistent view - let _inner = self.consensus_graph().inner.read(); - - self.consensus_graph() - .get_phantom_block_by_number( - block_num.try_into()?, - None, - false, /* include_traces */ - ) - .map_err(RpcError::invalid_params)? - }; - - match phantom_block { - None => Ok(None), - Some(pb) => Ok(Some(RpcBlock::from_phantom(&pb, include_txs))), - } + self.inner + .block_by_number(block_num, include_txs) + .map_err(|err| err.into()) } fn transaction_count( @@ -703,22 +164,9 @@ impl Eth for EthHandler { address, num ); - let nonce = match num { - Some(BlockNumber::Pending) => { - self.tx_pool.get_next_nonce(&address.with_evm_space()) - } - _ => { - let num = num.unwrap_or_default().try_into()?; - - self.consensus_graph().next_nonce( - address.with_evm_space(), - BlockHashOrEpochNumber::EpochNumber(num), - "num", - )? - } - }; - - Ok(nonce) + self.inner + .next_nonce(address, num) + .map_err(|err| err.into()) } fn block_transaction_count_by_hash( @@ -729,21 +177,9 @@ impl Eth for EthHandler { hash, ); - let phantom_block = { - // keep read lock to ensure consistent view - let _inner = self.consensus_graph().inner.read(); - - self.consensus_graph() - .get_phantom_block_by_hash( - &hash, false, /* include_traces */ - ) - .map_err(RpcError::invalid_params)? - }; - - match phantom_block { - None => Ok(None), - Some(pb) => Ok(Some(pb.transactions.len().into())), - } + self.inner + .block_transaction_count_by_hash(hash) + .map_err(|err| err.into()) } fn block_transaction_count_by_number( @@ -754,23 +190,9 @@ impl Eth for EthHandler { block_num ); - let phantom_block = { - // keep read lock to ensure consistent view - let _inner = self.consensus_graph().inner.read(); - - self.consensus_graph() - .get_phantom_block_by_number( - block_num.try_into()?, - None, - false, /* include_traces */ - ) - .map_err(RpcError::invalid_params)? - }; - - match phantom_block { - None => Ok(None), - Some(pb) => Ok(Some(pb.transactions.len().into())), - } + self.inner + .block_transaction_count_by_number(block_num) + .map_err(|err| err.into()) } fn block_uncles_count_by_hash( @@ -778,21 +200,9 @@ impl Eth for EthHandler { ) -> RpcResult> { debug!("RPC Request: eth_getUncleCountByBlockHash(hash={:?})", hash); - let epoch_num = match self.consensus.get_block_epoch_number(&hash) { - None => return Ok(None), - Some(n) => n, - }; - - let maybe_pivot_hash = self - .consensus - .get_block_hashes_by_epoch(epoch_num.into()) - .ok() - .and_then(|hs| hs.last().cloned()); - - match maybe_pivot_hash { - Some(h) if h == hash => Ok(Some(0.into())), - _ => Ok(None), - } + self.inner + .block_uncles_count_by_hash(hash) + .map_err(|err| err.into()) } fn block_uncles_count_by_number( @@ -803,45 +213,22 @@ impl Eth for EthHandler { block_num ); - let maybe_epoch = self - .consensus - .get_block_hashes_by_epoch(block_num.try_into()?) 
- .ok(); - - Ok(maybe_epoch.map(|_| 0.into())) + self.inner + .block_uncles_count_by_number(block_num) + .map_err(|err| err.into()) } fn code_at( &self, address: H160, epoch_num: Option, ) -> RpcResult { - let epoch_num = epoch_num.unwrap_or_default().try_into()?; - debug!( "RPC Request: eth_getCode(address={:?}, epoch_num={:?})", address, epoch_num ); - let state_db = self - .consensus - .get_eth_state_db_by_epoch_number(epoch_num, "num")?; - - let address = address.with_evm_space(); - - let code = match state_db - .get_account(&address) - .map_err(|err| CoreError::from(err))? - { - Some(acc) => match state_db - .get_code(&address, &acc.code_hash) - .map_err(|err| CoreError::from(err))? - { - Some(code) => (*code.code).clone(), - _ => vec![], - }, - None => vec![], - }; - - Ok(Bytes::new(code)) + self.inner + .code_at(address, epoch_num) + .map_err(|err| err.into()) } fn send_raw_transaction(&self, raw: Bytes) -> RpcResult { @@ -864,7 +251,7 @@ impl Eth for EthHandler { bail!(EthApiError::InvalidTransactionSignature); } - let r = self.send_transaction_with_signature(tx)?; + let r = self.inner.send_transaction_with_signature(tx)?; Ok(r) } @@ -882,7 +269,7 @@ impl Eth for EthHandler { ); let (execution, _estimation) = - self.exec_transaction(request, block_number_or_hash)?; + self.inner.exec_transaction(request, block_number_or_hash)?; Ok(execution.output.into()) } @@ -896,13 +283,13 @@ impl Eth for EthHandler { request, block_number_or_hash ); let (_, estimated_gas) = - self.exec_transaction(request, block_number_or_hash)?; + self.inner.exec_transaction(request, block_number_or_hash)?; Ok(estimated_gas) } fn fee_history( - &self, mut block_count: HexU64, newest_block: BlockNumber, + &self, block_count: HexU64, newest_block: BlockNumber, reward_percentiles: Option>, ) -> RpcResult { debug!( @@ -910,96 +297,9 @@ impl Eth for EthHandler { block_count, newest_block, reward_percentiles ); - if block_count.as_u64() == 0 || newest_block == BlockNumber::Pending { - return Ok(FeeHistory::new()); - } - - if block_count.as_u64() > MAX_FEE_HISTORY_CACHE_BLOCK_COUNT { - block_count = HexU64::from(MAX_FEE_HISTORY_CACHE_BLOCK_COUNT); - } - - if let Some(percentiles) = &reward_percentiles { - if percentiles.windows(2).any(|w| w[0] > w[1] || w[0] > 100.) { - return Err(EthApiError::InvalidRewardPercentiles.into()); - } - } - let reward_percentiles = reward_percentiles.unwrap_or_default(); - - // keep read lock to ensure consistent view - let _consensus = self.consensus_graph().inner.read(); - - let newest_height: u64 = self - .consensus_graph() - .get_height_from_epoch_number(newest_block.clone().try_into()?) 
- .map_err(RpcError::invalid_params)?; - - if newest_block == BlockNumber::Latest { - let fetch_block_by_hash = - |height| self.fetch_block_by_hash(&height); - - let latest_block = self - .fetch_block_by_height(newest_height) - .map_err(RpcError::invalid_params)?; - - self.fee_history_cache - .update_to_latest_block( - newest_height, - latest_block.pivot_header.hash(), - block_count.as_u64(), - fetch_block_by_hash, - ) - .map_err(RpcError::invalid_params)?; - } - - let mut fee_history = FeeHistory::new(); - - let end_block = newest_height; - let start_block = if end_block >= block_count.as_u64() { - end_block - block_count.as_u64() + 1 - } else { - 0 - }; - - let mut cached_fee_history_entries = self - .fee_history_cache - .get_history_with_missing_info(start_block, end_block); - - cached_fee_history_entries.reverse(); - for (i, entry) in cached_fee_history_entries.into_iter().enumerate() { - if entry.is_none() { - let height = end_block - i as u64; - let block = self - .fetch_block_by_height(height) - .map_err(RpcError::invalid_params)?; - - // Internal error happens only if the fetch header has - // inconsistent block height - fee_history - .push_front_block( - Space::Ethereum, - &reward_percentiles, - &block.pivot_header, - block.transactions.iter().map(|x| &**x), - ) - .map_err(|_| RpcError::internal_error())?; - } else { - fee_history - .push_front_entry(&entry.unwrap(), &reward_percentiles) - .expect("always success"); - } - } - - let block = self - .fetch_block_by_height(end_block + 1) - .map_err(RpcError::invalid_params)?; - - fee_history.finish( - start_block, - block.pivot_header.base_price().as_ref(), - Space::Ethereum, - ); - - Ok(fee_history) + self.inner + .fee_history(block_count, newest_block, reward_percentiles) + .map_err(|err| err.into()) } fn transaction_by_hash( @@ -1007,56 +307,9 @@ impl Eth for EthHandler { ) -> RpcResult> { debug!("RPC Request: eth_getTransactionByHash(hash={:?})", hash); - let tx_index = match self - .consensus - .get_data_manager() - .transaction_index_by_hash(&hash, false /* update_cache */) - { - None => return Ok(self.get_tx_from_txpool(hash)), - Some(tx_index) => tx_index, - }; - - let epoch_num = - match self.consensus.get_block_epoch_number(&tx_index.block_hash) { - None => return Ok(self.get_tx_from_txpool(hash)), - Some(n) => n, - }; - - let maybe_block = self - .consensus_graph() - .get_phantom_block_by_number( - EpochNumber::Number(epoch_num), - None, - false, /* include_traces */ - ) - .map_err(RpcError::invalid_params)?; - - let phantom_block = match maybe_block { - None => return Ok(self.get_tx_from_txpool(hash)), - Some(b) => b, - }; - - for (idx, tx) in phantom_block.transactions.iter().enumerate() { - if tx.hash() == hash { - let tx = Self::block_tx_by_index(Some(phantom_block), idx); - if let Some(tx_ref) = &tx { - if tx_ref.status - == Some( - TransactionStatus::Skipped - .in_space(Space::Ethereum) - .into(), - ) - { - // A skipped transaction is not available to clients if - // accessed by its hash. 
- return Ok(None); - } - } - return Ok(tx); - } - } - - Ok(self.get_tx_from_txpool(hash)) + self.inner + .transaction_by_hash(hash) + .map_err(|err| err.into()) } fn transaction_by_block_hash_and_index( @@ -1064,18 +317,9 @@ impl Eth for EthHandler { ) -> RpcResult> { debug!("RPC Request: eth_getTransactionByBlockHashAndIndex(hash={:?}, idx={:?})", hash, idx); - let phantom_block = { - // keep read lock to ensure consistent view - let _inner = self.consensus_graph().inner.read(); + let phantom_block = self.inner.phantom_block_by_hash(hash)?; - self.consensus_graph() - .get_phantom_block_by_hash( - &hash, false, /* include_traces */ - ) - .map_err(RpcError::invalid_params)? - }; - - Ok(Self::block_tx_by_index(phantom_block, idx.value())) + Ok(EthApi::block_tx_by_index(phantom_block, idx.value())) } fn transaction_by_block_number_and_index( @@ -1083,20 +327,9 @@ impl Eth for EthHandler { ) -> RpcResult> { debug!("RPC Request: eth_getTransactionByBlockNumberAndIndex(block_num={:?}, idx={:?})", block_num, idx); - let phantom_block = { - // keep read lock to ensure consistent view - let _inner = self.consensus_graph().inner.read(); - - self.consensus_graph() - .get_phantom_block_by_number( - block_num.try_into()?, - None, - false, /* include_traces */ - ) - .map_err(RpcError::invalid_params)? - }; + let phantom_block = self.inner.phantom_block_by_number(block_num)?; - Ok(Self::block_tx_by_index(phantom_block, idx.value())) + Ok(EthApi::block_tx_by_index(phantom_block, idx.value())) } fn transaction_receipt(&self, tx_hash: H256) -> RpcResult> { @@ -1105,67 +338,9 @@ impl Eth for EthHandler { tx_hash ); - let tx_index = - match self.consensus.get_data_manager().transaction_index_by_hash( - &tx_hash, false, /* update_cache */ - ) { - None => return Ok(None), - Some(tx_index) => tx_index, - }; - - let epoch_num = - match self.consensus.get_block_epoch_number(&tx_index.block_hash) { - None => return Ok(None), - Some(n) => n, - }; - - if epoch_num > self.consensus_graph().best_executed_state_epoch_number() - { - // The receipt is only visible to optimistic execution. - return Ok(None); - } - - let maybe_block = self - .consensus_graph() - .get_phantom_block_by_number( - EpochNumber::Number(epoch_num), - None, - false, /* include_traces */ - ) - .map_err(RpcError::invalid_params)?; - - let phantom_block = match maybe_block { - None => return Ok(None), - Some(b) => b, - }; - - let mut prior_log_index = 0; - - for (idx, tx) in phantom_block.transactions.iter().enumerate() { - if tx.hash() == tx_hash { - let receipt = self.construct_rpc_receipt( - &phantom_block, - idx, - &mut prior_log_index, - )?; - // A skipped transaction is not available to clients if accessed - // by its hash. 
- if receipt.status_code - == TransactionStatus::Skipped - .in_space(Space::Ethereum) - .into() - { - return Ok(None); - } - - return Ok(Some(receipt)); - } - - // if the if-branch was not entered, we do the bookeeping here - prior_log_index += phantom_block.receipts[idx].logs.len(); - } - - Ok(None) + self.inner + .transaction_receipt(tx_hash) + .map_err(|err| err.into()) } fn uncle_by_block_hash_and_index( @@ -1190,25 +365,7 @@ impl Eth for EthHandler { fn logs(&self, filter: EthRpcLogFilter) -> RpcResult> { debug!("RPC Request: eth_getLogs(filter={:?})", filter); - let filter: LogFilter = filter.into_primitive(self)?; - - let logs = self - .consensus_graph() - .logs(filter) - .map_err(|err| CoreError::from(err))?; - - // If the results does not fit into `max_limit`, report an error - if let Some(max_limit) = self.config.get_logs_filter_max_limit { - if logs.len() > max_limit { - bail!(invalid_params("filter", format!("This query results in too many logs, max limitation is {}, please use a smaller block range", max_limit))); - } - } - - Ok(logs - .iter() - .cloned() - .map(|l| Log::try_from_localized(l, self, false)) - .collect::>()?) + self.inner.logs(filter).map_err(|err| err.into()) } fn submit_hashrate(&self, _: U256, _: H256) -> RpcResult { @@ -1225,7 +382,7 @@ impl Eth for EthHandler { block ); - self.get_block_receipts(block) + self.inner.get_block_receipts(block).map_err(|e| e.into()) } fn block_receipts( @@ -1237,8 +394,9 @@ impl Eth for EthHandler { ); let block_num = block_num.unwrap_or_default(); - - self.get_block_receipts(block_num) + self.inner + .get_block_receipts(block_num) + .map_err(|e| e.into()) } fn account_pending_transactions( @@ -1249,12 +407,13 @@ impl Eth for EthHandler { address, maybe_start_nonce, maybe_limit); let (pending_txs, tx_status, pending_count) = self - .tx_pool + .inner + .tx_pool() .get_account_pending_transactions( &Address::from(address).with_evm_space(), maybe_start_nonce, maybe_limit.map(|limit| limit.as_usize()), - self.consensus.best_epoch_number(), + self.inner.best_epoch_number(), ) .map_err(|e| internal_error(e))?; Ok(AccountPendingTransactions { diff --git a/crates/rpc/rpc-utils/src/error/errors.rs b/crates/rpc/rpc-utils/src/error/errors.rs index 5b4366c41..872021659 100644 --- a/crates/rpc/rpc-utils/src/error/errors.rs +++ b/crates/rpc/rpc-utils/src/error/errors.rs @@ -7,6 +7,7 @@ use alloy_primitives::{hex, Address, Bytes}; use alloy_rpc_types::error::EthRpcErrorCode; use alloy_sol_types::decode_revert_reason; use jsonrpc_core::{Error as JsonRpcError, ErrorCode}; +use jsonrpsee::types::ErrorObjectOwned; use revm::primitives::{HaltReason, OutOfGasError}; use std::time::Duration; @@ -170,6 +171,13 @@ impl From for JsonRpcError { } } +impl From for ErrorObjectOwned { + fn from(e: EthApiError) -> Self { + let err = JsonRpcError::from(e); + ErrorObjectOwned::owned(err.code.code() as i32, err.message, err.data) + } +} + /// An error due to invalid transaction. 
/// /// The only reason this exists is to maintain compatibility with other clients diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 54089cdd3..d25cbe052 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -30,3 +30,11 @@ geth-tracer = { workspace = true } cfx-rpc-utils = { workspace = true } parking_lot = { workspace = true } cfx-rpc-cfx-types = { workspace = true } +jsonrpc-core = { workspace = true } +cfx-vm-types = { workspace = true } +cfx-statedb = { workspace = true } +cfx-executor = { workspace = true } +cfx-execute-helper = { workspace = true } +error-chain = { workspace = true } +cfx-parameters = { workspace = true } +rustc-hex = { workspace = true } diff --git a/crates/rpc/rpc/src/eth.rs b/crates/rpc/rpc/src/eth.rs index cb2fbe9a1..2e9b0a1ad 100644 --- a/crates/rpc/rpc/src/eth.rs +++ b/crates/rpc/rpc/src/eth.rs @@ -1,51 +1,1050 @@ +use crate::helpers::{FeeHistoryCache, MAX_FEE_HISTORY_CACHE_BLOCK_COUNT}; use async_trait::async_trait; +use cfx_execute_helper::estimation::EstimateRequest; +use cfx_executor::executive::{ + string_revert_reason_decode, Executed, ExecutionError, ExecutionOutcome, + TxDropError, +}; +use cfx_parameters::rpc::GAS_PRICE_DEFAULT_VALUE; +use cfx_rpc_cfx_types::{ + traits::BlockProvider, PhantomBlock, RpcImplConfiguration, +}; use cfx_rpc_eth_api::EthApiServer; use cfx_rpc_eth_types::{ - Block, BlockNumber as BlockId, FeeHistory, Header, Receipt, SyncStatus, - Transaction, TransactionRequest, + Block, BlockNumber as BlockId, EthRpcLogFilter, FeeHistory, Header, Log, + Receipt, SyncInfo, SyncStatus, Transaction, TransactionRequest, +}; +use cfx_rpc_primitives::{Bytes, Index, U64 as HexU64}; +use cfx_rpc_utils::error::{errors::*, jsonrpc_error_helpers::*}; +use cfx_statedb::StateDbExt; +use cfx_types::{ + Address, AddressSpaceUtil, BigEndianHash, Space, H160, H256, H64, U256, U64, +}; +use cfx_vm_types::Error as VmError; +use cfxcore::{ + errors::{Error as CoreError, Result as CoreResult}, + ConsensusGraph, ConsensusGraphTrait, SharedConsensusGraph, + SharedSynchronizationService, SharedTransactionPool, }; -use cfx_rpc_primitives::{Bytes, Index}; -use cfx_types::{Address, H256, H64, U256, U64}; +use error_chain::bail; +use jsonrpc_core::Error as RpcError; use jsonrpsee::core::RpcResult; +use primitives::{ + filter::LogFilter, receipt::EVM_SPACE_SUCCESS, Action, + BlockHashOrEpochNumber, EpochNumber, StorageKey, StorageValue, + TransactionStatus, TransactionWithSignature, +}; +use rustc_hex::ToHex; +type BlockNumber = BlockId; type BlockNumberOrTag = BlockId; type JsonStorageKey = U256; +type RpcBlock = Block; + +pub struct EthApi { + config: RpcImplConfiguration, + consensus: SharedConsensusGraph, + sync: SharedSynchronizationService, + tx_pool: SharedTransactionPool, + fee_history_cache: FeeHistoryCache, +} + +impl EthApi { + pub fn new( + config: RpcImplConfiguration, consensus: SharedConsensusGraph, + sync: SharedSynchronizationService, tx_pool: SharedTransactionPool, + ) -> Self { + EthApi { + config, + consensus, + sync, + tx_pool, + fee_history_cache: FeeHistoryCache::new(), + } + } + + pub fn consensus_graph(&self) -> &ConsensusGraph { + self.consensus + .as_any() + .downcast_ref::() + .expect("downcast should succeed") + } + + pub fn tx_pool(&self) -> &SharedTransactionPool { &self.tx_pool } + + pub fn fetch_block_by_height( + &self, height: u64, + ) -> Result { + let maybe_block = self.consensus_graph().get_phantom_block_by_number( + EpochNumber::Number(height), + None, + false, + )?; + if let 
Some(block) = maybe_block { + Ok(block) + } else { + Err("Specified block header does not exist".into()) + } + } + + pub fn fetch_block_by_hash( + &self, hash: &H256, + ) -> Result { + let maybe_block = self + .consensus_graph() + .get_phantom_block_by_hash(hash, false)?; + if let Some(block) = maybe_block { + Ok(block) + } else { + Err("Specified block header does not exist".into()) + } + } + + pub fn exec_transaction( + &self, mut request: TransactionRequest, + block_number_or_hash: Option, + ) -> CoreResult<(Executed, U256)> { + let consensus_graph = self.consensus_graph(); + + if request.gas_price.is_some() + && request.max_priority_fee_per_gas.is_some() + { + return Err(RpcError::from( + EthApiError::ConflictingFeeFieldsInRequest, + ) + .into()); + } + + if request.max_fee_per_gas.is_some() + && request.max_priority_fee_per_gas.is_some() + { + if request.max_fee_per_gas.unwrap() + < request.max_priority_fee_per_gas.unwrap() + { + return Err(RpcError::from( + RpcInvalidTransactionError::TipAboveFeeCap, + ) + .into()); + } + } + + let epoch = match block_number_or_hash.unwrap_or_default() { + BlockNumber::Hash { hash, .. } => { + match consensus_graph.get_block_epoch_number(&hash) { + Some(e) => { + // do not expose non-pivot blocks in eth RPC + let pivot = consensus_graph + .get_block_hashes_by_epoch(EpochNumber::Number(e))? + .last() + .cloned(); + + if Some(hash) != pivot { + bail!("Block {:?} not found", hash); + } + + EpochNumber::Number(e) + } + None => bail!("Block {:?} not found", hash), + } + } + epoch => epoch.try_into()?, + }; + + // if gas_price is zero, it is considered as not set + request.unset_zero_gas_price(); + + let estimate_request = EstimateRequest { + has_sender: request.from.is_some(), + has_gas_limit: request.gas.is_some(), + has_gas_price: request.has_gas_price(), + has_nonce: request.nonce.is_some(), + has_storage_limit: false, + }; + + let chain_id = self.consensus.best_chain_id(); + + let max_gas = self.config.max_estimation_gas_limit; + let signed_tx = request.sign_call(chain_id.in_evm_space(), max_gas)?; + + let (execution_outcome, estimation) = consensus_graph.call_virtual( + &signed_tx, + epoch, + estimate_request, + )?; + + let executed = match execution_outcome { + ExecutionOutcome::NotExecutedDrop(TxDropError::OldNonce( + expected, + got, + )) => bail!(invalid_input_rpc_err( + format! {"nonce is too old expected {:?} got {:?}", expected, got} + )), + ExecutionOutcome::NotExecutedDrop( + TxDropError::InvalidRecipientAddress(recipient), + ) => bail!(invalid_input_rpc_err( + format! {"invalid recipient address {:?}", recipient} + )), + ExecutionOutcome::NotExecutedDrop( + TxDropError::NotEnoughGasLimit { expected, got }, + ) => bail!(invalid_input_rpc_err( + format! {"not enough gas limit with respected to tx size: expected {:?} got {:?}", expected, got} + )), + ExecutionOutcome::NotExecutedToReconsiderPacking(e) => { + bail!(invalid_input_rpc_err(format! {"err: {:?}", e})) + } + ExecutionOutcome::ExecutionErrorBumpNonce( + e @ ExecutionError::NotEnoughCash { .. 
}, + _executed, + ) => { + bail!(geth_call_execution_error( + format!( + "insufficient funds for gas * price + value: {:?})", + e + ), + "".into() + )) + } + ExecutionOutcome::ExecutionErrorBumpNonce( + ExecutionError::VmError(VmError::Reverted), + executed, + ) => bail!(geth_call_execution_error( + format!( + "execution reverted: revert: {}", + string_revert_reason_decode(&executed.output) + ), + format!("0x{}", executed.output.to_hex::()) + )), + ExecutionOutcome::ExecutionErrorBumpNonce( + ExecutionError::VmError(e), + _executed, + ) => bail!(geth_call_execution_error( + format!("execution reverted: {}", e), + "".into() + )), + ExecutionOutcome::Finished(executed) => executed, + }; + + Ok((executed, estimation.estimated_gas_limit)) + } + + pub fn send_transaction_with_signature( + &self, tx: TransactionWithSignature, + ) -> CoreResult { + if self.sync.catch_up_mode() { + bail!(request_rejected_in_catch_up_mode(None)); + } + let (signed_trans, failed_trans) = + self.tx_pool.insert_new_transactions(vec![tx]); + if signed_trans.len() + failed_trans.len() > 1 { + // This should never happen + Ok(H256::zero().into()) + } else if signed_trans.len() + failed_trans.len() == 0 { + // For tx in transactions_pubkey_cache, we simply ignore them + bail!(RpcError::from(EthApiError::PoolError( + RpcPoolError::ReplaceUnderpriced + ))); + } else if signed_trans.is_empty() { + let tx_err = failed_trans.into_iter().next().expect("Not empty").1; + bail!(RpcError::from(EthApiError::from(tx_err))) + } else { + let tx_hash = signed_trans[0].hash(); + self.sync.append_received_transactions(signed_trans); + Ok(tx_hash.into()) + } + } + + pub fn construct_rpc_receipt( + &self, b: &PhantomBlock, idx: usize, prior_log_index: &mut usize, + ) -> CoreResult { + if b.transactions.len() != b.receipts.len() { + return Err(internal_error( + "Inconsistent state: transactions and receipts length mismatch", + ) + .into()); + } + + if b.transactions.len() != b.errors.len() { + return Err(internal_error( + "Inconsistent state: transactions and errors length mismatch", + ) + .into()); + } + + if idx >= b.transactions.len() { + return Err(internal_error( + "Inconsistent state: tx index out of bound", + ) + .into()); + } + + let tx = &b.transactions[idx]; + let receipt = &b.receipts[idx]; + + if receipt.logs.iter().any(|l| l.space != Space::Ethereum) { + return Err(internal_error( + "Inconsistent state: native tx in phantom block", + ) + .into()); + } + + let contract_address = match receipt.outcome_status { + TransactionStatus::Success => { + Transaction::deployed_contract_address(tx) + } + _ => None, + }; + + let transaction_hash = tx.hash(); + let transaction_index: U256 = idx.into(); + let block_hash = b.pivot_header.hash(); + let block_height: U256 = b.pivot_header.height().into(); + + let logs: Vec<_> = receipt + .logs + .iter() + .cloned() + .enumerate() + .map(|(idx, log)| Log { + address: log.address, + topics: log.topics, + data: Bytes(log.data), + block_hash, + block_number: block_height, + transaction_hash, + transaction_index, + log_index: Some((*prior_log_index + idx).into()), + transaction_log_index: Some(idx.into()), + removed: false, + }) + .collect(); + + *prior_log_index += logs.len(); + + let gas_used = match idx { + 0 => receipt.accumulated_gas_used, + idx => { + receipt.accumulated_gas_used + - b.receipts[idx - 1].accumulated_gas_used + } + }; + + let tx_exec_error_msg = if b.errors[idx].is_empty() { + None + } else { + Some(b.errors[idx].clone()) + }; + + let effective_gas_price = + if let Some(base_price) 
= b.pivot_header.base_price() { + let base_price = base_price[tx.space()]; + if *tx.gas_price() < base_price { + *tx.gas_price() + } else { + tx.effective_gas_price(&base_price) + } + } else { + *tx.gas_price() + }; + + Ok(Receipt { + transaction_hash, + transaction_index, + block_hash, + from: tx.sender().address, + to: match tx.action() { + Action::Create => None, + Action::Call(addr) => Some(*addr), + }, + block_number: block_height, + cumulative_gas_used: receipt.accumulated_gas_used, + gas_used, + contract_address, + logs, + logs_bloom: receipt.log_bloom, + status_code: receipt + .outcome_status + .in_space(Space::Ethereum) + .into(), + effective_gas_price, + tx_exec_error_msg, + transaction_type: receipt + .burnt_gas_fee + .is_some() + .then_some(U64::from(tx.type_id())), + burnt_gas_fee: receipt.burnt_gas_fee, + }) + } + + pub fn get_tx_from_txpool(&self, hash: H256) -> Option { + let tx = self.tx_pool.get_transaction(&hash)?; + + if tx.space() == Space::Ethereum { + Some(Transaction::from_signed( + &tx, + (None, None, None), + (None, None), + )) + } else { + None + } + } + + pub fn get_block_receipts( + &self, block_num: BlockNumber, + ) -> CoreResult> { + let b = { + // keep read lock to ensure consistent view + let _inner = self.consensus_graph().inner.read(); + + let phantom_block = match block_num { + BlockNumber::Hash { hash, .. } => self + .consensus_graph() + .get_phantom_block_by_hash( + &hash, false, /* include_traces */ + ) + .map_err(RpcError::invalid_params)?, + _ => self + .consensus_graph() + .get_phantom_block_by_number( + block_num.try_into()?, + None, + false, /* include_traces */ + ) + .map_err(RpcError::invalid_params)?, + }; + + match phantom_block { + None => return Err(unknown_block().into()), + Some(b) => b, + } + }; + + let mut block_receipts = vec![]; + let mut prior_log_index = 0; + + for idx in 0..b.receipts.len() { + block_receipts.push(self.construct_rpc_receipt( + &b, + idx, + &mut prior_log_index, + )?); + } + + return Ok(block_receipts); + } + + pub fn block_tx_by_index( + phantom_block: Option, idx: usize, + ) -> Option { + match phantom_block { + None => None, + Some(pb) => match pb.transactions.get(idx) { + None => None, + Some(tx) => { + let block_number = Some(pb.pivot_header.height().into()); + let receipt = pb.receipts.get(idx).unwrap(); + let status = + receipt.outcome_status.in_space(Space::Ethereum); + let contract_address = match status == EVM_SPACE_SUCCESS { + true => Transaction::deployed_contract_address(&tx), + false => None, + }; + Some(Transaction::from_signed( + &tx, + ( + Some(pb.pivot_header.hash()), + block_number, + Some(idx.into()), + ), + (Some(status.into()), contract_address), + )) + } + }, + } + } + + pub fn sync_status(&self) -> SyncStatus { + if self.sync.catch_up_mode() { + SyncStatus::Info(SyncInfo { + starting_block: U256::from(self.consensus.block_count()), + current_block: U256::from(self.consensus.block_count()), + highest_block: U256::from( + self.sync.get_synchronization_graph().block_count(), + ), + warp_chunks_amount: None, + warp_chunks_processed: None, + }) + } else { + SyncStatus::None + } + } + + pub fn chain_id(&self) -> u32 { + self.consensus.best_chain_id().in_evm_space() + } + + pub fn gas_price(&self) -> U256 { + let (_, maybe_base_price) = + self.tx_pool.get_best_info_with_parent_base_price(); + if let Some(base_price) = maybe_base_price { + return base_price[Space::Ethereum]; + } + + let consensus_gas_price = self + .consensus_graph() + .gas_price(Space::Ethereum) + 
.unwrap_or(GAS_PRICE_DEFAULT_VALUE.into()); + std::cmp::max( + consensus_gas_price, + self.tx_pool.config.min_eth_tx_price.into(), + ) + } + + pub fn latest_block_number(&self) -> CoreResult { + let consensus_graph = self.consensus_graph(); + let epoch_num = EpochNumber::LatestState; + match consensus_graph.get_height_from_epoch_number(epoch_num.into()) { + Ok(height) => Ok(height.into()), + Err(e) => Err(RpcError::invalid_params(e).into()), + } + } + + pub fn best_epoch_number(&self) -> u64 { + self.consensus.best_epoch_number() + } + + pub fn user_balance( + &self, address: H160, num: Option, + ) -> CoreResult { + let epoch_num = num.unwrap_or_default().try_into()?; + let state_db = self + .consensus + .get_eth_state_db_by_epoch_number(epoch_num, "num")?; + let acc = state_db + .get_account(&address.with_evm_space()) + .map_err(|err| CoreError::from(err))?; + + Ok(acc.map_or(U256::zero(), |acc| acc.balance).into()) + } + + pub fn storage_at( + &self, address: H160, position: U256, block_num: Option, + ) -> CoreResult { + let epoch_num = block_num.unwrap_or_default().try_into()?; + + let state_db = self + .consensus + .get_eth_state_db_by_epoch_number(epoch_num, "epoch_number")?; + + let position: H256 = H256::from_uint(&position); + + let key = StorageKey::new_storage_key(&address, position.as_ref()) + .with_evm_space(); + + Ok( + match state_db + .get::(key) + .map_err(|err| CoreError::from(err))? + { + Some(entry) => H256::from_uint(&entry.value).into(), + None => H256::zero(), + }, + ) + } + + pub fn phantom_block_by_hash( + &self, hash: H256, + ) -> CoreResult> { + let phantom_block = { + // keep read lock to ensure consistent view + let _inner = self.consensus_graph().inner.read(); + + self.consensus_graph() + .get_phantom_block_by_hash( + &hash, false, /* include_traces */ + ) + .map_err(RpcError::invalid_params)? + }; + + Ok(phantom_block) + } + + pub fn phantom_block_by_number( + &self, block_num: BlockNumber, + ) -> CoreResult> { + let phantom_block = { + // keep read lock to ensure consistent view + let _inner = self.consensus_graph().inner.read(); + + self.consensus_graph() + .get_phantom_block_by_number( + block_num.try_into()?, + None, + false, /* include_traces */ + ) + .map_err(RpcError::invalid_params)? + }; + + Ok(phantom_block) + } + + pub fn block_by_hash( + &self, hash: H256, include_txs: bool, + ) -> CoreResult> { + let phantom_block = self.phantom_block_by_hash(hash)?; + + match phantom_block { + None => Ok(None), + Some(pb) => Ok(Some(RpcBlock::from_phantom(&pb, include_txs))), + } + } + + pub fn block_by_number( + &self, block_num: BlockNumber, include_txs: bool, + ) -> CoreResult> { + let phantom_block = self.phantom_block_by_number(block_num)?; + + match phantom_block { + None => Ok(None), + Some(pb) => Ok(Some(RpcBlock::from_phantom(&pb, include_txs))), + } + } + + pub fn next_nonce( + &self, address: H160, num: Option, + ) -> CoreResult { + let nonce = match num { + Some(BlockNumber::Pending) => { + self.tx_pool.get_next_nonce(&address.with_evm_space()) + } + _ => { + let num = num.unwrap_or_default().try_into()?; + + self.consensus_graph().next_nonce( + address.with_evm_space(), + BlockHashOrEpochNumber::EpochNumber(num), + "num", + )? 
+ } + }; + + Ok(nonce) + } + + pub fn block_transaction_count_by_hash( + &self, hash: H256, + ) -> CoreResult> { + let phantom_block = { + // keep read lock to ensure consistent view + let _inner = self.consensus_graph().inner.read(); + + self.consensus_graph() + .get_phantom_block_by_hash( + &hash, false, /* include_traces */ + ) + .map_err(RpcError::invalid_params)? + }; + + match phantom_block { + None => Ok(None), + Some(pb) => Ok(Some(pb.transactions.len().into())), + } + } + + pub fn block_transaction_count_by_number( + &self, block_num: BlockNumber, + ) -> CoreResult> { + let phantom_block = { + // keep read lock to ensure consistent view + let _inner = self.consensus_graph().inner.read(); -pub struct EthApi; + self.consensus_graph() + .get_phantom_block_by_number( + block_num.try_into()?, + None, + false, /* include_traces */ + ) + .map_err(RpcError::invalid_params)? + }; + + match phantom_block { + None => Ok(None), + Some(pb) => Ok(Some(pb.transactions.len().into())), + } + } + + pub fn block_uncles_count_by_hash( + &self, hash: H256, + ) -> CoreResult> { + let epoch_num = match self.consensus.get_block_epoch_number(&hash) { + None => return Ok(None), + Some(n) => n, + }; + + let maybe_pivot_hash = self + .consensus + .get_block_hashes_by_epoch(epoch_num.into()) + .ok() + .and_then(|hs| hs.last().cloned()); + + match maybe_pivot_hash { + Some(h) if h == hash => Ok(Some(0.into())), + _ => Ok(None), + } + } + + pub fn block_uncles_count_by_number( + &self, block_num: BlockNumber, + ) -> CoreResult> { + let maybe_epoch = self + .consensus + .get_block_hashes_by_epoch(block_num.try_into()?) + .ok(); + + Ok(maybe_epoch.map(|_| 0.into())) + } + + pub fn code_at( + &self, address: H160, epoch_num: Option, + ) -> CoreResult { + let epoch_num = epoch_num.unwrap_or_default().try_into()?; + + let state_db = self + .consensus + .get_eth_state_db_by_epoch_number(epoch_num, "num")?; + + let address = address.with_evm_space(); + + let code = match state_db + .get_account(&address) + .map_err(|err| CoreError::from(err))? + { + Some(acc) => match state_db + .get_code(&address, &acc.code_hash) + .map_err(|err| CoreError::from(err))? + { + Some(code) => (*code.code).clone(), + _ => vec![], + }, + None => vec![], + }; + + Ok(Bytes::new(code)) + } + + pub fn fee_history( + &self, mut block_count: HexU64, newest_block: BlockNumber, + reward_percentiles: Option>, + ) -> CoreResult { + if block_count.as_u64() == 0 || newest_block == BlockNumber::Pending { + return Ok(FeeHistory::new()); + } + + if block_count.as_u64() > MAX_FEE_HISTORY_CACHE_BLOCK_COUNT { + block_count = HexU64::from(MAX_FEE_HISTORY_CACHE_BLOCK_COUNT); + } + + if let Some(percentiles) = &reward_percentiles { + if percentiles.windows(2).any(|w| w[0] > w[1] || w[0] > 100.) { + return Err(RpcError::from( + EthApiError::InvalidRewardPercentiles, + ) + .into()); + } + } + let reward_percentiles = reward_percentiles.unwrap_or_default(); + + // keep read lock to ensure consistent view + let _consensus = self.consensus_graph().inner.read(); + + let newest_height: u64 = self + .consensus_graph() + .get_height_from_epoch_number(newest_block.clone().try_into()?) 
+ .map_err(RpcError::invalid_params)?; + + if newest_block == BlockNumber::Latest { + let fetch_block_by_hash = + |height| self.fetch_block_by_hash(&height); + + let latest_block = self + .fetch_block_by_height(newest_height) + .map_err(RpcError::invalid_params)?; + + self.fee_history_cache + .update_to_latest_block( + newest_height, + latest_block.pivot_header.hash(), + block_count.as_u64(), + fetch_block_by_hash, + ) + .map_err(RpcError::invalid_params)?; + } + + let mut fee_history = FeeHistory::new(); + + let end_block = newest_height; + let start_block = if end_block >= block_count.as_u64() { + end_block - block_count.as_u64() + 1 + } else { + 0 + }; + + let mut cached_fee_history_entries = self + .fee_history_cache + .get_history_with_missing_info(start_block, end_block); + + cached_fee_history_entries.reverse(); + for (i, entry) in cached_fee_history_entries.into_iter().enumerate() { + if entry.is_none() { + let height = end_block - i as u64; + let block = self + .fetch_block_by_height(height) + .map_err(RpcError::invalid_params)?; + + // Internal error happens only if the fetch header has + // inconsistent block height + fee_history + .push_front_block( + Space::Ethereum, + &reward_percentiles, + &block.pivot_header, + block.transactions.iter().map(|x| &**x), + ) + .map_err(|_| RpcError::internal_error())?; + } else { + fee_history + .push_front_entry(&entry.unwrap(), &reward_percentiles) + .expect("always success"); + } + } + + let block = self + .fetch_block_by_height(end_block + 1) + .map_err(RpcError::invalid_params)?; + + fee_history.finish( + start_block, + block.pivot_header.base_price().as_ref(), + Space::Ethereum, + ); + + Ok(fee_history) + } + + pub fn transaction_by_hash( + &self, hash: H256, + ) -> CoreResult> { + let tx_index = match self + .consensus + .get_data_manager() + .transaction_index_by_hash(&hash, false /* update_cache */) + { + None => return Ok(self.get_tx_from_txpool(hash)), + Some(tx_index) => tx_index, + }; + + let epoch_num = + match self.consensus.get_block_epoch_number(&tx_index.block_hash) { + None => return Ok(self.get_tx_from_txpool(hash)), + Some(n) => n, + }; + + let maybe_block = self + .consensus_graph() + .get_phantom_block_by_number( + EpochNumber::Number(epoch_num), + None, + false, /* include_traces */ + ) + .map_err(RpcError::invalid_params)?; + + let phantom_block = match maybe_block { + None => return Ok(self.get_tx_from_txpool(hash)), + Some(b) => b, + }; + + for (idx, tx) in phantom_block.transactions.iter().enumerate() { + if tx.hash() == hash { + let tx = Self::block_tx_by_index(Some(phantom_block), idx); + if let Some(tx_ref) = &tx { + if tx_ref.status + == Some( + TransactionStatus::Skipped + .in_space(Space::Ethereum) + .into(), + ) + { + // A skipped transaction is not available to clients if + // accessed by its hash. + return Ok(None); + } + } + return Ok(tx); + } + } + + Ok(self.get_tx_from_txpool(hash)) + } + + pub fn transaction_receipt( + &self, tx_hash: H256, + ) -> CoreResult> { + let tx_index = + match self.consensus.get_data_manager().transaction_index_by_hash( + &tx_hash, false, /* update_cache */ + ) { + None => return Ok(None), + Some(tx_index) => tx_index, + }; + + let epoch_num = + match self.consensus.get_block_epoch_number(&tx_index.block_hash) { + None => return Ok(None), + Some(n) => n, + }; + + if epoch_num > self.consensus_graph().best_executed_state_epoch_number() + { + // The receipt is only visible to optimistic execution. 
+ return Ok(None); + } + + let maybe_block = self + .consensus_graph() + .get_phantom_block_by_number( + EpochNumber::Number(epoch_num), + None, + false, /* include_traces */ + ) + .map_err(RpcError::invalid_params)?; + + let phantom_block = match maybe_block { + None => return Ok(None), + Some(b) => b, + }; + + let mut prior_log_index = 0; + + for (idx, tx) in phantom_block.transactions.iter().enumerate() { + if tx.hash() == tx_hash { + let receipt = self.construct_rpc_receipt( + &phantom_block, + idx, + &mut prior_log_index, + )?; + // A skipped transaction is not available to clients if accessed + // by its hash. + if receipt.status_code + == TransactionStatus::Skipped + .in_space(Space::Ethereum) + .into() + { + return Ok(None); + } + + return Ok(Some(receipt)); + } + + // if the if-branch was not entered, we do the bookeeping here + prior_log_index += phantom_block.receipts[idx].logs.len(); + } + + Ok(None) + } + + pub fn logs(&self, filter: EthRpcLogFilter) -> CoreResult> { + let filter: LogFilter = filter.into_primitive(self)?; + + let logs = self + .consensus_graph() + .logs(filter) + .map_err(|err| CoreError::from(err))?; + + // If the results does not fit into `max_limit`, report an error + if let Some(max_limit) = self.config.get_logs_filter_max_limit { + if logs.len() > max_limit { + bail!(invalid_params("filter", format!("This query results in too many logs, max limitation is {}, please use a smaller block range", max_limit))); + } + } + + Ok(logs + .iter() + .cloned() + .map(|l| Log::try_from_localized(l, self, false)) + .collect::>()?) + } + + pub fn max_priority_fee_per_gas(&self) -> CoreResult { + let evm_ratio = + self.tx_pool.machine().params().evm_transaction_block_ratio + as usize; + + let fee_history = self.fee_history( + HexU64::from(300), + BlockNumber::Latest, + Some(vec![50f64]), + )?; + + let total_reward: U256 = fee_history + .reward() + .iter() + .map(|x| x.first().unwrap()) + .fold(U256::zero(), |x, y| x + *y); + + Ok(total_reward * evm_ratio / 300) + } +} + +impl BlockProvider for &EthApi { + fn get_block_epoch_number(&self, hash: &H256) -> Option { + self.consensus_graph().get_block_epoch_number(hash) + } + + fn get_block_hashes_by_epoch( + &self, epoch_number: EpochNumber, + ) -> Result, String> { + self.consensus_graph() + .get_block_hashes_by_epoch(epoch_number) + } +} #[async_trait] impl EthApiServer for EthApi { /// Returns the protocol version encoded as a string. - async fn protocol_version(&self) -> RpcResult { todo!() } + async fn protocol_version(&self) -> RpcResult { Ok(U64::from(65)) } /// Returns an object with data about the sync status or false. - fn syncing(&self) -> RpcResult { todo!() } + fn syncing(&self) -> RpcResult { Ok(self.sync_status()) } /// Returns the client coinbase address. - async fn author(&self) -> RpcResult
{ todo!() } + async fn author(&self) -> RpcResult
{ Ok(H160::zero()) } /// Returns a list of addresses owned by client. - fn accounts(&self) -> RpcResult> { todo!() } + fn accounts(&self) -> RpcResult> { Ok(vec![]) } /// Returns the number of most recent block. - fn block_number(&self) -> RpcResult { todo!() } + fn block_number(&self) -> RpcResult { + self.latest_block_number().map_err(|err| err.into()) + } /// Returns the chain ID of the current network. - async fn chain_id(&self) -> RpcResult> { todo!() } + async fn chain_id(&self) -> RpcResult> { + Ok(Some(self.chain_id().into())) + } /// Returns information about a block by hash. async fn block_by_hash( &self, hash: H256, full: bool, ) -> RpcResult> { - todo!() + self.block_by_hash(hash, full).map_err(|err| err.into()) } /// Returns information about a block by number. async fn block_by_number( &self, number: BlockNumberOrTag, full: bool, ) -> RpcResult> { - todo!() + self.block_by_number(number, full).map_err(|err| err.into()) } /// Returns the number of transactions in a block from a block matching the @@ -53,7 +1052,8 @@ impl EthApiServer for EthApi { async fn block_transaction_count_by_hash( &self, hash: H256, ) -> RpcResult> { - todo!() + self.block_transaction_count_by_hash(hash) + .map_err(|err| err.into()) } /// Returns the number of transactions in a block matching the given block @@ -61,7 +1061,8 @@ impl EthApiServer for EthApi { async fn block_transaction_count_by_number( &self, number: BlockNumberOrTag, ) -> RpcResult> { - todo!() + self.block_transaction_count_by_number(number) + .map_err(|err| err.into()) } /// Returns the number of uncles in a block from a block matching the given @@ -69,35 +1070,41 @@ impl EthApiServer for EthApi { async fn block_uncles_count_by_hash( &self, hash: H256, ) -> RpcResult> { - todo!() + self.block_uncles_count_by_hash(hash) + .map_err(|err| err.into()) } /// Returns the number of uncles in a block with given block number. async fn block_uncles_count_by_number( &self, number: BlockNumberOrTag, ) -> RpcResult> { - todo!() + self.block_uncles_count_by_number(number) + .map_err(|err| err.into()) } /// Returns all transaction receipts for a given block. async fn block_receipts( &self, block_id: BlockId, ) -> RpcResult>> { - todo!() + self.get_block_receipts(block_id) + .map(|val| Some(val)) + .map_err(|e| e.into()) } /// Returns an uncle block of the given block and index. async fn uncle_by_block_hash_and_index( &self, hash: H256, index: Index, ) -> RpcResult> { - todo!() + let _ = (hash, index); + Ok(None) } /// Returns an uncle block of the given block and index. async fn uncle_by_block_number_and_index( &self, number: BlockNumberOrTag, index: Index, ) -> RpcResult> { - todo!() + let _ = (number, index); + Ok(None) } /// Returns the EIP-2718 encoded transaction if it exists. 
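// --- Illustrative sketch (hypothetical `MyError` / `MyApi` types, not this crate's) ---
// The hunk above applies one pattern throughout the `EthApiServer` impl: each trait
// method forwards to an inherent method on `EthApi` and converts the crate error with
// `map_err(|err| err.into())`, relying on the `From<EthApiError> for ErrorObjectOwned`
// conversion introduced earlier in this patch. A minimal, self-contained version of
// that delegation + error-conversion pattern could look like this:

use jsonrpsee::types::ErrorObjectOwned;

// Stand-in for the crate's internal error type.
#[derive(Debug)]
struct MyError(String);

impl From<MyError> for ErrorObjectOwned {
    fn from(e: MyError) -> Self {
        // Fixed JSON-RPC "internal error" code (-32603) for brevity; the patch
        // instead derives code, message and data from the legacy JsonRpcError.
        ErrorObjectOwned::owned(-32603, e.0, None::<()>)
    }
}

// Stand-in for the `EthApi` handler.
struct MyApi;

impl MyApi {
    // Inherent helper returning the internal error type.
    fn block_number(&self) -> Result<u64, MyError> { Ok(42) }
}

// The async trait method only delegates and converts the error type.
async fn rpc_block_number(api: &MyApi) -> Result<u64, ErrorObjectOwned> {
    api.block_number().map_err(|err| err.into())
}
// --- end of sketch; the patch resumes below ---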
@@ -107,6 +1114,7 @@ impl EthApiServer for EthApi { async fn raw_transaction_by_hash( &self, hash: H256, ) -> RpcResult> { + let _ = hash; todo!() } @@ -115,7 +1123,7 @@ impl EthApiServer for EthApi { async fn transaction_by_hash( &self, hash: H256, ) -> RpcResult> { - todo!() + self.transaction_by_hash(hash).map_err(|err| err.into()) } /// Returns information about a raw transaction by block hash and @@ -123,6 +1131,7 @@ impl EthApiServer for EthApi { async fn raw_transaction_by_block_hash_and_index( &self, hash: H256, index: Index, ) -> RpcResult> { + let _ = (hash, index); todo!() } @@ -131,7 +1140,9 @@ impl EthApiServer for EthApi { async fn transaction_by_block_hash_and_index( &self, hash: H256, index: Index, ) -> RpcResult> { - todo!() + let phantom_block = self.phantom_block_by_hash(hash)?; + + Ok(EthApi::block_tx_by_index(phantom_block, index.value())) } /// Returns information about a raw transaction by block number and @@ -139,6 +1150,7 @@ impl EthApiServer for EthApi { async fn raw_transaction_by_block_number_and_index( &self, number: BlockNumberOrTag, index: Index, ) -> RpcResult> { + let _ = (number, index); todo!() } @@ -147,13 +1159,16 @@ impl EthApiServer for EthApi { async fn transaction_by_block_number_and_index( &self, number: BlockNumberOrTag, index: Index, ) -> RpcResult> { - todo!() + let phantom_block = self.phantom_block_by_number(number)?; + + Ok(EthApi::block_tx_by_index(phantom_block, index.value())) } /// Returns information about a transaction by sender and nonce. async fn transaction_by_sender_and_nonce( &self, address: Address, nonce: U64, ) -> RpcResult> { + let _ = (address, nonce); todo!() } @@ -161,14 +1176,15 @@ impl EthApiServer for EthApi { async fn transaction_receipt( &self, hash: H256, ) -> RpcResult> { - todo!() + self.transaction_receipt(hash).map_err(|err| err.into()) } /// Returns the balance of the account of given address. async fn balance( &self, address: Address, block_number: Option, ) -> RpcResult { - todo!() + self.user_balance(address, block_number) + .map_err(|err| err.into()) } /// Returns the value from a storage position at a given address @@ -176,7 +1192,8 @@ impl EthApiServer for EthApi { &self, address: Address, index: JsonStorageKey, block_number: Option, ) -> RpcResult { - todo!() + self.storage_at(address, index, block_number) + .map_err(|err| err.into()) } /// Returns the number of transactions sent from an address at given block @@ -184,25 +1201,29 @@ impl EthApiServer for EthApi { async fn transaction_count( &self, address: Address, block_number: Option, ) -> RpcResult { - todo!() + self.next_nonce(address, block_number) + .map_err(|err| err.into()) } /// Returns code at a given address at given block number. async fn get_code( &self, address: Address, block_number: Option, ) -> RpcResult { - todo!() + self.code_at(address, block_number) + .map_err(|err| err.into()) } /// Returns the block's header at given number. async fn header_by_number( &self, hash: BlockNumberOrTag, ) -> RpcResult> { + let _ = hash; todo!() } /// Returns the block's header at given hash. 
async fn header_by_hash(&self, hash: H256) -> RpcResult> { + let _ = hash; todo!() } @@ -224,7 +1245,10 @@ impl EthApiServer for EthApi { // state_overrides: Option, // block_overrides: Option>, ) -> RpcResult { - todo!() + let (execution, _estimation) = + self.exec_transaction(request, block_number)?; + + Ok(execution.output.into()) } /// Simulate arbitrary number of transactions at an arbitrary blockchain @@ -266,11 +1290,14 @@ impl EthApiServer for EthApi { block_number: Option, // state_override: Option, ) -> RpcResult { - todo!() + let (_, estimated_gas) = + self.exec_transaction(request, block_number)?; + + Ok(estimated_gas) } /// Returns the current price per gas in wei. - async fn gas_price(&self) -> RpcResult { todo!() } + async fn gas_price(&self) -> RpcResult { Ok(self.gas_price()) } /// Returns the account details by specifying an address and a block /// number/tag @@ -282,7 +1309,9 @@ impl EthApiServer for EthApi { /// Introduced in EIP-1559, returns suggestion for the priority for dynamic /// fee transactions. - async fn max_priority_fee_per_gas(&self) -> RpcResult { todo!() } + async fn max_priority_fee_per_gas(&self) -> RpcResult { + self.max_priority_fee_per_gas().map_err(|err| err.into()) + } /// Introduced in EIP-4844, returns the current blob base fee in wei. // async fn blob_base_fee(&self) -> RpcResult; @@ -300,14 +1329,19 @@ impl EthApiServer for EthApi { &self, block_count: U64, newest_block: BlockNumberOrTag, reward_percentiles: Option>, ) -> RpcResult { - todo!() + self.fee_history( + block_count.as_u64().into(), + newest_block, + reward_percentiles, + ) + .map_err(|err| err.into()) } /// Returns whether the client is actively mining new blocks. - async fn is_mining(&self) -> RpcResult { todo!() } + async fn is_mining(&self) -> RpcResult { Ok(false) } /// Returns the number of hashes per second that the node is mining with. - async fn hashrate(&self) -> RpcResult { todo!() } + async fn hashrate(&self) -> RpcResult { Ok(U256::zero()) } /// Returns the hash of the current block, the seedHash, and the boundary /// condition to be met (“target”) @@ -322,14 +1356,16 @@ impl EthApiServer for EthApi { async fn submit_hashrate( &self, hashrate: U256, id: H256, ) -> RpcResult { - todo!() + let _ = (hashrate, id); + Ok(false) } /// Used for submitting a proof-of-work solution. async fn submit_work( &self, nonce: H64, pow_hash: H256, mix_digest: H256, ) -> RpcResult { - todo!() + let _ = (nonce, pow_hash, mix_digest); + Ok(false) } /// Sends transaction; will block waiting for signer to return the @@ -337,18 +1373,39 @@ impl EthApiServer for EthApi { async fn send_transaction( &self, request: TransactionRequest, ) -> RpcResult { + let _ = request; todo!() } /// Sends signed transaction, returning its hash. async fn send_raw_transaction(&self, bytes: Bytes) -> RpcResult { - todo!() + let tx = if let Ok(tx) = + TransactionWithSignature::from_raw(&bytes.into_vec()) + { + tx + } else { + bail!(EthApiError::FailedToDecodeSignedTransaction) + }; + + if tx.space() != Space::Ethereum { + bail!(EthApiError::Other( + "Incorrect transaction space".to_string() + )); + } + + if tx.recover_public().is_err() { + bail!(EthApiError::InvalidTransactionSignature); + } + + let r = self.send_transaction_with_signature(tx)?; + Ok(r) } /// Returns an Ethereum specific signature with: /// sign(keccak256("\x19Ethereum Signed Message:\n" /// + len(message) + message))). 
async fn sign(&self, address: Address, message: Bytes) -> RpcResult { + let _ = (address, message); todo!() } @@ -357,6 +1414,7 @@ impl EthApiServer for EthApi { async fn sign_transaction( &self, transaction: TransactionRequest, ) -> RpcResult { + let _ = transaction; todo!() } } diff --git a/crates/rpc/rpc/src/lib.rs b/crates/rpc/rpc/src/lib.rs index e6bd68973..b7e63979d 100644 --- a/crates/rpc/rpc/src/lib.rs +++ b/crates/rpc/rpc/src/lib.rs @@ -6,6 +6,7 @@ mod rpc; mod web3; pub use debug::DebugApi; +pub use eth::EthApi; pub use net::NetApi; pub use rpc::RPCApi; pub use web3::Web3Api; From 628c6d475cc69a4c6bed9c050f6a39140070d151 Mon Sep 17 00:00:00 2001 From: Pana Date: Tue, 24 Sep 2024 16:29:22 +0800 Subject: [PATCH 05/31] add three fake rpc trait impl: trace, filter, pubsub --- crates/rpc/rpc/src/filter.rs | 49 ++++++++++++++++++++++++++++++++++++ crates/rpc/rpc/src/lib.rs | 6 +++++ crates/rpc/rpc/src/pubsub.rs | 20 +++++++++++++++ crates/rpc/rpc/src/trace.rs | 34 +++++++++++++++++++++++++ 4 files changed, 109 insertions(+) create mode 100644 crates/rpc/rpc/src/filter.rs create mode 100644 crates/rpc/rpc/src/pubsub.rs create mode 100644 crates/rpc/rpc/src/trace.rs diff --git a/crates/rpc/rpc/src/filter.rs b/crates/rpc/rpc/src/filter.rs new file mode 100644 index 000000000..5ad922dbd --- /dev/null +++ b/crates/rpc/rpc/src/filter.rs @@ -0,0 +1,49 @@ +use cfx_rpc_eth_api::EthFilterApiServer; +use cfx_rpc_eth_types::{EthRpcLogFilter as Filter, FilterChanges, Log}; +use cfx_types::H128 as FilterId; +use jsonrpsee::core::RpcResult; + +type PendingTransactionFilterKind = (); + +pub struct EthFilterApi; + +impl EthFilterApi { + pub fn new() -> EthFilterApi { EthFilterApi } +} + +#[async_trait::async_trait] +impl EthFilterApiServer for EthFilterApi { + async fn new_filter(&self, filter: Filter) -> RpcResult { + let _ = filter; + todo!() + } + + async fn new_block_filter(&self) -> RpcResult { todo!() } + + async fn new_pending_transaction_filter( + &self, kind: Option, + ) -> RpcResult { + let _ = kind; + todo!() + } + + async fn filter_changes(&self, id: FilterId) -> RpcResult { + let _ = id; + todo!() + } + + async fn filter_logs(&self, id: FilterId) -> RpcResult> { + let _ = id; + todo!() + } + + async fn uninstall_filter(&self, id: FilterId) -> RpcResult { + let _ = id; + todo!() + } + + async fn logs(&self, filter: Filter) -> RpcResult> { + let _ = filter; + todo!() + } +} diff --git a/crates/rpc/rpc/src/lib.rs b/crates/rpc/rpc/src/lib.rs index b7e63979d..3ebd9e1da 100644 --- a/crates/rpc/rpc/src/lib.rs +++ b/crates/rpc/rpc/src/lib.rs @@ -1,12 +1,18 @@ mod debug; mod eth; +mod filter; pub mod helpers; mod net; +mod pubsub; mod rpc; +mod trace; mod web3; pub use debug::DebugApi; pub use eth::EthApi; +pub use filter::EthFilterApi; pub use net::NetApi; +pub use pubsub::PubSubApi; pub use rpc::RPCApi; +pub use trace::TraceApi; pub use web3::Web3Api; diff --git a/crates/rpc/rpc/src/pubsub.rs b/crates/rpc/rpc/src/pubsub.rs new file mode 100644 index 000000000..346bb55e2 --- /dev/null +++ b/crates/rpc/rpc/src/pubsub.rs @@ -0,0 +1,20 @@ +use cfx_rpc_eth_api::EthPubSubApiServer; +use cfx_rpc_eth_types::eth_pubsub::{Kind as SubscriptionKind, Params}; +use jsonrpsee::{core::SubscriptionResult, PendingSubscriptionSink}; + +pub struct PubSubApi; + +impl PubSubApi { + pub fn new() -> PubSubApi { PubSubApi } +} + +#[async_trait::async_trait] +impl EthPubSubApiServer for PubSubApi { + async fn subscribe( + &self, pending: PendingSubscriptionSink, kind: SubscriptionKind, + params: Option, + ) -> 
SubscriptionResult { + let _ = (pending, kind, params); + todo!() + } +} diff --git a/crates/rpc/rpc/src/trace.rs b/crates/rpc/rpc/src/trace.rs new file mode 100644 index 000000000..dfced0764 --- /dev/null +++ b/crates/rpc/rpc/src/trace.rs @@ -0,0 +1,34 @@ +use cfx_rpc_eth_api::TraceApiServer; +use cfx_rpc_eth_types::{BlockNumber, LocalizedTrace, TraceFilter}; +use cfx_types::H256; +use jsonrpsee::core::RpcResult; + +pub struct TraceApi; + +impl TraceApi { + pub fn new() -> TraceApi { TraceApi } +} + +#[async_trait::async_trait] +impl TraceApiServer for TraceApi { + fn block_traces( + &self, block_number: BlockNumber, + ) -> RpcResult>> { + let _ = block_number; + todo!() + } + + fn filter_traces( + &self, filter: TraceFilter, + ) -> RpcResult>> { + let _ = filter; + todo!() + } + + fn transaction_traces( + &self, tx_hash: H256, + ) -> RpcResult>> { + let _ = tx_hash; + todo!() + } +} From 35685d33cf8894c5bff96f20b65fc78f24d6c9a4 Mon Sep 17 00:00:00 2001 From: Pana Date: Tue, 29 Oct 2024 14:38:04 +0800 Subject: [PATCH 06/31] return not implemented error for eth not implement methods --- Cargo.lock | 2 +- crates/rpc/rpc/src/eth.rs | 51 ++++++++++++++++++--------------------- 2 files changed, 24 insertions(+), 29 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 49558eceb..0b90bfdaf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1333,7 +1333,7 @@ dependencies = [ "clap", "error-chain", "geth-tracer", - "jsonrpc-core", + "jsonrpc-core 15.1.0", "jsonrpsee", "keccak-hash", "parity-version", diff --git a/crates/rpc/rpc/src/eth.rs b/crates/rpc/rpc/src/eth.rs index 2e9b0a1ad..93b291bf4 100644 --- a/crates/rpc/rpc/src/eth.rs +++ b/crates/rpc/rpc/src/eth.rs @@ -15,7 +15,10 @@ use cfx_rpc_eth_types::{ Receipt, SyncInfo, SyncStatus, Transaction, TransactionRequest, }; use cfx_rpc_primitives::{Bytes, Index, U64 as HexU64}; -use cfx_rpc_utils::error::{errors::*, jsonrpc_error_helpers::*}; +use cfx_rpc_utils::error::{ + errors::*, jsonrpc_error_helpers::*, + jsonrpsee_error_helpers::internal_error as jsonrpsee_internal_error, +}; use cfx_statedb::StateDbExt; use cfx_types::{ Address, AddressSpaceUtil, BigEndianHash, Space, H160, H256, H64, U256, U64, @@ -76,29 +79,21 @@ impl EthApi { pub fn fetch_block_by_height( &self, height: u64, ) -> Result { - let maybe_block = self.consensus_graph().get_phantom_block_by_number( - EpochNumber::Number(height), - None, - false, - )?; - if let Some(block) = maybe_block { - Ok(block) - } else { - Err("Specified block header does not exist".into()) - } + self.consensus_graph() + .get_phantom_block_by_number( + EpochNumber::Number(height), + None, + false, + )? + .ok_or("Specified block header does not exist".to_string()) } pub fn fetch_block_by_hash( &self, hash: &H256, ) -> Result { - let maybe_block = self - .consensus_graph() - .get_phantom_block_by_hash(hash, false)?; - if let Some(block) = maybe_block { - Ok(block) - } else { - Err("Specified block header does not exist".into()) - } + self.consensus_graph() + .get_phantom_block_by_hash(hash, false)? 
+ .ok_or("Specified block header does not exist".into()) } pub fn exec_transaction( @@ -1115,7 +1110,7 @@ impl EthApiServer for EthApi { &self, hash: H256, ) -> RpcResult> { let _ = hash; - todo!() + Err(jsonrpsee_internal_error("Not implemented")) } /// Returns the information about a transaction requested by transaction @@ -1132,7 +1127,7 @@ impl EthApiServer for EthApi { &self, hash: H256, index: Index, ) -> RpcResult> { let _ = (hash, index); - todo!() + Err(jsonrpsee_internal_error("Not implemented")) } /// Returns information about a transaction by block hash and transaction @@ -1151,7 +1146,7 @@ impl EthApiServer for EthApi { &self, number: BlockNumberOrTag, index: Index, ) -> RpcResult> { let _ = (number, index); - todo!() + Err(jsonrpsee_internal_error("Not implemented")) } /// Returns information about a transaction by block number and transaction @@ -1169,7 +1164,7 @@ impl EthApiServer for EthApi { &self, address: Address, nonce: U64, ) -> RpcResult> { let _ = (address, nonce); - todo!() + Err(jsonrpsee_internal_error("Not implemented")) } /// Returns the receipt of a transaction by transaction hash. @@ -1218,13 +1213,13 @@ impl EthApiServer for EthApi { &self, hash: BlockNumberOrTag, ) -> RpcResult> { let _ = hash; - todo!() + Err(jsonrpsee_internal_error("Not implemented")) } /// Returns the block's header at given hash. async fn header_by_hash(&self, hash: H256) -> RpcResult> { let _ = hash; - todo!() + Err(jsonrpsee_internal_error("Not implemented")) } /// `eth_simulateV1` executes an arbitrary number of transactions on top of @@ -1374,7 +1369,7 @@ impl EthApiServer for EthApi { &self, request: TransactionRequest, ) -> RpcResult { let _ = request; - todo!() + Err(jsonrpsee_internal_error("Not implemented")) } /// Sends signed transaction, returning its hash. @@ -1406,7 +1401,7 @@ impl EthApiServer for EthApi { /// + len(message) + message))). async fn sign(&self, address: Address, message: Bytes) -> RpcResult { let _ = (address, message); - todo!() + Err(jsonrpsee_internal_error("Not implemented")) } /// Signs a transaction that can be submitted to the network at a later time @@ -1415,6 +1410,6 @@ impl EthApiServer for EthApi { &self, transaction: TransactionRequest, ) -> RpcResult { let _ = transaction; - todo!() + Err(jsonrpsee_internal_error("Not implemented")) } } From cf41cce9d4d3522edf7e2b8a7d964f52fbcdad3e Mon Sep 17 00:00:00 2001 From: Chenxing Li Date: Tue, 29 Oct 2024 18:54:19 +0800 Subject: [PATCH 07/31] Remove StateDb checkpoint --- crates/dbs/statedb/src/lib.rs | 204 +++++++------------------------- crates/dbs/statedb/src/tests.rs | 116 ------------------ 2 files changed, 45 insertions(+), 275 deletions(-) diff --git a/crates/dbs/statedb/src/lib.rs b/crates/dbs/statedb/src/lib.rs index e974a14cb..eb497bc0c 100644 --- a/crates/dbs/statedb/src/lib.rs +++ b/crates/dbs/statedb/src/lib.rs @@ -19,7 +19,7 @@ mod tests; pub use self::{ error::{Error, ErrorKind, Result}, - impls::{StateDb as StateDbGeneric, StateDbCheckpointMethods}, + impls::StateDb as StateDbGeneric, statedb_ext::StateDbExt, }; pub use cfx_storage::utils::access_mode; @@ -35,10 +35,6 @@ mod impls { // see `delete_all` type AccessedEntries = BTreeMap; - // A checkpoint contains the previous values for all keys - // modified or deleted since the last checkpoint. - type Checkpoint = BTreeMap>; - // Use generic type for better test-ability. 
pub struct StateDb { /// Contains the original storage key values for all loaded and @@ -48,25 +44,6 @@ mod impls { /// The underlying storage, The storage is updated only upon fn /// commit(). storage: Box, - - /// Checkpoints allow callers to revert un-committed changes. - checkpoints: Vec, - } - - // Note: Not used currently. - pub trait StateDbCheckpointMethods { - /// Create a new checkpoint. Returns the index of the checkpoint. - fn checkpoint(&mut self) -> usize; - - /// Discard checkpoint. - /// This means giving up the ability to revert to the latest checkpoint. - /// Older checkpoints remain valid. - fn discard_checkpoint(&mut self); - - /// Revert to checkpoint. - /// Revert all values in `accessed_entries` to their value before - /// creating the latest checkpoint. - fn revert_to_checkpoint(&mut self); } impl StateDb { @@ -74,7 +51,6 @@ mod impls { StateDb { accessed_entries: Default::default(), storage, - checkpoints: Default::default(), } } @@ -94,14 +70,6 @@ mod impls { )) } - /// Set `key` to `value` in latest checkpoint if not set previously. - fn update_checkpoint(&mut self, key: &Key, value: Option) { - if let Some(checkpoint) = self.checkpoints.last_mut() { - // only insert if key not in checkpoint already - checkpoint.entry(key.clone()).or_insert(value); - } - } - #[cfg(test)] pub fn get_from_cache(&self, key: &Vec) -> Value { self.accessed_entries @@ -156,7 +124,7 @@ mod impls { self.accessed_entries.get_mut().entry(key_bytes.clone()); let value = value.map(Into::into); - let old_value = match &mut entry { + match &mut entry { Occupied(o) => { // set `current_value` to `value` and keep the old value Some(std::mem::replace( @@ -166,7 +134,7 @@ mod impls { } // Vacant - _ => { + &mut Vacant(_) => { let original_value = self.storage.get(key)?.map(Into::into); entry.or_insert(EntryValue::new_modified( @@ -178,9 +146,6 @@ mod impls { } }; - // store old value in latest checkpoint if not stored yet - self.update_checkpoint(&key_bytes, old_value); - Ok(()) } @@ -285,14 +250,6 @@ mod impls { } } - // update latest checkpoint if necessary - if !AM::READ_ONLY { - for (k, v) in &deleted_kvs { - let v: Value = Some(v.clone().into()); - self.update_checkpoint(k, Some(v)); - } - } - Ok(deleted_kvs) } @@ -312,45 +269,46 @@ mod impls { storage: &dyn StorageStateTrait, accessed_entries: &AccessedEntries, ) -> Result<()> { - if !storage_layouts_to_rewrite + if storage_layouts_to_rewrite .contains_key(&(address.to_vec(), space)) { - let storage_layout_key = - StorageKey::StorageRootKey(address).with_space(space); - let current_storage_layout = match accessed_entries - .get(&storage_layout_key.to_key_bytes()) - { - Some(entry) => match &entry.current_value { - // We don't rewrite storage layout for account to - // delete. - None => { - if accept_account_deletion { - return Ok(()); - } else { - // This is defensive checking, against certain - // cases when we are not deleting the account - // for sure. - bail!(ErrorKind::IncompleteDatabase( - Address::from_slice(address) - )); - } - } - Some(value_ref) => { - StorageLayout::from_bytes(&*value_ref)? - } - }, - None => match storage.get(storage_layout_key)? { - // A new account must set StorageLayout before accessing - // the storage. 
- None => bail!(ErrorKind::IncompleteDatabase( - Address::from_slice(address) - )), - Some(raw) => StorageLayout::from_bytes(raw.as_ref())?, - }, - }; - storage_layouts_to_rewrite - .insert((address.into(), space), current_storage_layout); + return Ok(()); } + let storage_layout_key = + StorageKey::StorageRootKey(address).with_space(space); + let current_storage_layout = match accessed_entries + .get(&storage_layout_key.to_key_bytes()) + { + Some(entry) => match &entry.current_value { + // We don't rewrite storage layout for account to + // delete. + None => { + if accept_account_deletion { + return Ok(()); + } else { + // This is defensive checking, against certain + // cases when we are not deleting the account + // for sure. + bail!(ErrorKind::IncompleteDatabase( + Address::from_slice(address) + )); + } + } + + Some(value_ref) => StorageLayout::from_bytes(&*value_ref)?, + }, + None => match storage.get(storage_layout_key)? { + // A new account must set StorageLayout before accessing + // the storage. + None => bail!(ErrorKind::IncompleteDatabase( + Address::from_slice(address) + )), + Some(raw) => StorageLayout::from_bytes(raw.as_ref())?, + }, + }; + storage_layouts_to_rewrite + .insert((address.into(), space), current_storage_layout); + Ok(()) } @@ -458,7 +416,7 @@ mod impls { } // Set storage layout for contracts with storage modification or // contracts with storage_layout initialization or modification. - for ((k, space), v) in &mut storage_layouts_to_rewrite { + for ((k, space), v) in &storage_layouts_to_rewrite { self.commit_storage_layout( k, *space, @@ -487,9 +445,6 @@ mod impls { mut debug_record: Option<&mut ComputeEpochDebugRecord>, ) -> Result { self.apply_changes_to_storage(debug_record.as_deref_mut())?; - if !self.checkpoints.is_empty() { - panic!("Active checkpoints during state-db commit"); - } let result = match self.storage.get_state_root() { Ok(r) => r, @@ -502,78 +457,6 @@ mod impls { } } - impl StateDbCheckpointMethods for StateDb { - fn checkpoint(&mut self) -> usize { - trace!("Creating checkpoint #{}", self.checkpoints.len()); - self.checkpoints.push(BTreeMap::new()); // no values are modified yet - self.checkpoints.len() - 1 - } - - fn discard_checkpoint(&mut self) { - // checkpoint `n` (to be discarded) - let latest = match self.checkpoints.pop() { - Some(checkpoint) => checkpoint, - None => { - // TODO: panic? - warn!("Attempt to discard non-existent checkpoint"); - return; - } - }; - - trace!("Discarding checkpoint #{}", self.checkpoints.len()); - - // checkpoint `n - 1` - let previous = match self.checkpoints.last_mut() { - Some(checkpoint) => checkpoint, - None => return, - }; - - // insert all keys that have been updated in `n` but not in `n - 1` - if previous.is_empty() { - *previous = latest; - } else { - for (k, v) in latest { - previous.entry(k).or_insert(v); - } - } - } - - fn revert_to_checkpoint(&mut self) { - let checkpoint = match self.checkpoints.pop() { - Some(checkpoint) => checkpoint, - None => { - // TODO: panic? 
- warn!("Attempt to revert to non-existent checkpoint"); - return; - } - }; - - trace!("Reverting to checkpoint #{}", self.checkpoints.len()); - - // revert all modified keys to their old version - for (k, v) in checkpoint { - let entry = self.accessed_entries.get_mut().entry(k); - - match (entry, v) { - // prior to the checkpoint `k` was not present - (Occupied(o), None) => { - o.remove(); - } - // the value under `k` has been modified after checkpoint - (Occupied(mut o), Some(original_value)) => { - o.get_mut().current_value = original_value; - } - (_, _) => { - // keys are not removed from `accessed_entries` other - // than during revert and commit, so this should not - // happen - panic!("Enountered non-existent key while reverting to checkpoint"); - } - } - } - } - } - struct EntryValue { original_value: Value, current_value: Value, @@ -618,7 +501,10 @@ mod impls { EpochId, SkipInputCheck, StorageKey, StorageKeyWithSpace, StorageLayout, }; use std::{ - collections::{btree_map::Entry::Occupied, BTreeMap}, + collections::{ + btree_map::Entry::{Occupied, Vacant}, + BTreeMap, + }, ops::Bound::{Excluded, Included, Unbounded}, sync::Arc, }; diff --git a/crates/dbs/statedb/src/tests.rs b/crates/dbs/statedb/src/tests.rs index e90bbe9d7..4cd1d44df 100644 --- a/crates/dbs/statedb/src/tests.rs +++ b/crates/dbs/statedb/src/tests.rs @@ -213,119 +213,3 @@ fn test_basic() { // // we need to write all values modified or removed // assert_eq!(storage.get_num_writes(), 4); } - -#[test] -fn test_checkpoint() { - use super::StateDbCheckpointMethods; - - let mut state_db = init_state_db(); - - // (11, v0) --> (11, v1) - state_db - .set_raw(storage_key(b"11"), value(b"v1"), None) - .unwrap(); - - // create checkpoint #0 - state_db.checkpoint(); - - // delete (22, v0) - state_db.delete(storage_key(b"22"), None).unwrap(); - - // create checkpoint #1 - state_db.checkpoint(); - - // delete (00, v0) and (01, v0) - state_db - .delete_all::(storage_key(b"0"), None) - .unwrap(); - - // discard checkpoint #1 - state_db.discard_checkpoint(); - - // create (33, v0) - state_db - .set_raw(storage_key(b"33"), value(b"v0"), None) - .unwrap(); - - // revert to checkpoint #0 --> undo deletes - state_db.revert_to_checkpoint(); - - state_db.commit(MERKLE_NULL_NODE, None).unwrap(); - // let storage = (state_db.get_storage_mut() as &dyn - // Any).downcast_ref::().unwrap(); let contents = - // &storage.contents; - // - // // only the initial `set` was committed - // let expected: HashMap<_, _> = [ - // (key(b"00"), value(b"v0")), - // (key(b"01"), value(b"v0")), - // (key(b"11"), value(b"v1")), - // (key(b"22"), value(b"v0")), - // ] - // .iter() - // .cloned() - // .collect(); - // - // assert_eq!(*contents, expected); - // - // // we need to read all values touched - // assert_eq!(storage.get_num_reads(), 5); - // - // // we need to write all values modified or removed - // assert_eq!(storage.get_num_writes(), 1); -} - -#[test] -fn test_checkpoint_evict_memory() { - use super::StateDbCheckpointMethods; - - let mut state_db = init_state_db(); - - // value is not read yet - assert_eq!(state_db.get_from_cache(&key(b"00")), None); - - // create checkpoint #0 - state_db.checkpoint(); - - // (00, v0) --> (00, v1) [new value] - state_db - .set_raw(storage_key(b"00"), value(b"v1"), None) - .unwrap(); - - // value has been read - assert_eq!( - state_db.get_from_cache(&key(b"00")), - Some(value(b"v1").into()) - ); - - // create checkpoint #1 - state_db.checkpoint(); - - // (00, v1) --> (00, v0) [back to original value] - state_db - 
.set_raw(storage_key(b"00"), value(b"v0"), None) - .unwrap(); - - // value stays in state-db - assert_eq!( - state_db.get_from_cache(&key(b"00")), - Some(value(b"v0").into()) - ); - - // revert to checkpoint #1 - // (00, v0) --> (00, v1) - state_db.revert_to_checkpoint(); - - // value stays in state-db - assert_eq!( - state_db.get_from_cache(&key(b"00")), - Some(value(b"v1").into()) - ); - - // revert to checkpoint #0 - // (00, v0) --> None - state_db.revert_to_checkpoint(); - - // value is removed from state-db - assert_eq!(state_db.get_from_cache(&key(b"00")), None); -} From 42d9345be7144a054b1ddc8adbc62ccd6f094173 Mon Sep 17 00:00:00 2001 From: Pana Date: Sat, 2 Nov 2024 15:56:58 +0800 Subject: [PATCH 08/31] move parity crate dependencies to workspace --- Cargo.toml | 20 +++++++++++++++++-- bins/cfx_key/Cargo.toml | 4 ++-- bins/cfx_store/Cargo.toml | 4 ++-- bins/conflux/Cargo.toml | 8 ++++---- crates/cfx_key/Cargo.toml | 8 ++++---- crates/cfx_store/Cargo.toml | 6 +++--- crates/cfx_types/Cargo.toml | 2 +- crates/cfxcore/core/Cargo.toml | 13 +++++------- .../cfxcore/core/benchmark/storage/Cargo.toml | 4 ++-- .../core/src/pos/storage/schemadb/Cargo.toml | 5 +---- crates/cfxcore/executor/Cargo.toml | 6 +++--- crates/client/Cargo.toml | 4 ++-- crates/dbs/kvdb-rocksdb/Cargo.toml | 10 ++-------- crates/dbs/storage/Cargo.toml | 5 +---- crates/network/Cargo.toml | 2 +- crates/primitives/Cargo.toml | 2 +- 16 files changed, 52 insertions(+), 51 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index c367b070e..9bb2675e4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -159,8 +159,6 @@ cfx-rpc-utils = { path = "./crates/rpc/rpc-utils" } serde = { version = "1.0", features = ["derive", "alloc"] } serde_json = "1.0" serde_derive = "1.0" -rlp = "0.4.0" -rlp_derive = { git = "https://github.com/Conflux-Chain/conflux-parity-deps.git", rev = "1597a9cab02343eb2322ca0ac58d39b64e3f42d1" } hex = "0.4.3" rustc-hex = "2.1" @@ -212,3 +210,21 @@ itertools = "0.10.0" once_cell = "1.17.1" chrono = "=0.4.38" byteorder = "1.2.7" + +# conflux forked crates +rocksdb = { git = "https://github.com/Conflux-Chain/rust-rocksdb.git", rev = "3773afe5b953997188f37c39308105b5deb0faac" } + +# parity crates +rlp = "0.4.0" +rlp_derive = { git = "https://github.com/Conflux-Chain/conflux-parity-deps.git", rev = "1597a9cab02343eb2322ca0ac58d39b64e3f42d1" } +panic_hook = { git = "https://github.com/Conflux-Chain/conflux-parity-deps.git", rev = "1597a9cab02343eb2322ca0ac58d39b64e3f42d1" } +dir = { git = "https://github.com/Conflux-Chain/conflux-parity-deps.git", rev = "1597a9cab02343eb2322ca0ac58d39b64e3f42d1" } +unexpected = { git = "https://github.com/Conflux-Chain/conflux-parity-deps.git", rev = "1597a9cab02343eb2322ca0ac58d39b64e3f42d1" } +ethereum-types = "0.9" +parity-wordlist = "1.3.0" +parity-crypto = "0.9.0" +parity-path = "0.1" +parity-util-mem = { version = "0.5", default-features = false } +parity-secp256k1 = { git = "https://github.com/paritytech/rust-secp256k1.git" } +ctrlc = { git = "https://github.com/paritytech/rust-ctrlc.git", rev="b523017108bb2d571a7a69bd97bc406e63bc7a9d" } +substrate-bn = { git = "https://github.com/paritytech/bn", rev="63f8c587356a67b33c7396af98e065b66fca5dda", default-features = false } diff --git a/bins/cfx_key/Cargo.toml b/bins/cfx_key/Cargo.toml index a0628e896..0d77ad2d5 100644 --- a/bins/cfx_key/Cargo.toml +++ b/bins/cfx_key/Cargo.toml @@ -8,8 +8,8 @@ authors = ["Parity Technologies "] docopt = "1.0" env_logger = "0.5" cfxkey = { workspace = true } -panic_hook = { git = 
"https://github.com/Conflux-Chain/conflux-parity-deps.git", rev = "1597a9cab02343eb2322ca0ac58d39b64e3f42d1" } -parity-wordlist="1.2" +panic_hook = { workspace = true } +parity-wordlist={ workspace = true } rustc-hex = "2.1" serde = "1.0" serde_derive = "1.0" diff --git a/bins/cfx_store/Cargo.toml b/bins/cfx_store/Cargo.toml index e98609fee..6167b71ed 100644 --- a/bins/cfx_store/Cargo.toml +++ b/bins/cfx_store/Cargo.toml @@ -13,8 +13,8 @@ serde = "1.0" serde_derive = "1.0" parking_lot = { workspace = true } cfxstore = { workspace = true } -panic_hook = { git = "https://github.com/Conflux-Chain/conflux-parity-deps.git", rev = "1597a9cab02343eb2322ca0ac58d39b64e3f42d1" } -dir = { git = "https://github.com/Conflux-Chain/conflux-parity-deps.git", rev = "1597a9cab02343eb2322ca0ac58d39b64e3f42d1" } +panic_hook = { workspace = true } +dir = { workspace = true } [[bin]] name = "cfxstore" diff --git a/bins/conflux/Cargo.toml b/bins/conflux/Cargo.toml index 303528d1b..4f28dcbd0 100644 --- a/bins/conflux/Cargo.toml +++ b/bins/conflux/Cargo.toml @@ -15,15 +15,15 @@ serde = "1.0" serde_json = "1.0" serde_derive = "1.0" parking_lot = { workspace = true } -panic_hook = { git = "https://github.com/Conflux-Chain/conflux-parity-deps.git", rev = "1597a9cab02343eb2322ca0ac58d39b64e3f42d1" } +panic_hook = { workspace = true } app_dirs = "1.2.1" -dir = { git = "https://github.com/Conflux-Chain/conflux-parity-deps.git", rev = "1597a9cab02343eb2322ca0ac58d39b64e3f42d1" } +dir = { workspace = true } cfxstore = { workspace = true } cfxcore-accounts = { workspace = true } home = "0.5.0" rpassword = "4.0.1" io = { workspace = true } -ctrlc = { git = "https://github.com/paritytech/rust-ctrlc.git", rev="b523017108bb2d571a7a69bd97bc406e63bc7a9d" } +ctrlc = { workspace = true } jsonrpc-core = "18.0.0" jsonrpc-tcp-server = "18.0.0" jsonrpc-http-server = "18.0.0" @@ -47,7 +47,7 @@ client = { workspace = true } cfx-types = { workspace = true } docopt = "1.0" cfxkey = { workspace = true } -parity-wordlist = "1.3.0" +parity-wordlist = { workspace = true } rustc-hex = "2.1" env_logger = "0.5" malloc_size_of = { workspace = true } diff --git a/crates/cfx_key/Cargo.toml b/crates/cfx_key/Cargo.toml index 06f943b20..ac694c29b 100644 --- a/crates/cfx_key/Cargo.toml +++ b/crates/cfx_key/Cargo.toml @@ -7,11 +7,11 @@ authors = ["Conflux Foundation"] [dependencies] cfx-types = { workspace = true } edit-distance = "2.0" -parity-crypto = "0.9.0" -parity-secp256k1 = { git = "https://github.com/paritytech/rust-secp256k1.git" } +parity-crypto = { workspace = true } +parity-secp256k1 = { workspace = true } lazy_static = "1.4" log = "0.4" -parity-wordlist = "1.3" +parity-wordlist = { workspace = true } quick-error = "1.2.2" rand = "0.7" rustc-hex = "2.1" @@ -23,6 +23,6 @@ malloc_size_of_derive = { workspace = true } malloc_size_of = { workspace = true } docopt = "1.0" env_logger = "0.5" -panic_hook = { git = "https://github.com/Conflux-Chain/conflux-parity-deps.git", rev = "1597a9cab02343eb2322ca0ac58d39b64e3f42d1" } +panic_hook = { workspace = true } threadpool = "1.7" diff --git a/crates/cfx_store/Cargo.toml b/crates/cfx_store/Cargo.toml index 3f6e24a87..6f770c3ce 100644 --- a/crates/cfx_store/Cargo.toml +++ b/crates/cfx_store/Cargo.toml @@ -17,10 +17,10 @@ rustc-hex = "2.1" tiny-keccak = "1.4" time = "0.1.34" parking_lot = { workspace = true } -parity-crypto = { version = "0.9.0", features = ["publickey"] } -dir = { git = "https://github.com/Conflux-Chain/conflux-parity-deps.git", rev = "1597a9cab02343eb2322ca0ac58d39b64e3f42d1" } 
+parity-crypto = { workspace = true, features = ["publickey"] } +dir = { workspace = true } smallvec = "1.4" -parity-wordlist = "1.0" +parity-wordlist = { workspace = true } tempdir = "0.3" [dev-dependencies] diff --git a/crates/cfx_types/Cargo.toml b/crates/cfx_types/Cargo.toml index 7140cffeb..bc107f965 100644 --- a/crates/cfx_types/Cargo.toml +++ b/crates/cfx_types/Cargo.toml @@ -5,7 +5,7 @@ license = "GPL-3.0" description = "Conflux types" [dependencies] -ethereum-types = "0.9" +ethereum-types = { workspace = true } serde_derive = { workspace = true } serde = { workspace = true } rlp = { workspace = true } diff --git a/crates/cfxcore/core/Cargo.toml b/crates/cfxcore/core/Cargo.toml index f6c61c5f4..b67033d90 100644 --- a/crates/cfxcore/core/Cargo.toml +++ b/crates/cfxcore/core/Cargo.toml @@ -3,12 +3,12 @@ description = "Conflux core library" homepage = "https://www.confluxnetwork.org" license = "GPL-3.0" name = "cfxcore" -version = { workspace = true} +version = { workspace = true } edition = "2021" [dependencies] bit-set = "0.4" -substrate-bn = { git = "https://github.com/paritytech/bn", default-features = false, rev="63f8c587356a67b33c7396af98e065b66fca5dda" } +substrate-bn = { workspace = true, default-features = false } byteorder = { workspace = true } cfxkey = { workspace = true } cfx-addr = { workspace = true } @@ -57,7 +57,7 @@ metrics = { workspace = true } network = { workspace = true } num = "0.2" num-traits = { version = "0.2.8", default-features = false } -parity-crypto = "0.9.0" +parity-crypto = { workspace = true } parking_lot = { workspace = true } primal = "0.2.3" primitives = { workspace = true } @@ -92,7 +92,7 @@ toml = "0.5.8" tokio = { version = "1.6", features = ["full"] } tokio-timer = "0.2.13" tokio-stream = "0.1.4" -unexpected = { git = "https://github.com/Conflux-Chain/conflux-parity-deps.git", rev = "1597a9cab02343eb2322ca0ac58d39b64e3f42d1" } +unexpected = { workspace = true } strum = { workspace = true } strum_macros = { workspace = true } smart-default = "0.6.0" @@ -143,6 +143,7 @@ cfx-rpc-cfx-types = { workspace = true } cfx-rpc-eth-types = { workspace = true } jsonrpsee = { workspace = true, features = ["jsonrpsee-types"] } cfx-rpc-utils = { workspace = true } +parity-util-mem = { workspace = true, default-features = false } [dev-dependencies] @@ -154,10 +155,6 @@ proptest-derive = "0.3.0" consensus-types = { path = "src/pos/consensus/consensus-types", features = ["fuzzing"] } #tokio = { version = "0.2.11", features = ["time"] } -[dependencies.parity-util-mem] -version = "0.5" -default-features = false - [features] default = [] # Unfortunately cfg test attributes doesn't work across crates, diff --git a/crates/cfxcore/core/benchmark/storage/Cargo.toml b/crates/cfxcore/core/benchmark/storage/Cargo.toml index 2ecff9c0e..28b1b3241 100644 --- a/crates/cfxcore/core/benchmark/storage/Cargo.toml +++ b/crates/cfxcore/core/benchmark/storage/Cargo.toml @@ -22,13 +22,13 @@ ethcore = { package = "ethcore", git = "https://github.com/paritytech/parity-eth ethkey = { package = "ethkey", git = "https://github.com/paritytech/parity-ethereum", tag = "v2.4.0" } ethcore_types = { package = "common-types", git = "https://github.com/paritytech/parity-ethereum", tag = "v2.4.0" } ethjson = { package = "ethjson", git = "https://github.com/paritytech/parity-ethereum", tag = "v2.4.0" } -ethereum-types = "0.4" +ethereum-types = { workspace = true } heapsize = "0.4" kvdb = "0.4" lazy_static = "1.4" log = "0.4" parking_lot = { workspace = true } -rlp = { version = "0.3.0", feature = 
["ethereum"] } +rlp = { workspace = true, feature = ["ethereum"] } serde_json = "1.0" base64ct = "=1.1.1" bevy = "0.11.3" diff --git a/crates/cfxcore/core/src/pos/storage/schemadb/Cargo.toml b/crates/cfxcore/core/src/pos/storage/schemadb/Cargo.toml index ebe14b34f..787c40309 100644 --- a/crates/cfxcore/core/src/pos/storage/schemadb/Cargo.toml +++ b/crates/cfxcore/core/src/pos/storage/schemadb/Cargo.toml @@ -15,10 +15,7 @@ once_cell = "1.7.2" diem-config = { path = "../../config" } diem-logger = { path = "../../common/logger" } diem-metrics = { path = "../../common/metrics" } - -[dependencies.rocksdb] -git = "https://github.com/Conflux-Chain/rust-rocksdb.git" -rev = "3773afe5b953997188f37c39308105b5deb0faac" +rocksdb = { workspace = true } [dev-dependencies] byteorder = "1.4.3" diff --git a/crates/cfxcore/executor/Cargo.toml b/crates/cfxcore/executor/Cargo.toml index 66c91172d..69a1e1ceb 100644 --- a/crates/cfxcore/executor/Cargo.toml +++ b/crates/cfxcore/executor/Cargo.toml @@ -7,7 +7,7 @@ version = "2.0.2" edition = "2021" [dependencies] -substrate-bn = { git = "https://github.com/paritytech/bn", default-features = false, rev="63f8c587356a67b33c7396af98e065b66fca5dda" } +substrate-bn = { workspace = true, default-features = false } byteorder = "1.0" cfxkey = { workspace = true } cfx-bytes = { workspace = true } @@ -26,7 +26,7 @@ log = { workspace = true } malloc_size_of = { workspace = true } malloc_size_of_derive = { workspace = true } num = "0.2" -parity-crypto = "0.9.0" +parity-crypto = { workspace = true } parking_lot = { workspace = true } primitives = { workspace = true } rlp ={ workspace = true } @@ -40,7 +40,7 @@ solidity-abi-derive = { workspace = true } sha3-macro = { workspace = true } strum = { workspace = true } strum_macros = { workspace = true } -bls-signatures = {git = "https://github.com/Conflux-Chain/bls-signatures.git", rev = "fb52187df92d27c365642cb7e7b2aaf60437cf9c", default-features = false, features = ["multicore"]} +bls-signatures = { workspace = true } tiny-keccak = { workspace = true, features = ["keccak"]} diem-crypto = { path = "../core/src/pos/crypto/crypto" } diem-types = { path = "../core/src/pos/types" } diff --git a/crates/client/Cargo.toml b/crates/client/Cargo.toml index 829c9d1b7..261c2c12d 100644 --- a/crates/client/Cargo.toml +++ b/crates/client/Cargo.toml @@ -25,8 +25,8 @@ cfx-statedb = { workspace = true } cfx-storage = { workspace = true } cfx-vm-types = { workspace = true } app_dirs = "1.2.1" -dir = { git = "https://github.com/Conflux-Chain/conflux-parity-deps.git", rev = "1597a9cab02343eb2322ca0ac58d39b64e3f42d1" } -ctrlc = { git = "https://github.com/paritytech/rust-ctrlc.git", rev="b523017108bb2d571a7a69bd97bc406e63bc7a9d" } +dir = { workspace = true } +ctrlc = { workspace = true } jsonrpc-core = { workspace = true } jsonrpc-tcp-server = { workspace = true } jsonrpc-http-server = { workspace = true } diff --git a/crates/dbs/kvdb-rocksdb/Cargo.toml b/crates/dbs/kvdb-rocksdb/Cargo.toml index 79696b50b..b5342d857 100644 --- a/crates/dbs/kvdb-rocksdb/Cargo.toml +++ b/crates/dbs/kvdb-rocksdb/Cargo.toml @@ -17,14 +17,8 @@ parking_lot = { workspace = true } regex = "1.3.1" malloc_size_of = { workspace = true } malloc_size_of_derive = { workspace = true } - -[dependencies.parity-util-mem] -version = "0.5" -default-features = false +parity-util-mem = { workspace = true, default-features = false } +rocksdb = { workspace = true } [dev-dependencies] tempdir = "0.3.7" - -[dependencies.rocksdb] -git = "https://github.com/Conflux-Chain/rust-rocksdb.git" -rev 
= "3773afe5b953997188f37c39308105b5deb0faac" diff --git a/crates/dbs/storage/Cargo.toml b/crates/dbs/storage/Cargo.toml index a7d00777c..87a0cff15 100644 --- a/crates/dbs/storage/Cargo.toml +++ b/crates/dbs/storage/Cargo.toml @@ -43,14 +43,11 @@ sqlite3-sys = "0.12" strfmt = "0.1" tokio = { version = "0.2", features = ["full"] } once_cell = "1.10.0" +parity-util-mem = { workspace = true, default-features = false } [dev-dependencies] primitives = { workspace = true, features = ["test_no_account_length_check"] } -[dependencies.parity-util-mem] -version = "0.5" -default-features = false - [features] default = ["primitives"] # Unfortunately cfg test attributes doesn't work across crates, diff --git a/crates/network/Cargo.toml b/crates/network/Cargo.toml index 172b2086f..cda06fc49 100644 --- a/crates/network/Cargo.toml +++ b/crates/network/Cargo.toml @@ -24,7 +24,7 @@ serde_derive = "1.0" igd = "0.10" libc = "0.2.66" rand = "0.7" -parity-path = "0.1" +parity-path = { workspace = true } keccak-hash = { workspace = true } enum-map = "0.4.0" enum-map-derive = "0.4.0" diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 62e6c4e62..8b4ceafcb 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -19,7 +19,7 @@ rlp_derive = { workspace = true } serde = { workspace = true, features = ["derive"] } serde_derive = { workspace = true } siphasher = "0.3" -unexpected = { git = "https://github.com/Conflux-Chain/conflux-parity-deps.git", rev = "1597a9cab02343eb2322ca0ac58d39b64e3f42d1" } +unexpected = { workspace = true } once_cell = { workspace = true } cfx-parameters = { workspace = true } From c8f429c6681467a8c06d3475c413c90690d5d13e Mon Sep 17 00:00:00 2001 From: Pana Date: Mon, 4 Nov 2024 11:10:36 +0800 Subject: [PATCH 09/31] make cfx_addr support no_std feature, and optimize code remove already handled fixme --- Cargo.lock | 2 +- crates/cfx_addr/Cargo.toml | 15 +- crates/cfx_addr/src/checksum.rs | 31 --- crates/cfx_addr/src/consts.rs | 188 ++++----------- crates/cfx_addr/src/lib.rs | 218 ++---------------- crates/cfx_addr/src/{errors.rs => types.rs} | 181 ++++++++++++++- crates/cfx_addr/src/utils.rs | 167 ++++++++++++++ crates/cfx_addr/tests/decode.rs | 59 +++++ crates/cfx_addr/tests/encode.rs | 14 ++ .../cfx_addr/{src/tests.rs => tests/misc.rs} | 71 +----- 10 files changed, 485 insertions(+), 461 deletions(-) delete mode 100644 crates/cfx_addr/src/checksum.rs rename crates/cfx_addr/src/{errors.rs => types.rs} (52%) create mode 100644 crates/cfx_addr/src/utils.rs create mode 100644 crates/cfx_addr/tests/decode.rs create mode 100644 crates/cfx_addr/tests/encode.rs rename crates/cfx_addr/{src/tests.rs => tests/misc.rs} (67%) diff --git a/Cargo.lock b/Cargo.lock index fae2e343a..583d3cd69 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1113,7 +1113,7 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "cfx-addr" -version = "0.1.0" +version = "0.2.0" dependencies = [ "cfx-types", "lazy_static", diff --git a/crates/cfx_addr/Cargo.toml b/crates/cfx_addr/Cargo.toml index 23600b5df..0c46a9f1f 100644 --- a/crates/cfx_addr/Cargo.toml +++ b/crates/cfx_addr/Cargo.toml @@ -1,10 +1,17 @@ [package] name = "cfx-addr" -version = "0.1.0" +version = "0.2.0" license = "GPL-3.0" -description = "Conflux Address Encoder/Decoder" +description = "Conflux Base32 Address Encoder/Decoder" +edition = "2021" [dependencies] cfx-types = { workspace = true } -lazy_static = "1.4" -rustc-hex = "2.1" +lazy_static = { workspace = true } + 
+[dev-dependencies] +rustc-hex = { workspace = true } + +[features] +default = ["std"] +std = [] diff --git a/crates/cfx_addr/src/checksum.rs b/crates/cfx_addr/src/checksum.rs deleted file mode 100644 index c4f84a06f..000000000 --- a/crates/cfx_addr/src/checksum.rs +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2021 Conflux Foundation. All rights reserved. -// Conflux is free software and distributed under GNU General Public License. -// See http://www.gnu.org/licenses/ -// -// Modification based on https://github.com/hlb8122/rust-bitcoincash-addr in MIT License. -// A copy of the original license is included in LICENSE.rust-bitcoincash-addr. - -// https://github.com/bitcoincashorg/bitcoincash.org/blob/master/spec/cashaddr.md#checksum -pub fn polymod(v: &[u8]) -> u64 { - let mut c = 1; - for d in v { - let c0 = (c >> 35) as u8; - c = ((c & 0x07ffffffff) << 5) ^ u64::from(*d); - if c0 & 0x01 != 0 { - c ^= 0x98f2bc8e61; - } - if c0 & 0x02 != 0 { - c ^= 0x79b76d99e2; - } - if c0 & 0x04 != 0 { - c ^= 0xf33e5fb3c4; - } - if c0 & 0x08 != 0 { - c ^= 0xae2eabe2a8; - } - if c0 & 0x10 != 0 { - c ^= 0x1e4f43e470; - } - } - c ^ 1 -} diff --git a/crates/cfx_addr/src/consts.rs b/crates/cfx_addr/src/consts.rs index d281875f7..ae8776d80 100644 --- a/crates/cfx_addr/src/consts.rs +++ b/crates/cfx_addr/src/consts.rs @@ -1,15 +1,3 @@ -// Copyright 2021 Conflux Foundation. All rights reserved. -// Conflux is free software and distributed under GNU General Public License. -// See http://www.gnu.org/licenses/ -// -// Modification based on https://github.com/hlb8122/rust-bitcoincash-addr in MIT License. -// A copy of the original license is included in LICENSE.rust-bitcoincash-addr. - -use super::errors::{DecodingError, EncodingError}; - -use cfx_types::address_util::{self, AddressUtil}; -use std::{fmt, string::ToString}; - pub const CHARSET_SIZE: usize = 32; pub const RESERVED_BITS_MASK: u8 = 0xf8; @@ -28,6 +16,7 @@ pub const RESERVED_BITS_MASK: u8 = 0xf8; pub const SIZE_MASK: u8 = 0x07; pub const SIZE_160: u8 = 0x00; + // In Conflux we only have 160 bits hash size, however we keep these unused // sizes for unit test and compatibility. pub const SIZE_192: u8 = 0x01; @@ -38,141 +27,52 @@ pub const SIZE_384: u8 = 0x05; pub const SIZE_448: u8 = 0x06; pub const SIZE_512: u8 = 0x07; -#[derive(PartialEq, Eq, Clone, Copy, Debug, Hash)] -pub enum Network { - /// Main network. - Main, - /// Test network. - Test, - /// Specific Network Id. - Id(u64), -} - -// Prefixes -const MAINNET_PREFIX: &str = "cfx"; -const TESTNET_PREFIX: &str = "cfxtest"; -const NETWORK_ID_PREFIX: &str = "net"; -// These two network_ids are reserved. 
-const RESERVED_NETWORK_IDS: [u64; 2] = [1, 1029]; - -#[derive(Debug, PartialEq, Eq, Clone)] -pub enum AddressType { - Builtin, - Contract, - Null, - User, - Unknown, -} - -impl fmt::Display for Network { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self.to_prefix() { - Err(EncodingError::InvalidNetworkId(network_id)) => { - write!(f, "invalid network prefix net{}", network_id) - } - Err(_) => unreachable!(), - Ok(prefix) => write!(f, "{}", prefix), - } - } -} - -impl Network { - pub fn to_prefix(&self) -> Result { - match self { - Network::Main => Ok(MAINNET_PREFIX.into()), - Network::Test => Ok(TESTNET_PREFIX.into()), - Network::Id(network_id) => { - if RESERVED_NETWORK_IDS.contains(network_id) { - Err(EncodingError::InvalidNetworkId(*network_id)) - } else { - Ok(format!("net{}", network_id)) - } - } - } - } - - pub fn from_prefix(prefix: &str) -> Result { - match prefix { - MAINNET_PREFIX => Ok(Network::Main), - TESTNET_PREFIX => Ok(Network::Test), - _ => { - let maybe_network_id = if !prefix.starts_with(NETWORK_ID_PREFIX) - { - None - } else { - match prefix[NETWORK_ID_PREFIX.len()..].parse::() { - Err(_) => None, - Ok(network_id) => { - // Check if network_id is valid. - if RESERVED_NETWORK_IDS.contains(&network_id) { - None - } else { - Some(network_id) - } - } - } - }; - - match maybe_network_id { - None => { - Err(DecodingError::InvalidPrefix(prefix.to_string())) - } - Some(network_id) => Ok(Network::Id(network_id)), - } - } - } - } -} +pub const BASE32_CHARS: &str = "abcdefghijklmnopqrstuvwxyz0123456789"; +pub const EXCLUDE_CHARS: [char; 4] = ['o', 'i', 'l', 'q']; -impl AddressType { - const BUILTIN: &'static str = "builtin"; - const CONTRACT: &'static str = "contract"; - const NULL: &'static str = "null"; - const UNKNOWN: &'static str = "unknown"; - const USER: &'static str = "user"; +// network prefix +pub const MAINNET_PREFIX: &str = "cfx"; +pub const TESTNET_PREFIX: &str = "cfxtest"; +pub const NETWORK_ID_PREFIX: &str = "net"; - pub fn parse(text: &str) -> Result { - if text == Self::BUILTIN { - Ok(Self::Builtin) - } else if text == Self::CONTRACT { - Ok(Self::Contract) - } else if text == Self::NULL { - Ok(Self::Null) - } else if text == Self::USER { - Ok(Self::User) - } else { - Ok(Self::Unknown) - } - } +// address types +pub const ADDRESS_TYPE_BUILTIN: &'static str = "builtin"; +pub const ADDRESS_TYPE_CONTRACT: &'static str = "contract"; +pub const ADDRESS_TYPE_NULL: &'static str = "null"; +pub const ADDRESS_TYPE_UNKNOWN: &'static str = "unknown"; +pub const ADDRESS_TYPE_USER: &'static str = "user"; - pub fn from_address( - address_hex: &T, - ) -> Result { - match address_hex.address_type_bits() { - address_util::TYPE_BITS_BUILTIN => { - if address_hex.is_null_address() { - Ok(Self::Null) - } else { - Ok(Self::Builtin) - } +// These two network_ids are reserved. +pub const RESERVED_NETWORK_IDS: [u64; 2] = [1, 1029]; + +#[cfg(not(feature = "std"))] +use alloc::{format, string::String, vec::Vec}; +use lazy_static::lazy_static; + +lazy_static! { + // Regular expression for application to match string. This regex isn't strict, + // because our SDK will. + // "(?i)[:=_-0123456789abcdefghijklmnopqrstuvwxyz]*" + pub static ref REGEXP: String = format!{"(?i)[:=_-{}]*", BASE32_CHARS}; + + // For encoding. + pub static ref CHARSET: Vec = + // Remove EXCLUDE_CHARS from charset. + BASE32_CHARS.replace(&EXCLUDE_CHARS[..], "").into_bytes(); + + // For decoding. 
+ pub static ref CHAR_INDEX: [Option; 128] = (|| { + let mut index = [None; 128]; + assert_eq!(CHARSET.len(), CHARSET_SIZE); + for i in 0..CHARSET_SIZE { + let c = CHARSET[i] as usize; + index[c] = Some(i as u8); + // Support uppercase as well. + let u = (c as u8 as char).to_ascii_uppercase() as u8 as usize; + if u != c { + index[u] = Some(i as u8); } - address_util::TYPE_BITS_CONTRACT => Ok(Self::Contract), - address_util::TYPE_BITS_USER_ACCOUNT => Ok(Self::User), - _ => Ok(Self::Unknown), } - } - - pub fn to_str(&self) -> &'static str { - match self { - Self::Builtin => Self::BUILTIN, - Self::Contract => Self::CONTRACT, - Self::Null => Self::NULL, - Self::User => Self::USER, - Self::Unknown => Self::UNKNOWN, - } - } -} - -impl ToString for AddressType { - fn to_string(&self) -> String { self.to_str().into() } + return index; + }) (); } diff --git a/crates/cfx_addr/src/lib.rs b/crates/cfx_addr/src/lib.rs index 7038ef687..5db4046a8 100644 --- a/crates/cfx_addr/src/lib.rs +++ b/crates/cfx_addr/src/lib.rs @@ -1,78 +1,25 @@ -// Copyright 2021 Conflux Foundation. All rights reserved. -// Conflux is free software and distributed under GNU General Public License. -// See http://www.gnu.org/licenses/ -// -// Modification based on https://github.com/hlb8122/rust-bitcoincash-addr in MIT License. -// A copy of the original license is included in LICENSE.rust-bitcoincash-addr. +#![cfg_attr(not(feature = "std"), no_std)] -extern crate cfx_types; -#[macro_use] -extern crate lazy_static; -extern crate rustc_hex; - -#[allow(dead_code)] -pub mod checksum; -pub mod consts; -pub mod errors; -#[cfg(test)] -mod tests; +mod consts; +mod types; +mod utils; -use cfx_types::Address; -use checksum::polymod; -pub use consts::{AddressType, Network}; -pub use errors::DecodingError; -use errors::*; - -const BASE32_CHARS: &str = "abcdefghijklmnopqrstuvwxyz0123456789"; -const EXCLUDE_CHARS: [char; 4] = ['o', 'i', 'l', 'q']; -lazy_static! { - // Regular expression for application to match string. This regex isn't strict, - // because our SDK will. - // "(?i)[:=_-0123456789abcdefghijklmnopqrstuvwxyz]*" - static ref REGEXP: String = format!{"(?i)[:=_-{}]*", BASE32_CHARS}; +pub use consts::*; +pub use types::*; +pub use utils::*; - // For encoding. - static ref CHARSET: Vec = - // Remove EXCLUDE_CHARS from charset. - BASE32_CHARS.replace(&EXCLUDE_CHARS[..], "").into_bytes(); +extern crate cfx_types; +extern crate core; +extern crate lazy_static; - // For decoding. - static ref CHAR_INDEX: [Option; 128] = (|| { - let mut index = [None; 128]; - assert_eq!(CHARSET.len(), consts::CHARSET_SIZE); - for i in 0..consts::CHARSET_SIZE { - let c = CHARSET[i] as usize; - index[c] = Some(i as u8); - // Support uppercase as well. - let u = (c as u8 as char).to_ascii_uppercase() as u8 as usize; - if u != c { - index[u] = Some(i as u8); - } - } - return index; - }) (); -} +#[cfg(not(feature = "std"))] +extern crate alloc; -/// Struct containing the raw bytes and metadata of a Conflux address. -#[derive(PartialEq, Eq, Clone, Debug, Hash)] -pub struct DecodedRawAddress { - /// Base32 address. This is included for debugging purposes. - pub input_base32_address: String, - /// Address bytes - pub parsed_address_bytes: Vec, - /// The parsed address in H160 format. - pub hex_address: Option
, - /// Network - pub network: Network, -} +#[cfg(not(feature = "std"))] +use alloc::{string::String, vec::Vec}; -#[derive(Copy, Clone)] -pub enum EncodingOptions { - Simple, - QrCode, -} +use cfx_types::Address; -// TODO: verbose level and address type. pub fn cfx_addr_encode( raw: &[u8], network: Network, encoding_options: EncodingOptions, ) -> Result { @@ -145,7 +92,6 @@ pub fn cfx_addr_encode( pub fn cfx_addr_decode( addr_str: &str, ) -> Result { - // FIXME: add a unit test for addr_str in capital letters. let has_lowercase = addr_str.chars().any(|c| c.is_lowercase()); let has_uppercase = addr_str.chars().any(|c| c.is_uppercase()); if has_lowercase && has_uppercase { @@ -270,137 +216,3 @@ pub fn cfx_addr_decode( network, }) } - -/// The checksum calculation includes the lower 5 bits of each character of the -/// prefix. -/// - e.g. "bit..." becomes 2,9,20,... -// Expand the address prefix for the checksum operation. -fn expand_prefix(prefix: &str) -> Vec { - let mut ret: Vec = prefix.chars().map(|c| (c as u8) & 0x1f).collect(); - ret.push(0); - ret -} - -// This method assume that data is valid string of inbits. -// When pad is true, any remaining bits are padded and encoded into a new byte; -// when pad is false, any remaining bits are checked to be zero and discarded. -fn convert_bits( - data: &[u8], inbits: u8, outbits: u8, pad: bool, -) -> Result, DecodingError> { - assert!(inbits <= 8 && outbits <= 8); - let num_bytes = (data.len() * inbits as usize + outbits as usize - 1) - / outbits as usize; - let mut ret = Vec::with_capacity(num_bytes); - let mut acc: u16 = 0; // accumulator of bits - let mut num: u8 = 0; // num bits in acc - let groupmask = (1 << outbits) - 1; - for d in data.iter() { - // We push each input chunk into a 16-bit accumulator - acc = (acc << inbits) | u16::from(*d); - num += inbits; - // Then we extract all the output groups we can - while num >= outbits { - // Store only the highest outbits. - ret.push((acc >> (num - outbits)) as u8); - // Clear the highest outbits. - acc &= !(groupmask << (num - outbits)); - num -= outbits; - } - } - if pad { - // If there's some bits left, pad and add it - if num > 0 { - ret.push((acc << (outbits - num)) as u8); - } - } else { - // FIXME: add unit tests for it. 
- // If there's some bits left, figure out if we need to remove padding - // and add it - let padding = ((data.len() * inbits as usize) % outbits as usize) as u8; - if num >= inbits || acc != 0 { - return Err(DecodingError::InvalidPadding { - from_bits: inbits, - padding_bits: padding, - padding: acc, - }); - } - } - Ok(ret) -} - -#[test] -fn test_expand_prefix() { - assert_eq!(expand_prefix("cfx"), vec![0x03, 0x06, 0x18, 0x00]); - - assert_eq!( - expand_prefix("cfxtest"), - vec![0x03, 0x06, 0x18, 0x14, 0x05, 0x13, 0x14, 0x00] - ); - - assert_eq!( - expand_prefix("net17"), - vec![0x0e, 0x05, 0x14, 0x11, 0x17, 0x00] - ); -} - -#[test] -fn test_convert_bits() { - // 00000000 --> 0, 0, 0, 0, 0, 0, 0, 0 - assert_eq!(convert_bits(&[0], 8, 1, false), Ok(vec![0; 8])); - - // 00000000 --> 000, 000, 00_ - assert_eq!(convert_bits(&[0], 8, 3, false), Ok(vec![0, 0])); // 00_ is dropped - assert_eq!(convert_bits(&[0], 8, 3, true), Ok(vec![0, 0, 0])); // 00_ becomes 000 - - // 00000001 --> 000, 000, 01_ - assert!(convert_bits(&[1], 8, 3, false).is_err()); // 01_ != 0 (ignored incomplete chunk must be 0) - assert_eq!(convert_bits(&[1], 8, 3, true), Ok(vec![0, 0, 2])); // 01_ becomes 010 - - // 00000001 --> 0000000, 1______ - assert_eq!(convert_bits(&[1], 8, 7, true), Ok(vec![0, 64])); // 1______ becomes 1000000 - - // 0, 0, 0, 0, 0, 0, 0, 0 --> 00000000 - assert_eq!(convert_bits(&[0; 8], 1, 8, false), Ok(vec![0])); - - // 000, 000, 010 -> 00000001, 0_______ - assert_eq!(convert_bits(&[0, 0, 2], 3, 8, false), Ok(vec![1])); // 0_______ is dropped - assert_eq!(convert_bits(&[0, 0, 2], 3, 8, true), Ok(vec![1, 0])); // 0_______ becomes 00000000 - - // 000, 000, 011 -> 00000001, 1_______ - assert!(convert_bits(&[0, 0, 3], 3, 8, false).is_err()); // 1_______ != 0 (ignored incomplete chunk must be 0) - - // 00000000, 00000001, 00000010, 00000011, 00000100 --> - // 00000, 00000, 00000, 10000, 00100, 00000, 11000, 00100 - assert_eq!( - convert_bits(&[0, 1, 2, 3, 4], 8, 5, false), - Ok(vec![0, 0, 0, 16, 4, 0, 24, 4]) - ); - - // 00000000, 00000001, 00000010 --> - // 00000, 00000, 00000, 10000, 0010_ - assert!(convert_bits(&[0, 1, 2], 8, 5, false).is_err()); // 0010_ != 0 (ignored incomplete chunk must be 0) - - assert_eq!( - convert_bits(&[0, 1, 2], 8, 5, true), - Ok(vec![0, 0, 0, 16, 4]) - ); // 0010_ becomes 00100 - - // 00000, 00000, 00000, 10000, 00100, 00000, 11000, 00100 --> - // 00000000, 00000001, 00000010, 00000011, 00000100 - assert_eq!( - convert_bits(&[0, 0, 0, 16, 4, 0, 24, 4], 5, 8, false), - Ok(vec![0, 1, 2, 3, 4]) - ); - - // 00000, 00000, 00000, 10000, 00100 --> - // 00000000, 00000001, 00000010, 0_______ - assert_eq!( - convert_bits(&[0, 0, 0, 16, 4], 5, 8, false), - Ok(vec![0, 1, 2]) - ); // 0_______ is dropped - - assert_eq!( - convert_bits(&[0, 0, 0, 16, 4], 5, 8, true), - Ok(vec![0, 1, 2, 0]) - ); // 0_______ becomes 00000000 -} diff --git a/crates/cfx_addr/src/errors.rs b/crates/cfx_addr/src/types.rs similarity index 52% rename from crates/cfx_addr/src/errors.rs rename to crates/cfx_addr/src/types.rs index 4b4e27269..c845d29e8 100644 --- a/crates/cfx_addr/src/errors.rs +++ b/crates/cfx_addr/src/types.rs @@ -1,13 +1,176 @@ -// Copyright 2021 Conflux Foundation. All rights reserved. -// Conflux is free software and distributed under GNU General Public License. -// See http://www.gnu.org/licenses/ -// -// Modification based on https://github.com/hlb8122/rust-bitcoincash-addr in MIT License. -// A copy of the original license is included in LICENSE.rust-bitcoincash-addr. 
+use crate::consts::{ + ADDRESS_TYPE_BUILTIN, ADDRESS_TYPE_CONTRACT, ADDRESS_TYPE_NULL, + ADDRESS_TYPE_UNKNOWN, ADDRESS_TYPE_USER, MAINNET_PREFIX, NETWORK_ID_PREFIX, + RESERVED_NETWORK_IDS, TESTNET_PREFIX, +}; +use cfx_types::{ + address_util::{self, AddressUtil}, + Address, +}; +use core::fmt; -use super::consts::AddressType; +#[cfg(feature = "std")] +use std::error::Error; -use std::{error::Error, fmt}; +#[cfg(not(feature = "std"))] +use alloc::{ + format, + string::{String, ToString}, + vec::Vec, +}; + +/// Struct containing the raw bytes and metadata of a Conflux address. +#[derive(PartialEq, Eq, Clone, Debug, Hash)] +pub struct DecodedRawAddress { + /// Base32 address. This is included for debugging purposes. + pub input_base32_address: String, + /// Address bytes + pub parsed_address_bytes: Vec, + /// The parsed address in H160 format. + pub hex_address: Option
, + /// Network + pub network: Network, +} + +#[derive(Copy, Clone)] +pub enum EncodingOptions { + Simple, + QrCode, +} + +#[derive(PartialEq, Eq, Clone, Copy, Debug, Hash)] +pub enum Network { + /// Main network. + Main, + /// Test network. + Test, + /// Specific Network Id. + Id(u64), +} + +impl Network { + pub fn to_prefix(&self) -> Result { + match self { + Network::Main => Ok(MAINNET_PREFIX.into()), + Network::Test => Ok(TESTNET_PREFIX.into()), + Network::Id(network_id) => { + if RESERVED_NETWORK_IDS.contains(network_id) { + Err(EncodingError::InvalidNetworkId(*network_id)) + } else { + Ok(format!("net{}", network_id)) + } + } + } + } + + pub fn from_prefix(prefix: &str) -> Result { + match prefix { + MAINNET_PREFIX => Ok(Network::Main), + TESTNET_PREFIX => Ok(Network::Test), + _ => { + let maybe_network_id = if !prefix.starts_with(NETWORK_ID_PREFIX) + { + None + } else { + match prefix[NETWORK_ID_PREFIX.len()..].parse::() { + Err(_) => None, + Ok(network_id) => { + // Check if network_id is valid. + if RESERVED_NETWORK_IDS.contains(&network_id) { + None + } else { + Some(network_id) + } + } + } + }; + + match maybe_network_id { + None => { + Err(DecodingError::InvalidPrefix(prefix.to_string())) + } + Some(network_id) => Ok(Network::Id(network_id)), + } + } + } + } +} + +impl fmt::Display for Network { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self.to_prefix() { + Err(EncodingError::InvalidNetworkId(network_id)) => { + write!(f, "invalid network prefix net{}", network_id) + } + Err(_) => unreachable!(), + Ok(prefix) => write!(f, "{}", prefix), + } + } +} + +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum AddressType { + Builtin, + Contract, + Null, + User, + Unknown, +} + +impl AddressType { + const BUILTIN: &'static str = ADDRESS_TYPE_BUILTIN; + const CONTRACT: &'static str = ADDRESS_TYPE_CONTRACT; + const NULL: &'static str = ADDRESS_TYPE_NULL; + const UNKNOWN: &'static str = ADDRESS_TYPE_UNKNOWN; + const USER: &'static str = ADDRESS_TYPE_USER; + + pub fn parse(text: &str) -> Result { + if text == Self::BUILTIN { + Ok(Self::Builtin) + } else if text == Self::CONTRACT { + Ok(Self::Contract) + } else if text == Self::NULL { + Ok(Self::Null) + } else if text == Self::USER { + Ok(Self::User) + } else { + Ok(Self::Unknown) + } + } + + pub fn from_address( + address_hex: &T, + ) -> Result { + match address_hex.address_type_bits() { + address_util::TYPE_BITS_BUILTIN => { + if address_hex.is_null_address() { + Ok(Self::Null) + } else { + Ok(Self::Builtin) + } + } + address_util::TYPE_BITS_CONTRACT => Ok(Self::Contract), + address_util::TYPE_BITS_USER_ACCOUNT => Ok(Self::User), + _ => Ok(Self::Unknown), + } + } + + pub fn to_str(&self) -> &'static str { + match self { + Self::Builtin => Self::BUILTIN, + Self::Contract => Self::CONTRACT, + Self::Null => Self::NULL, + Self::User => Self::USER, + Self::Unknown => Self::UNKNOWN, + } + } +} + +impl fmt::Display for AddressType { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.to_str()) + } +} /// Error concerning encoding of cfx_base32_addr. 
#[derive(Debug, PartialEq, Eq, Clone)] @@ -33,6 +196,7 @@ impl fmt::Display for EncodingError { } } +#[cfg(feature = "std")] impl Error for EncodingError { fn cause(&self) -> Option<&dyn Error> { None } @@ -137,6 +301,7 @@ impl fmt::Display for DecodingError { } } +#[cfg(feature = "std")] impl Error for DecodingError { fn cause(&self) -> Option<&dyn Error> { None } diff --git a/crates/cfx_addr/src/utils.rs b/crates/cfx_addr/src/utils.rs new file mode 100644 index 000000000..c71d58159 --- /dev/null +++ b/crates/cfx_addr/src/utils.rs @@ -0,0 +1,167 @@ +use crate::types::DecodingError; +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; + +// The polymod function is used to calculate the checksum of the address. +pub fn polymod(v: &[u8]) -> u64 { + let mut c = 1; + for d in v { + let c0 = (c >> 35) as u8; + c = ((c & 0x07ffffffff) << 5) ^ u64::from(*d); + if c0 & 0x01 != 0 { + c ^= 0x98f2bc8e61; + } + if c0 & 0x02 != 0 { + c ^= 0x79b76d99e2; + } + if c0 & 0x04 != 0 { + c ^= 0xf33e5fb3c4; + } + if c0 & 0x08 != 0 { + c ^= 0xae2eabe2a8; + } + if c0 & 0x10 != 0 { + c ^= 0x1e4f43e470; + } + } + c ^ 1 +} + +/// The checksum calculation includes the lower 5 bits of each character of the +/// prefix. +/// - e.g. "bit..." becomes 2,9,20,... +// Expand the address prefix for the checksum operation. +pub fn expand_prefix(prefix: &str) -> Vec { + let mut ret: Vec = prefix.chars().map(|c| (c as u8) & 0x1f).collect(); + ret.push(0); + ret +} + +// This method assume that data is valid string of inbits. +// When pad is true, any remaining bits are padded and encoded into a new byte; +// when pad is false, any remaining bits are checked to be zero and discarded. +pub fn convert_bits( + data: &[u8], inbits: u8, outbits: u8, pad: bool, +) -> Result, DecodingError> { + assert!(inbits <= 8 && outbits <= 8); + let num_bytes = (data.len() * inbits as usize + outbits as usize - 1) + / outbits as usize; + let mut ret = Vec::with_capacity(num_bytes); + let mut acc: u16 = 0; // accumulator of bits + let mut num: u8 = 0; // num bits in acc + let groupmask = (1 << outbits) - 1; + for d in data.iter() { + // We push each input chunk into a 16-bit accumulator + acc = (acc << inbits) | u16::from(*d); + num += inbits; + // Then we extract all the output groups we can + while num >= outbits { + // Store only the highest outbits. + ret.push((acc >> (num - outbits)) as u8); + // Clear the highest outbits. + acc &= !(groupmask << (num - outbits)); + num -= outbits; + } + } + if pad { + // If there's some bits left, pad and add it + if num > 0 { + ret.push((acc << (outbits - num)) as u8); + } + } else { + // FIXME: add unit tests for it. 
+ // If there's some bits left, figure out if we need to remove padding + // and add it + let padding = ((data.len() * inbits as usize) % outbits as usize) as u8; + if num >= inbits || acc != 0 { + return Err(DecodingError::InvalidPadding { + from_bits: inbits, + padding_bits: padding, + padding: acc, + }); + } + } + Ok(ret) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_expand_prefix() { + assert_eq!(expand_prefix("cfx"), vec![0x03, 0x06, 0x18, 0x00]); + + assert_eq!( + expand_prefix("cfxtest"), + vec![0x03, 0x06, 0x18, 0x14, 0x05, 0x13, 0x14, 0x00] + ); + + assert_eq!( + expand_prefix("net17"), + vec![0x0e, 0x05, 0x14, 0x11, 0x17, 0x00] + ); + } + + #[test] + fn test_convert_bits() { + // 00000000 --> 0, 0, 0, 0, 0, 0, 0, 0 + assert_eq!(convert_bits(&[0], 8, 1, false), Ok(vec![0; 8])); + + // 00000000 --> 000, 000, 00_ + assert_eq!(convert_bits(&[0], 8, 3, false), Ok(vec![0, 0])); // 00_ is dropped + assert_eq!(convert_bits(&[0], 8, 3, true), Ok(vec![0, 0, 0])); // 00_ becomes 000 + + // 00000001 --> 000, 000, 01_ + assert!(convert_bits(&[1], 8, 3, false).is_err()); // 01_ != 0 (ignored incomplete chunk must be 0) + assert_eq!(convert_bits(&[1], 8, 3, true), Ok(vec![0, 0, 2])); // 01_ becomes 010 + + // 00000001 --> 0000000, 1______ + assert_eq!(convert_bits(&[1], 8, 7, true), Ok(vec![0, 64])); // 1______ becomes 1000000 + + // 0, 0, 0, 0, 0, 0, 0, 0 --> 00000000 + assert_eq!(convert_bits(&[0; 8], 1, 8, false), Ok(vec![0])); + + // 000, 000, 010 -> 00000001, 0_______ + assert_eq!(convert_bits(&[0, 0, 2], 3, 8, false), Ok(vec![1])); // 0_______ is dropped + assert_eq!(convert_bits(&[0, 0, 2], 3, 8, true), Ok(vec![1, 0])); // 0_______ becomes 00000000 + + // 000, 000, 011 -> 00000001, 1_______ + assert!(convert_bits(&[0, 0, 3], 3, 8, false).is_err()); // 1_______ != 0 (ignored incomplete chunk must be 0) + + // 00000000, 00000001, 00000010, 00000011, 00000100 --> + // 00000, 00000, 00000, 10000, 00100, 00000, 11000, 00100 + assert_eq!( + convert_bits(&[0, 1, 2, 3, 4], 8, 5, false), + Ok(vec![0, 0, 0, 16, 4, 0, 24, 4]) + ); + + // 00000000, 00000001, 00000010 --> + // 00000, 00000, 00000, 10000, 0010_ + assert!(convert_bits(&[0, 1, 2], 8, 5, false).is_err()); // 0010_ != 0 (ignored incomplete chunk must be 0) + + assert_eq!( + convert_bits(&[0, 1, 2], 8, 5, true), + Ok(vec![0, 0, 0, 16, 4]) + ); // 0010_ becomes 00100 + + // 00000, 00000, 00000, 10000, 00100, 00000, 11000, 00100 --> + // 00000000, 00000001, 00000010, 00000011, 00000100 + assert_eq!( + convert_bits(&[0, 0, 0, 16, 4, 0, 24, 4], 5, 8, false), + Ok(vec![0, 1, 2, 3, 4]) + ); + + // 00000, 00000, 00000, 10000, 00100 --> + // 00000000, 00000001, 00000010, 0_______ + assert_eq!( + convert_bits(&[0, 0, 0, 16, 4], 5, 8, false), + Ok(vec![0, 1, 2]) + ); // 0_______ is dropped + + assert_eq!( + convert_bits(&[0, 0, 0, 16, 4], 5, 8, true), + Ok(vec![0, 1, 2, 0]) + ); // 0_______ becomes 00000000 + } +} diff --git a/crates/cfx_addr/tests/decode.rs b/crates/cfx_addr/tests/decode.rs new file mode 100644 index 000000000..9705a0da7 --- /dev/null +++ b/crates/cfx_addr/tests/decode.rs @@ -0,0 +1,59 @@ +use cfx_addr::*; +use rustc_hex::FromHex; + +#[test] +#[rustfmt::skip] +fn decoding_errors() { + let hex_addr = "85d80245dc02f5a89589e1f19c5c718e405b56cd".from_hex::>().unwrap(); + let base32_addr = cfx_addr_encode(&hex_addr, Network::Main, EncodingOptions::Simple).unwrap(); + assert_eq!(base32_addr, "cfx:acc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp"); + + // mixed case + 
assert!(cfx_addr_decode("cfx:acc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_ok()); + assert!(cfx_addr_decode("CFX:ACC7UAWF5UBTNMEZVHU9DHC6SGHEA0403Y2DGPYFJP").is_ok()); + assert!(cfx_addr_decode("Cfx:acc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); + assert!(cfx_addr_decode("cfx:acc7Uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); + + // prefix + assert!(cfx_addr_decode("acc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); + assert!(cfx_addr_decode("bch:acc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); + assert!(cfx_addr_decode("cfx1:acc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); + assert!(cfx_addr_decode("cfx1029:acc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); + + // optional address type + assert!(cfx_addr_decode("cfx:type.contract:acc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_ok()); + assert!(cfx_addr_decode("cfx:type.contract:opt.random:acc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_ok()); + assert!(cfx_addr_decode("cfx:type.user:acc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); + assert!(cfx_addr_decode("cfx:contract:acc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); + assert!(cfx_addr_decode("cfx:type.contract.2:acc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); + + // length check + assert!(cfx_addr_decode("cfx:").is_err()); + assert!(cfx_addr_decode("cfx:agc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); // change length in version byte to 001 + assert!(cfx_addr_decode("cfx:aacc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); + + // charset check + assert!(cfx_addr_decode("cfx:acc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfip").is_err()); // j --> i + + // checksum check + for ii in 4..46 { + let mut x = base32_addr.clone(); + + // need unsafe to mutate utf-8 + unsafe { + match &mut x.as_mut_vec()[ii] { + ch if *ch == 48 => *ch = 49, // change '0' to '1' + ch => *ch = 48, // change to '0' + }; + } + + assert!(cfx_addr_decode(&x).is_err()); + } + + // version check + assert!(cfx_addr_decode("cfx:t22xg0j5vg1fba4nh7gz372we6740puptm91kazw6t").is_err()); // version byte: 0b10000000 + assert!(cfx_addr_decode("cfx:jcc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); // version byte: 0b01000000 + assert!(cfx_addr_decode("cfx:ecc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); // version byte: 0b00100000 + assert!(cfx_addr_decode("cfx:ccc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); // version byte: 0b00010000 + assert!(cfx_addr_decode("cfx:bcc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); // version byte: 0b00001000 +} diff --git a/crates/cfx_addr/tests/encode.rs b/crates/cfx_addr/tests/encode.rs new file mode 100644 index 000000000..15b0e984e --- /dev/null +++ b/crates/cfx_addr/tests/encode.rs @@ -0,0 +1,14 @@ +use cfx_addr::*; +use rustc_hex::FromHex; + +#[test] +fn encoding_errors() { + // invalid input length + let data = "85d80245dc02f5a89589e1f19c5c718e405b56" + .from_hex::>() + .unwrap(); + + assert!( + cfx_addr_encode(&data, Network::Main, EncodingOptions::Simple).is_err() + ); +} diff --git a/crates/cfx_addr/src/tests.rs b/crates/cfx_addr/tests/misc.rs similarity index 67% rename from crates/cfx_addr/src/tests.rs rename to crates/cfx_addr/tests/misc.rs index db6650958..cf96f384d 100644 --- a/crates/cfx_addr/src/tests.rs +++ b/crates/cfx_addr/tests/misc.rs @@ -5,7 +5,7 @@ // Modification based on https://github.com/hlb8122/rust-cfx-addr in MIT License. // A copy of the original license is included in LICENSE.rust-cfx-addr. 
-use super::{consts::Network, *}; +use cfx_addr::*; use rustc_hex::FromHex; #[test] @@ -83,75 +83,6 @@ fn spec_test_vectors() { ); } -#[test] -fn encoding_errors() { - // invalid input length - let data = "85d80245dc02f5a89589e1f19c5c718e405b56" - .from_hex::>() - .unwrap(); - - assert!( - cfx_addr_encode(&data, Network::Main, EncodingOptions::Simple).is_err() - ); -} - -#[test] -#[rustfmt::skip] -fn decoding_errors() { - let hex_addr = "85d80245dc02f5a89589e1f19c5c718e405b56cd".from_hex::>().unwrap(); - let base32_addr = cfx_addr_encode(&hex_addr, Network::Main, EncodingOptions::Simple).unwrap(); - assert_eq!(base32_addr, "cfx:acc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp"); - - // mixed case - assert!(cfx_addr_decode("cfx:acc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_ok()); - assert!(cfx_addr_decode("CFX:ACC7UAWF5UBTNMEZVHU9DHC6SGHEA0403Y2DGPYFJP").is_ok()); - assert!(cfx_addr_decode("Cfx:acc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); - assert!(cfx_addr_decode("cfx:acc7Uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); - - // prefix - assert!(cfx_addr_decode("acc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); - assert!(cfx_addr_decode("bch:acc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); - assert!(cfx_addr_decode("cfx1:acc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); - assert!(cfx_addr_decode("cfx1029:acc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); - - // optional address type - assert!(cfx_addr_decode("cfx:type.contract:acc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_ok()); - assert!(cfx_addr_decode("cfx:type.contract:opt.random:acc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_ok()); - assert!(cfx_addr_decode("cfx:type.user:acc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); - assert!(cfx_addr_decode("cfx:contract:acc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); - assert!(cfx_addr_decode("cfx:type.contract.2:acc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); - - // length check - assert!(cfx_addr_decode("cfx:").is_err()); - assert!(cfx_addr_decode("cfx:agc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); // change length in version byte to 001 - assert!(cfx_addr_decode("cfx:aacc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); - - // charset check - assert!(cfx_addr_decode("cfx:acc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfip").is_err()); // j --> i - - // checksum check - for ii in 4..46 { - let mut x = base32_addr.clone(); - - // need unsafe to mutate utf-8 - unsafe { - match &mut x.as_mut_vec()[ii] { - ch if *ch == 48 => *ch = 49, // change '0' to '1' - ch => *ch = 48, // change to '0' - }; - } - - assert!(cfx_addr_decode(&x).is_err()); - } - - // version check - assert!(cfx_addr_decode("cfx:t22xg0j5vg1fba4nh7gz372we6740puptm91kazw6t").is_err()); // version byte: 0b10000000 - assert!(cfx_addr_decode("cfx:jcc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); // version byte: 0b01000000 - assert!(cfx_addr_decode("cfx:ecc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); // version byte: 0b00100000 - assert!(cfx_addr_decode("cfx:ccc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); // version byte: 0b00010000 - assert!(cfx_addr_decode("cfx:bcc7uawf5ubtnmezvhu9dhc6sghea0403y2dgpyfjp").is_err()); // version byte: 0b00001000 -} - #[test] fn bch_tests() { // 20-byte public key hash on mainnet From 7e7bc40d540cd235e64181109b687158c879dafd Mon Sep 17 00:00:00 2001 From: darwintree <17946284+darwintree@users.noreply.github.com> Date: Mon, 4 Nov 2024 17:18:59 +0800 Subject: [PATCH 10/31] tests: replace camelCase web3 methods to snake case 
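The tests previously relied on the camelCase helpers exposed by web3.py v5; web3.py v6 and later (requirements now pin web3==7.4.0) only provide the snake_case names. A minimal, illustrative sketch of the rename pattern applied throughout the tests (the address literal is only an example value reused from the cfx-addr tests, not part of this patch):

    from web3 import Web3

    addr = "0x85d80245dc02f5a89589e1f19c5c718e405b56cd"
    # web3.py v5 spelling (removed in v6+):  Web3.toChecksumAddress(addr)
    # web3.py v6+/v7 spelling used here:
    checksum = Web3.to_checksum_address(addr)
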
--- dev-support/dep_pip3.sh | 6 +- requirements.txt | 14 ++++ tests/blockhash_test.py | 6 +- tests/conflux/rpc.py | 2 +- tests/contract_bench_test.py | 84 ++++++++++----------- tests/contract_remove_test.py | 2 +- tests/erc20_test.py | 6 +- tests/estimation_test.py | 4 +- tests/evm_space/base.py | 2 +- tests/evm_space/debug_trace_tx_test.py | 2 +- tests/evm_space/eip1559_test.py | 2 +- tests/evm_space/filter_log_test.py | 2 +- tests/evm_space/filter_transaction_test.py | 2 +- tests/evm_space/phantom_trace_test.py | 2 +- tests/evm_space/phantom_transaction_test.py | 2 +- tests/fixed_supply_token_test.py | 8 +- tests/issue988_test.py | 4 +- tests/light/rpc_test.py | 10 +-- tests/params_dao_vote_test.py | 2 +- tests/pubsub/eth_logs_test.py | 2 +- tests/rpc/test_token_supply_info.py | 2 +- tests/test_framework/contracts.py | 6 +- tests/web3_test.py | 8 +- tests/withdraw_deposit_test.py | 4 +- 24 files changed, 97 insertions(+), 87 deletions(-) create mode 100644 requirements.txt diff --git a/dev-support/dep_pip3.sh b/dev-support/dep_pip3.sh index 62559d494..c4a4fa8b7 100755 --- a/dev-support/dep_pip3.sh +++ b/dev-support/dep_pip3.sh @@ -8,14 +8,14 @@ function install() { fi } -install git+https://github.com/conflux-fans/cfx-account.git@v1.1.0-beta.2 # install cfx-account lib and prepare for CIP-1559 tests +install cfx-account install eth-utils install rlp==1.2.0 install py-ecc==5.2.0 install coincurve==19.0.1 install pysha3 install trie==1.4.0 -install web3==5.31.1 +install web3==7.4.0 install py-solc-x install jsonrpcclient==3.3.6 install asyncio @@ -23,7 +23,7 @@ install websockets install pyyaml install numpy -python3 -m solcx.install v0.5.17 +# python3 -m solcx.install v0.5.17 # TODO cross platform #yum install clang snappy snappy-devel zlib zlib-devel bzip2 bzip2-devel lz4-devel diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 000000000..f15aed897 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,14 @@ +cfx-account +eth-utils +# rlp==1.2.0 +# py-ecc +coincurve==19.0.1 +pysha3 +# trie==1.4.0 +web3==7.4.0 +py-solc-x +jsonrpcclient==3.3.6 +asyncio +websockets +pyyaml +numpy diff --git a/tests/blockhash_test.py b/tests/blockhash_test.py index 3eb9c6b2a..410100f07 100644 --- a/tests/blockhash_test.py +++ b/tests/blockhash_test.py @@ -1,9 +1,5 @@ -from web3 import Web3 -from web3.contract import ContractFunction, Contract - -from conflux.rpc import RpcClient from conflux.utils import * -from test_framework.contracts import ConfluxTestFrameworkForContract, ZERO_ADDRESS +from test_framework.contracts import ConfluxTestFrameworkForContract from test_framework.util import * from test_framework.mininode import * diff --git a/tests/conflux/rpc.py b/tests/conflux/rpc.py index aab24d215..0dfcf558d 100644 --- a/tests/conflux/rpc.py +++ b/tests/conflux/rpc.py @@ -87,7 +87,7 @@ def rand_addr(self) -> str: def rand_account(self) -> (str, bytes): priv_key = eth_utils.encode_hex(os.urandom(32)) addr = eth_utils.encode_hex(priv_to_addr(priv_key)) - return (Web3.toChecksumAddress(addr), priv_key) + return (Web3.to_checksum_address(addr), priv_key) def rand_hash(self, seed: bytes = None) -> str: if seed is None: diff --git a/tests/contract_bench_test.py b/tests/contract_bench_test.py index db71e6282..faf552e59 100755 --- a/tests/contract_bench_test.py +++ b/tests/contract_bench_test.py @@ -35,7 +35,7 @@ def testEventContract(self): assert(os.path.isfile(bytecode_file)) bytecode = open(bytecode_file).read().strip() receipt, contractAddr = self.deploy_contract(self.sender, self.priv_key, 
bytecode, storage_limit=1024) - contractAddr = Web3.toChecksumAddress(contractAddr) + contractAddr = Web3.to_checksum_address(contractAddr) logs = self.rpc.get_logs(self.filter) assert_equal(len(logs), l + 1) @@ -100,7 +100,7 @@ def testBallotContract(self): # deploy contract data = contract.constructor(10).buildTransaction(self.tx_conf)["data"] receipt, contractAddr = self.deploy_contract(self.sender, self.priv_key, data, storage_limit=(1024 + 64 * 3)) - contractAddr = Web3.toChecksumAddress(contractAddr) + contractAddr = Web3.to_checksum_address(contractAddr) self.tx_conf["to"] = contractAddr # interact with vote() @@ -131,7 +131,7 @@ def testHTLCContract(self): data = contract.constructor().buildTransaction(self.tx_conf)["data"] receipt, contractAddr = self.deploy_contract(self.sender, self.priv_key, data, storage_limit=2560) tx_hash = receipt['transactionHash'] - contractAddr = Web3.toChecksumAddress(contractAddr) + contractAddr = Web3.to_checksum_address(contractAddr) self.tx_conf["to"] = contractAddr logs = self.rpc.get_logs(self.filter) assert_equal(len(logs), l + 1) @@ -213,7 +213,7 @@ def testPayContract(self): # deploy contract data = contract.constructor().buildTransaction(self.tx_conf)["data"] receipt, contractAddr = self.deploy_contract(self.sender, self.priv_key, data, storage_limit=512) - contractAddr = Web3.toChecksumAddress(contractAddr) + contractAddr = Web3.to_checksum_address(contractAddr) self.tx_conf["to"] = contractAddr logs = self.rpc.get_logs(self.filter) l = len(logs) @@ -251,12 +251,12 @@ def testMappingContract(self): data = contract.constructor(1).buildTransaction(self.tx_conf)["data"] receipt, contractAddr = self.deploy_contract(self.sender, self.priv_key, data, storage_limit=(1536 + 64)) tx_hash = receipt['transactionHash'] - contractAddr = Web3.toChecksumAddress(contractAddr) + contractAddr = Web3.to_checksum_address(contractAddr) self.tx_conf["to"] = contractAddr c = "0x81f3521d71990945b99e1c592750d7157f2b545f" def check_wards(x, y, z): - data = contract.functions.wards(Web3.toChecksumAddress(self.pub[0])).buildTransaction(self.tx_conf)["data"] + data = contract.functions.wards(Web3.to_checksum_address(self.pub[0])).buildTransaction(self.tx_conf)["data"] result = self.rpc.call(contractAddr, data) A = int(result, 0) assert(A == x) @@ -264,23 +264,23 @@ def check_wards(x, y, z): result = self.rpc.call(contractAddr, data) B = int(result, 0) assert(B == y) - data = contract.functions.wards(Web3.toChecksumAddress(c)).buildTransaction(self.tx_conf)["data"] + data = contract.functions.wards(Web3.to_checksum_address(c)).buildTransaction(self.tx_conf)["data"] result = self.rpc.call(contractAddr, data) C = int(result, 0) assert(C == z) # deny pub[0] check_wards(0, 2, 0) - data = contract.functions.set1(Web3.toChecksumAddress(c)).buildTransaction(self.tx_conf)["data"] + data = contract.functions.set1(Web3.to_checksum_address(c)).buildTransaction(self.tx_conf)["data"] result = self.call_contract(self.sender, self.priv_key, contractAddr, data, storage_limit=64) check_wards(0, 2, 1) - data = contract.functions.set2(Web3.toChecksumAddress(self.pub[0])).buildTransaction(self.tx_conf)["data"] + data = contract.functions.set2(Web3.to_checksum_address(self.pub[0])).buildTransaction(self.tx_conf)["data"] result = self.call_contract(self.sender, self.priv_key, contractAddr, data, storage_limit=64) check_wards(2, 2, 1) - data = contract.functions.set0(Web3.toChecksumAddress(c)).buildTransaction(self.tx_conf)["data"] + data = 
contract.functions.set0(Web3.to_checksum_address(c)).buildTransaction(self.tx_conf)["data"] result = self.call_contract(self.sender, self.priv_key, contractAddr, data, storage_limit=64) check_wards(2, 2, 0) - data = contract.functions.set0(Web3.toChecksumAddress(self.pub[0])).buildTransaction(self.tx_conf)["data"] + data = contract.functions.set0(Web3.to_checksum_address(self.pub[0])).buildTransaction(self.tx_conf)["data"] result = self.call_contract(self.sender, self.priv_key, contractAddr, data, storage_limit=64) check_wards(0, 2, 0) @@ -301,35 +301,35 @@ def testDaiContract(self): data = contract.constructor(1).buildTransaction(self.tx_conf)["data"] receipt, contractAddr = self.deploy_contract(self.sender, self.priv_key, data, storage_limit=(4096 + 64 * 2)) tx_hash = receipt['transactionHash'] - contractAddr = Web3.toChecksumAddress(contractAddr) + contractAddr = Web3.to_checksum_address(contractAddr) self.tx_conf["to"] = contractAddr # rely [0,5) for i in range(5): - data = contract.functions.rely(Web3.toChecksumAddress(self.pub[i])).buildTransaction(self.tx_conf)["data"] + data = contract.functions.rely(Web3.to_checksum_address(self.pub[i])).buildTransaction(self.tx_conf)["data"] result = self.call_contract(self.sender, self.priv_key, contractAddr, data, 0, storage_limit=64) assert_equal(result["outcomeStatus"], "0x0") # deny 1, 3 for i in range(5): if (i % 2 == 1): - data = contract.functions.deny(Web3.toChecksumAddress(self.pub[i])).buildTransaction(self.tx_conf)["data"] + data = contract.functions.deny(Web3.to_checksum_address(self.pub[i])).buildTransaction(self.tx_conf)["data"] result = self.call_contract(self.pub[i - 1], self.pri[i - 1], contractAddr, data, 0) assert_equal(result["outcomeStatus"], "0x0") # check wards for i in range(5): - data = contract.functions.wards(Web3.toChecksumAddress(self.pub[i])).buildTransaction(self.tx_conf)["data"] + data = contract.functions.wards(Web3.to_checksum_address(self.pub[i])).buildTransaction(self.tx_conf)["data"] result = self.rpc.call(contractAddr, data) assert_equal(int(result, 0), int(i % 2 == 0)) # mint tokens - data = contract.functions.mint(Web3.toChecksumAddress(self.pub[0]), 100000).buildTransaction(self.tx_conf)["data"] + data = contract.functions.mint(Web3.to_checksum_address(self.pub[0]), 100000).buildTransaction(self.tx_conf)["data"] result = self.call_contract(self.sender, self.priv_key, contractAddr, data, 0, storage_limit=128) logs = self.rpc.get_logs(self.filter) # check balance - data = contract.functions.balanceOf(Web3.toChecksumAddress(self.pub[0])).buildTransaction(self.tx_conf)["data"] + data = contract.functions.balanceOf(Web3.to_checksum_address(self.pub[0])).buildTransaction(self.tx_conf)["data"] result = self.rpc.call(contractAddr, data) assert(int(result, 0) == 100000) @@ -339,7 +339,7 @@ def testDaiContract(self): logs = self.rpc.get_logs(self.filter) # check allowance - data = contract.functions.allowance(Web3.toChecksumAddress(self.pub[0]), self.sender_checksum).buildTransaction(self.tx_conf)["data"] + data = contract.functions.allowance(Web3.to_checksum_address(self.pub[0]), self.sender_checksum).buildTransaction(self.tx_conf)["data"] result = self.rpc.call(contractAddr, data) assert(int(result, 0) == 50000) @@ -349,18 +349,18 @@ def testDaiContract(self): assert(result["outcomeStatus"] != "0x0") # insuffcient allowance - data = contract.functions.transferFrom(Web3.toChecksumAddress(self.pub[0]), self.sender_checksum, 10000).buildTransaction(self.tx_conf)["data"] + data = 
contract.functions.transferFrom(Web3.to_checksum_address(self.pub[0]), self.sender_checksum, 10000).buildTransaction(self.tx_conf)["data"] result = self.call_contract(self.pub[1], self.pri[1], contractAddr, data, storage_limit=128) assert(result["outcomeStatus"] != "0x0") # transfer 50000 use allowance - data = contract.functions.transferFrom(Web3.toChecksumAddress(self.pub[0]), Web3.toChecksumAddress(self.pub[1]), 50000).buildTransaction(self.tx_conf)["data"] + data = contract.functions.transferFrom(Web3.to_checksum_address(self.pub[0]), Web3.to_checksum_address(self.pub[1]), 50000).buildTransaction(self.tx_conf)["data"] result = self.call_contract(self.sender, self.priv_key, contractAddr, data, storage_limit=64) assert(result["outcomeStatus"] == "0x0") # get digest and sign it ts = int(time.time()) + 7200 - data = contract.functions.getHash(Web3.toChecksumAddress(self.pub[0]), Web3.toChecksumAddress(self.pub[1]), 0, ts, True).buildTransaction(self.tx_conf)["data"] + data = contract.functions.getHash(Web3.to_checksum_address(self.pub[0]), Web3.to_checksum_address(self.pub[1]), 0, ts, True).buildTransaction(self.tx_conf)["data"] result = self.rpc.call(contractAddr, data) v, r, s = ecsign(bytes.fromhex(result[2:]), self.pri[0]) v -= 27 @@ -370,28 +370,28 @@ def testDaiContract(self): assert(len(s) == 66) # premit - data = contract.functions.permit(Web3.toChecksumAddress(self.pub[0]), Web3.toChecksumAddress(self.pub[1]), 0, ts, True, v, r, s).buildTransaction(self.tx_conf)["data"] + data = contract.functions.permit(Web3.to_checksum_address(self.pub[0]), Web3.to_checksum_address(self.pub[1]), 0, ts, True, v, r, s).buildTransaction(self.tx_conf)["data"] result = self.call_contract(self.pub[5], self.pri[5], contractAddr, data, storage_limit=128) assert(result["outcomeStatus"] == "0x0") # check allowance - data = contract.functions.allowance(Web3.toChecksumAddress(self.pub[0]), self.sender_checksum).buildTransaction(self.tx_conf)["data"] + data = contract.functions.allowance(Web3.to_checksum_address(self.pub[0]), self.sender_checksum).buildTransaction(self.tx_conf)["data"] result = self.rpc.call(contractAddr, data) assert(int(result, 0) == 0) - data = contract.functions.allowance(Web3.toChecksumAddress(self.pub[0]), Web3.toChecksumAddress(self.pub[1])).buildTransaction(self.tx_conf)["data"] + data = contract.functions.allowance(Web3.to_checksum_address(self.pub[0]), Web3.to_checksum_address(self.pub[1])).buildTransaction(self.tx_conf)["data"] result = self.rpc.call(contractAddr, data) assert(result == '0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff') # burn pub[0] - data = contract.functions.burn(Web3.toChecksumAddress(self.pub[0]), 50000).buildTransaction(self.tx_conf)["data"] + data = contract.functions.burn(Web3.to_checksum_address(self.pub[0]), 50000).buildTransaction(self.tx_conf)["data"] result = self.call_contract(self.pub[1], self.pri[1], contractAddr, data, storage_limit=64) assert(result["outcomeStatus"] == "0x0") # check balance - data = contract.functions.balanceOf(Web3.toChecksumAddress(self.pub[0])).buildTransaction(self.tx_conf)["data"] + data = contract.functions.balanceOf(Web3.to_checksum_address(self.pub[0])).buildTransaction(self.tx_conf)["data"] result = self.rpc.call(contractAddr, data) assert(int(result, 0) == 0) - data = contract.functions.balanceOf(Web3.toChecksumAddress(self.pub[1])).buildTransaction(self.tx_conf)["data"] + data = contract.functions.balanceOf(Web3.to_checksum_address(self.pub[1])).buildTransaction(self.tx_conf)["data"] result = 
self.rpc.call(contractAddr, data) assert(int(result, 0) == 50000) data = contract.functions.totalSupply().buildTransaction(self.tx_conf)["data"] @@ -407,7 +407,7 @@ def testDaiJoinContract(self): ) data = dai.constructor(1).buildTransaction(self.tx_conf)["data"] receipt, contractAddr = self.deploy_contract(self.sender, self.priv_key, data, storage_limit=(4096 + 64 * 2)) - dai_addr = Web3.toChecksumAddress(contractAddr) + dai_addr = Web3.to_checksum_address(contractAddr) CONTRACT_PATH = "contracts/Vat_bytecode.dat" file_dir = os.path.dirname(os.path.realpath(__file__)) @@ -417,7 +417,7 @@ def testDaiJoinContract(self): ) data = vat.constructor().buildTransaction(self.tx_conf)["data"] receipt, contractAddr = self.deploy_contract(self.sender, self.priv_key, data, storage_limit=(5632 + 64 * 2)) - vat_addr = Web3.toChecksumAddress(contractAddr) + vat_addr = Web3.to_checksum_address(contractAddr) CONTRACT_PATH = "contracts/DaiJoin_bytecode.dat" file_dir = os.path.dirname(os.path.realpath(__file__)) @@ -427,17 +427,17 @@ def testDaiJoinContract(self): ) data = dai_join.constructor(vat_addr, dai_addr).buildTransaction(self.tx_conf)["data"] receipt, contractAddr = self.deploy_contract(self.sender, self.priv_key, data, storage_limit=(1024 + 64 * 2)) - dai_join_addr = Web3.toChecksumAddress(contractAddr) + dai_join_addr = Web3.to_checksum_address(contractAddr) # mint dai tokens & give approval self.tx_conf["to"] = dai_addr - data = dai.functions.mint(Web3.toChecksumAddress(self.pub[0]), 100000).buildTransaction(self.tx_conf)["data"] + data = dai.functions.mint(Web3.to_checksum_address(self.pub[0]), 100000).buildTransaction(self.tx_conf)["data"] result = self.call_contract(self.sender, self.priv_key, dai_addr, data, 0, storage_limit=128) assert(result["outcomeStatus"] == "0x0") data = dai.functions.approve(dai_join_addr, 100000).buildTransaction(self.tx_conf)["data"] result = self.call_contract(self.pub[0], self.pri[0], dai_addr, data, 0, storage_limit=64) assert(result["outcomeStatus"] == "0x0") - data = dai.functions.allowance(Web3.toChecksumAddress(self.pub[0]), dai_join_addr).buildTransaction(self.tx_conf)["data"] + data = dai.functions.allowance(Web3.to_checksum_address(self.pub[0]), dai_join_addr).buildTransaction(self.tx_conf)["data"] result = self.rpc.call(dai_addr, data) assert_equal(int(result, 0), 100000) data = dai.functions.rely(dai_join_addr).buildTransaction(self.tx_conf)["data"] @@ -459,43 +459,43 @@ def testDaiJoinContract(self): # join self.tx_conf["to"] = dai_join_addr - data = dai_join.functions.join(Web3.toChecksumAddress(self.pub[0]), 50000).buildTransaction(self.tx_conf)["data"] + data = dai_join.functions.join(Web3.to_checksum_address(self.pub[0]), 50000).buildTransaction(self.tx_conf)["data"] result = self.call_contract(self.pub[0], self.pri[0], dai_join_addr, data, 0, storage_limit=320) assert(result["outcomeStatus"] == "0x0") # check self.tx_conf["to"] = dai_addr - data = dai.functions.balanceOf(Web3.toChecksumAddress(self.pub[0])).buildTransaction(self.tx_conf)["data"] + data = dai.functions.balanceOf(Web3.to_checksum_address(self.pub[0])).buildTransaction(self.tx_conf)["data"] result = self.rpc.call(dai_addr, data) assert_equal(int(result, 0), 50000) self.tx_conf["to"] = vat_addr - data = vat.functions.can(dai_join_addr, Web3.toChecksumAddress(self.pub[0])).buildTransaction(self.tx_conf)["data"] + data = vat.functions.can(dai_join_addr, Web3.to_checksum_address(self.pub[0])).buildTransaction(self.tx_conf)["data"] result = self.rpc.call(vat_addr, data) 
assert_equal(int(result, 0), 1) - data = vat.functions.dai(Web3.toChecksumAddress(self.pub[0])).buildTransaction(self.tx_conf)["data"] + data = vat.functions.dai(Web3.to_checksum_address(self.pub[0])).buildTransaction(self.tx_conf)["data"] result = self.rpc.call(vat_addr, data) assert_equal(int(result, 0), 50000000000000000000000000000000) # exit self.tx_conf["to"] = dai_join_addr - data = dai_join.functions.exit(Web3.toChecksumAddress(self.pub[0]), 50000).buildTransaction(self.tx_conf)["data"] + data = dai_join.functions.exit(Web3.to_checksum_address(self.pub[0]), 50000).buildTransaction(self.tx_conf)["data"] result = self.call_contract(self.pub[0], self.pri[0], dai_join_addr, data, 0, storage_limit=128) assert(result["outcomeStatus"] == "0x0") # check self.tx_conf["to"] = dai_addr - data = dai.functions.balanceOf(Web3.toChecksumAddress(self.pub[0])).buildTransaction(self.tx_conf)["data"] + data = dai.functions.balanceOf(Web3.to_checksum_address(self.pub[0])).buildTransaction(self.tx_conf)["data"] result = self.rpc.call(dai_addr, data) assert_equal(int(result, 0), 100000) self.tx_conf["to"] = vat_addr - data = vat.functions.can(dai_join_addr, Web3.toChecksumAddress(self.pub[0])).buildTransaction(self.tx_conf)["data"] + data = vat.functions.can(dai_join_addr, Web3.to_checksum_address(self.pub[0])).buildTransaction(self.tx_conf)["data"] result = self.rpc.call(vat_addr, data) assert_equal(int(result, 0), 0) - data = vat.functions.dai(Web3.toChecksumAddress(self.pub[0])).buildTransaction(self.tx_conf)["data"] + data = vat.functions.dai(Web3.to_checksum_address(self.pub[0])).buildTransaction(self.tx_conf)["data"] result = self.rpc.call(vat_addr, data) assert_equal(int(result, 0), 0) @@ -541,13 +541,13 @@ def run_test(self): file_path = os.path.join(file_dir, "..", "internal_contract", "metadata", "Staking.json") staking_contract_dict = json.loads(open(os.path.join(file_path), "r").read()) staking_contract = get_contract_instance(contract_dict=staking_contract_dict) - staking_contract_addr = Web3.toChecksumAddress("0888000000000000000000000000000000000002") + staking_contract_addr = Web3.to_checksum_address("0888000000000000000000000000000000000002") self.problem = "0x2bc79b7514884ab00da924607d71542cc4fed3beb8518e747726ae30ab6c7944" self.solution = "0xc4d2751c52311d0d7efe44e5c4195e058ad5ef4bb89b3e1761b24dc277b132c2" self.priv_key = default_config["GENESIS_PRI_KEY"] self.sender = encode_hex_0x(priv_to_addr(self.priv_key)) - self.sender_checksum = Web3.toChecksumAddress(self.sender) + self.sender_checksum = Web3.to_checksum_address(self.sender) self.pub = [] self.pri = [] self.rpc = RpcClient(self.nodes[0]) diff --git a/tests/contract_remove_test.py b/tests/contract_remove_test.py index bcab2ea14..05a3ba058 100644 --- a/tests/contract_remove_test.py +++ b/tests/contract_remove_test.py @@ -10,7 +10,7 @@ SNAPSHOT_EPOCH = 60 def temp_address(number: int): - return Web3.toChecksumAddress("{:#042x}".format(number + 100)) + return Web3.to_checksum_address("{:#042x}".format(number + 100)) class ContractRemoveTest(ConfluxTestFrameworkForContract): diff --git a/tests/erc20_test.py b/tests/erc20_test.py index c9688fe06..565f037dd 100755 --- a/tests/erc20_test.py +++ b/tests/erc20_test.py @@ -37,7 +37,7 @@ def run_test(self): nonce = 0 block_gen_thread = BlockGenThread(self.nodes, self.log) block_gen_thread.start() - self.tx_conf = {"from":Web3.toChecksumAddress(genesis_addr), "nonce":int_to_hex(nonce), "gas":int_to_hex(gas), "gasPrice":int_to_hex(gas_price), "chainId":0} + self.tx_conf = 
{"from":Web3.to_checksum_address(genesis_addr), "nonce":int_to_hex(nonce), "gas":int_to_hex(gas), "gasPrice":int_to_hex(gas_price), "chainId":0} raw_create = erc20_contract.constructor().buildTransaction(self.tx_conf) tx_data = decode_hex(raw_create["data"]) tx_create = create_transaction(pri_key=genesis_key, receiver=b'', nonce=nonce, gas_price=gas_price, data=tx_data, gas=gas, value=0, storage_limit=1920) @@ -60,7 +60,7 @@ def run_test(self): value = int((balance_map[sender_key] - ((tx_n - i) * 21000 * gas_price)) * random.random()) receiver_sk, _ = ec_random_keys() balance_map[receiver_sk] = value - tx_data = decode_hex(erc20_contract.functions.transfer(Web3.toChecksumAddress(encode_hex(priv_to_addr(receiver_sk))), value).buildTransaction(self.tx_conf)["data"]) + tx_data = decode_hex(erc20_contract.functions.transfer(Web3.to_checksum_address(encode_hex(priv_to_addr(receiver_sk))), value).buildTransaction(self.tx_conf)["data"]) tx = create_transaction(pri_key=sender_key, receiver=decode_hex(self.tx_conf["to"]), value=0, nonce=nonce, gas=gas, gas_price=gas_price, data=tx_data, storage_limit=64) r = random.randint(0, self.num_nodes - 1) @@ -82,7 +82,7 @@ def run_test(self): self.log.info("Pass") def get_balance(self, contract, token_address): - tx = contract.functions.balanceOf(Web3.toChecksumAddress(encode_hex(token_address))).buildTransaction(self.tx_conf) + tx = contract.functions.balanceOf(Web3.to_checksum_address(encode_hex(token_address))).buildTransaction(self.tx_conf) result = self.client.call(tx["to"], tx["data"]) balance = bytes_to_int(decode_hex(result)) self.log.debug("address=%s, balance=%s", encode_hex(token_address), balance) diff --git a/tests/estimation_test.py b/tests/estimation_test.py index 0a765ebe2..682482adf 100644 --- a/tests/estimation_test.py +++ b/tests/estimation_test.py @@ -14,7 +14,7 @@ BYTE_COLLATERAL = int(CFX / 1024) ENTRY_COLLATERAL = BYTE_COLLATERAL * 64 SPONSOR_INTERNAL_CONTRACT = "0888000000000000000000000000000000000001" -ZERO_ADDR = Web3.toChecksumAddress("0" * 40) +ZERO_ADDR = Web3.to_checksum_address("0" * 40) class EstimationTest(ConfluxTestFramework): @@ -60,7 +60,7 @@ def clear_user_balance(): tx = client.new_contract_tx(None, bytecode, gas=500000, storage_limit=1024) send_tx(tx) receipt = client.get_transaction_receipt(tx.hash_hex()) - contract = w3.eth.contract(abi=abi, address=Web3.toChecksumAddress(receipt["contractCreated"])) + contract = w3.eth.contract(abi=abi, address=Web3.to_checksum_address(receipt["contractCreated"])) contract_address = contract.address contract_base32_address = hex_to_b32_address(contract.address) diff --git a/tests/evm_space/base.py b/tests/evm_space/base.py index 5c901a651..b8c9b165b 100644 --- a/tests/evm_space/base.py +++ b/tests/evm_space/base.py @@ -29,7 +29,7 @@ def setup_network(self): ip = self.nodes[0].ip port = self.nodes[0].ethrpcport self.w3 = Web3(Web3.HTTPProvider(f'http://{ip}:{port}/')) - assert_equal(self.w3.isConnected(), True) + assert_equal(self.w3.is_connected(), True) def cross_space_transfer(self, to, value): to = to.replace('0x', '') diff --git a/tests/evm_space/debug_trace_tx_test.py b/tests/evm_space/debug_trace_tx_test.py index 17f05ee61..a98a527e3 100644 --- a/tests/evm_space/debug_trace_tx_test.py +++ b/tests/evm_space/debug_trace_tx_test.py @@ -95,7 +95,7 @@ def erc20_transfer_tx_trace(self, erc20_address): erc20 = self.w3.eth.contract(address=erc20_address, abi=abi) # balance = erc20.functions.balanceOf(self.evmAccount.address).call() - target_addr = 
Web3.toChecksumAddress("0x8b14d287b4150ff22ac73df8be720e933f659abc") + target_addr = Web3.to_checksum_address("0x8b14d287b4150ff22ac73df8be720e933f659abc") data = erc20.encodeABI(fn_name="transfer", args=[target_addr, 100]) diff --git a/tests/evm_space/eip1559_test.py b/tests/evm_space/eip1559_test.py index 72fcb793d..1188a9932 100755 --- a/tests/evm_space/eip1559_test.py +++ b/tests/evm_space/eip1559_test.py @@ -30,7 +30,7 @@ def setup_network(self): ip = self.nodes[0].ip port = self.nodes[0].ethrpcport self.w3 = Web3(Web3.HTTPProvider(f'http://{ip}:{port}/')) - assert_equal(self.w3.isConnected(), True) + assert_equal(self.w3.is_connected(), True) def run_test(self): diff --git a/tests/evm_space/filter_log_test.py b/tests/evm_space/filter_log_test.py index 2f5a1b198..8982674e7 100644 --- a/tests/evm_space/filter_log_test.py +++ b/tests/evm_space/filter_log_test.py @@ -75,7 +75,7 @@ async def run_async(self): ip = self.nodes[0].ip port = self.nodes[0].ethrpcport self.w3 = Web3(Web3.HTTPProvider(f"http://{ip}:{port}/")) - assert_equal(self.w3.isConnected(), True) + assert_equal(self.w3.is_connected(), True) # initialize EVM account self.evmAccount = self.w3.eth.account.privateKeyToAccount( diff --git a/tests/evm_space/filter_transaction_test.py b/tests/evm_space/filter_transaction_test.py index 8f464aa96..444979e49 100644 --- a/tests/evm_space/filter_transaction_test.py +++ b/tests/evm_space/filter_transaction_test.py @@ -38,7 +38,7 @@ def setup_network(self): ip = self.nodes[0].ip port = self.nodes[0].ethrpcport self.w3 = Web3(Web3.HTTPProvider(f"http://{ip}:{port}/")) - assert_equal(self.w3.isConnected(), True) + assert_equal(self.w3.is_connected(), True) async def run_async(self): client = self.rpc diff --git a/tests/evm_space/phantom_trace_test.py b/tests/evm_space/phantom_trace_test.py index 5ec612b27..f1b1cb3f5 100755 --- a/tests/evm_space/phantom_trace_test.py +++ b/tests/evm_space/phantom_trace_test.py @@ -453,7 +453,7 @@ def test_withdrawFromMapped(self): assert_equal(receipt["outcomeStatus"], "0x1") # failure # transfer funds to mapped account - receiver = Web3.toChecksumAddress(mapped_address(self.confluxContractAddr)) + receiver = Web3.to_checksum_address(mapped_address(self.confluxContractAddr)) nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) signed = self.evmAccount.signTransaction({ diff --git a/tests/evm_space/phantom_transaction_test.py b/tests/evm_space/phantom_transaction_test.py index fc54f741f..71daf4750 100755 --- a/tests/evm_space/phantom_transaction_test.py +++ b/tests/evm_space/phantom_transaction_test.py @@ -46,7 +46,7 @@ def setup_network(self): ip = self.nodes[0].ip port = self.nodes[0].ethrpcport self.w3 = Web3(Web3.HTTPProvider(f'http://{ip}:{port}/')) - assert_equal(self.w3.isConnected(), True) + assert_equal(self.w3.is_connected(), True) def run_test(self): # initialize Conflux account diff --git a/tests/fixed_supply_token_test.py b/tests/fixed_supply_token_test.py index 7f99058fa..6e859003f 100755 --- a/tests/fixed_supply_token_test.py +++ b/tests/fixed_supply_token_test.py @@ -29,14 +29,14 @@ def setup_contract(self): def generate_transactions(self, _): self.call_contract_function(self.contract, "transfer", - [Web3.toChecksumAddress(priv_to_addr(self.accounts[0])), 1000], + [Web3.to_checksum_address(priv_to_addr(self.accounts[0])), 1000], self.default_account_key, self.contract_address, True, True, storage_limit=512) self.call_contract_function(self.contract, "approve", - [Web3.toChecksumAddress(priv_to_addr(self.accounts[1])), 500], + 
[Web3.to_checksum_address(priv_to_addr(self.accounts[1])), 500], self.accounts[0], self.contract_address, True, True, storage_limit=512) self.call_contract_function(self.contract, "transferFrom", - [Web3.toChecksumAddress(priv_to_addr(self.accounts[0])), - Web3.toChecksumAddress(priv_to_addr(self.default_account_key)), 300], + [Web3.to_checksum_address(priv_to_addr(self.accounts[0])), + Web3.to_checksum_address(priv_to_addr(self.default_account_key)), 300], self.accounts[1], self.contract_address, True, True, storage_limit=512) diff --git a/tests/issue988_test.py b/tests/issue988_test.py index 797c208ac..645482b8c 100755 --- a/tests/issue988_test.py +++ b/tests/issue988_test.py @@ -90,7 +90,7 @@ def call_contract_function_rpc(self, contract, name, args, contract_addr): attrs["gas"] = int_to_hex(gas) attrs["gasPrice"] = int_to_hex(gas_price) attrs["chainId"] = 0 - attrs["to"] = Web3.toChecksumAddress(contract_addr) + attrs["to"] = Web3.to_checksum_address(contract_addr) tx = func(*args).buildTransaction(attrs) return RpcClient(self.nodes[0]).call(contract_addr, tx["data"]) @@ -112,7 +112,7 @@ def run_test(self): gas = CONTRACT_DEFAULT_GAS block_gen_thread = BlockGenThread(self.nodes, self.log) block_gen_thread.start() - self.tx_conf = {"from":Web3.toChecksumAddress(encode_hex_0x(genesis_addr)), "nonce":int_to_hex(nonce), "gas":int_to_hex(gas), "gasPrice":int_to_hex(gas_price), "chainId":0} + self.tx_conf = {"from":Web3.to_checksum_address(encode_hex_0x(genesis_addr)), "nonce":int_to_hex(nonce), "gas":int_to_hex(gas), "gasPrice":int_to_hex(gas_price), "chainId":0} # setup balance for node 0 node = self.nodes[0] diff --git a/tests/light/rpc_test.py b/tests/light/rpc_test.py index 6f2216a34..6a0775699 100755 --- a/tests/light/rpc_test.py +++ b/tests/light/rpc_test.py @@ -61,9 +61,9 @@ def _setup_stake_contract(self, addr, priv): file_path = os.path.join(file_dir, "../..", "internal_contract", "metadata", "Staking.json") staking_contract_dict = json.loads(open(os.path.join(file_path), "r").read()) staking_contract = get_contract_instance(contract_dict=staking_contract_dict) - contract_addr = Web3.toChecksumAddress("0888000000000000000000000000000000000002") + contract_addr = Web3.to_checksum_address("0888000000000000000000000000000000000002") tx_conf = { - "from": Web3.toChecksumAddress(addr), + "from": Web3.to_checksum_address(addr), "to": contract_addr, "nonce": 0, "gas": 3_000_000, @@ -87,18 +87,18 @@ def _setup_sponsor(self, contractAddr): whitelist_control_addr = "0x0888000000000000000000000000000000000001" tx_conf = { - "from": Web3.toChecksumAddress(self.rpc[FULLNODE0].GENESIS_ADDR), + "from": Web3.to_checksum_address(self.rpc[FULLNODE0].GENESIS_ADDR), "gas": 3_000_000, "gasPrice": 1, "chainId": 0, } # setSponsorForGas - data = whitelist_control.functions.setSponsorForGas(Web3.toChecksumAddress(contractAddr), 2000000).buildTransaction({"to":Web3.toChecksumAddress(whitelist_control_addr), **tx_conf})["data"] + data = whitelist_control.functions.setSponsorForGas(Web3.to_checksum_address(contractAddr), 2000000).buildTransaction({"to":Web3.to_checksum_address(whitelist_control_addr), **tx_conf})["data"] self.call_contract(contract=whitelist_control_addr, data_hex=data, value=20000000000000000000) # setSponsorForCollateral - data = whitelist_control.functions.setSponsorForCollateral(Web3.toChecksumAddress(contractAddr)).buildTransaction({"to":Web3.toChecksumAddress(whitelist_control_addr), **tx_conf})["data"] + data = 
whitelist_control.functions.setSponsorForCollateral(Web3.to_checksum_address(contractAddr)).buildTransaction({"to":Web3.to_checksum_address(whitelist_control_addr), **tx_conf})["data"] self.call_contract(contract=whitelist_control_addr, data_hex=data, value=20000000000000000000) # add to whitelist diff --git a/tests/params_dao_vote_test.py b/tests/params_dao_vote_test.py index 3d2a6934c..571d09db3 100755 --- a/tests/params_dao_vote_test.py +++ b/tests/params_dao_vote_test.py @@ -295,7 +295,7 @@ def run_test(self): assert_equal(2_000_000 * 10 ** 18, int(client.call("0x0888000000000000000000000000000000000007", eth_utils.encode_hex(data)), 0)) data = get_contract_function_data(params_control_contract, "totalVotes", args=[round]) total = client.call("0x0888000000000000000000000000000000000007", eth_utils.encode_hex(data)) - data = get_contract_function_data(params_control_contract, "readVote", args=[Web3.toChecksumAddress(client.GENESIS_ADDR)]) + data = get_contract_function_data(params_control_contract, "readVote", args=[Web3.to_checksum_address(client.GENESIS_ADDR)]) vote = client.call("0x0888000000000000000000000000000000000007", eth_utils.encode_hex(data)) assert_equal(total, vote) diff --git a/tests/pubsub/eth_logs_test.py b/tests/pubsub/eth_logs_test.py index 1283c9577..a58f0a6f3 100755 --- a/tests/pubsub/eth_logs_test.py +++ b/tests/pubsub/eth_logs_test.py @@ -82,7 +82,7 @@ async def run_async(self): ip = self.nodes[0].ip port = self.nodes[0].ethrpcport self.w3 = Web3(Web3.HTTPProvider(f'http://{ip}:{port}/')) - assert_equal(self.w3.isConnected(), True) + assert_equal(self.w3.is_connected(), True) # initialize EVM account self.evmAccount = self.w3.eth.account.privateKeyToAccount(DEFAULT_TEST_ACCOUNT_KEY) diff --git a/tests/rpc/test_token_supply_info.py b/tests/rpc/test_token_supply_info.py index a5c672355..1d92d5ac7 100644 --- a/tests/rpc/test_token_supply_info.py +++ b/tests/rpc/test_token_supply_info.py @@ -37,7 +37,7 @@ def test_token_supply_info(self): } tx_conf = REQUEST_BASE tx_conf["nonce"] = 0 - tx_conf["to"] = Web3.toChecksumAddress("0888000000000000000000000000000000000002") + tx_conf["to"] = Web3.to_checksum_address("0888000000000000000000000000000000000002") file_path = os.path.join(file_dir, "..", "..", "internal_contract", "metadata", "Staking.json") staking_contract_dict = json.loads(open(os.path.join(file_path), "r").read()) staking_contract = get_contract_instance(contract_dict=staking_contract_dict) diff --git a/tests/test_framework/contracts.py b/tests/test_framework/contracts.py index d05dc1052..4e6c1b401 100644 --- a/tests/test_framework/contracts.py +++ b/tests/test_framework/contracts.py @@ -62,7 +62,7 @@ def cfx_internal_contract(name: InternalContractName, framework: ConfluxTestFram def _add_address(self: Contract, address: str) -> Contract: w3 = Web3() new_contract = w3.eth.contract( - abi=self.abi, bytecode=self.bytecode, address=Web3.toChecksumAddress(address)) + abi=self.abi, bytecode=self.bytecode, address=Web3.to_checksum_address(address)) new_contract.framework = self.framework _enact_contract(new_contract) @@ -232,9 +232,9 @@ def before_test(self): self.deploy_create2() self.genesis_key = default_config["GENESIS_PRI_KEY"] - self.genesis_addr = Web3.toChecksumAddress(encode_hex_0x(priv_to_addr(self.genesis_key))) + self.genesis_addr = Web3.to_checksum_address(encode_hex_0x(priv_to_addr(self.genesis_key))) self.genesis_key2 = default_config["GENESIS_PRI_KEY_2"] - self.genesis_addr2 = Web3.toChecksumAddress(encode_hex_0x(priv_to_addr(self.genesis_key2))) + 
self.genesis_addr2 = Web3.to_checksum_address(encode_hex_0x(priv_to_addr(self.genesis_key2))) def cfx_contract(self, name): return cfx_contract(name, self) diff --git a/tests/web3_test.py b/tests/web3_test.py index 95f322f8d..3ea0db418 100644 --- a/tests/web3_test.py +++ b/tests/web3_test.py @@ -114,7 +114,7 @@ def test_deploy_1820(self): client.send_tx(tx, True) self.wait_for_tx([tx], True) - eip1820 = Web3.toChecksumAddress("1820a4b7618bde71dce8cdc73aab6c95905fad24") + eip1820 = Web3.to_checksum_address("1820a4b7618bde71dce8cdc73aab6c95905fad24") receipt = client.get_transaction_receipt(tx.hash.hex()) assert_greater_than(int(receipt['gasUsed'],16), 1_500_000 + 21_000) assert_equal(len(self.w3.eth.getCode(eip1820)), 2501) @@ -126,7 +126,7 @@ def run_test(self): port = self.nodes[0].ethrpcport self.w3 = Web3(Web3.HTTPProvider(f'http://{ip}:{port}/')) - assert_equal(self.w3.isConnected(), True) + assert_equal(self.w3.is_connected(), True) account = self.w3.eth.account.privateKeyToAccount( '0x348ce564d427a3311b6536bbcff9390d69395b06ed6c486954e971d960fe8709') @@ -138,7 +138,7 @@ def run_test(self): self.test_deploy_1820() # Send eip-155 transaction - receiver = Web3.toChecksumAddress("10000000000000000000000000000000000000aa") + receiver = Web3.to_checksum_address("10000000000000000000000000000000000000aa") signed = account.signTransaction( {"to": receiver, "value": 1 * 10 ** 17, "gasPrice": 1, "gas": 21000, "nonce": 0, "chainId": 10}) tx_hash = signed["hash"] @@ -168,7 +168,7 @@ def run_test(self): # Send to transaction mapped_sender = keccak_256(self.genesis_addr).digest()[-20:] - receiver = Web3.toChecksumAddress(mapped_sender.hex()) + receiver = Web3.to_checksum_address(mapped_sender.hex()) signed = account.signTransaction( {"to": receiver, "value": 2 * 10 ** 17, "gasPrice": 1, "gas": 21000, "nonce": 2}) self.w3.eth.sendRawTransaction(signed["rawTransaction"]) diff --git a/tests/withdraw_deposit_test.py b/tests/withdraw_deposit_test.py index 8a3bad676..dd58e63bc 100755 --- a/tests/withdraw_deposit_test.py +++ b/tests/withdraw_deposit_test.py @@ -49,7 +49,7 @@ def run_test(self): gas = CONTRACT_DEFAULT_GAS block_gen_thread = BlockGenThread(self.nodes, self.log) block_gen_thread.start() - self.tx_conf = {"from":Web3.toChecksumAddress(encode_hex_0x(genesis_addr)), "nonce":int_to_hex(nonce), "gas":int_to_hex(gas), "gasPrice":int_to_hex(gas_price), "chainId":0} + self.tx_conf = {"from":Web3.to_checksum_address(encode_hex_0x(genesis_addr)), "nonce":int_to_hex(nonce), "gas":int_to_hex(gas), "gasPrice":int_to_hex(gas_price), "chainId":0} total_num_blocks = 2 * 60 * 60 * 24 * 365 accumulate_interest_rate = [2 ** 80 * total_num_blocks] @@ -68,7 +68,7 @@ def run_test(self): assert_equal(client.get_balance(addr), 5 * 10 ** 18) assert_equal(client.get_staking_balance(addr), 0) - self.tx_conf["to"] = Web3.toChecksumAddress("0888000000000000000000000000000000000002") + self.tx_conf["to"] = Web3.to_checksum_address("0888000000000000000000000000000000000002") # deposit 10**18 tx_data = decode_hex(staking_contract.functions.deposit(10 ** 18).buildTransaction(self.tx_conf)["data"]) tx = client.new_tx(value=0, sender=addr, receiver=self.tx_conf["to"], gas=gas, data=tx_data, priv_key=priv_key) From e4a9ac3c9554d3a4bba7f014cf90204c9e110590 Mon Sep 17 00:00:00 2001 From: darwintree <17946284+darwintree@users.noreply.github.com> Date: Tue, 5 Nov 2024 14:21:17 +0800 Subject: [PATCH 11/31] chore: continue to migrate to web3py7 --- dev-support/dep_pip3.sh | 6 +- tests/cip107_test.py | 3 - 
tests/cip118_activation_test.py | 4 - tests/cip98_test.py | 8 +- tests/conflux/rpc.py | 19 +-- tests/conflux/utils.py | 9 +- tests/contract_bench_test.py | 132 +++++++++--------- tests/erc20_test.py | 6 +- tests/estimation_test.py | 22 +-- tests/evm_full_history_state_test.py | 2 +- tests/evm_space/account_pending_tx_test.py | 6 +- tests/evm_space/base.py | 12 +- tests/evm_space/debug_trace_tx_test.py | 30 ++-- tests/evm_space/eip1559_test.py | 24 ++-- tests/evm_space/estimate_and_call_test.py | 10 +- tests/evm_space/filter_log_test.py | 16 +-- tests/evm_space/filter_transaction_test.py | 22 +-- tests/evm_space/log_filtering_test.py | 14 +- tests/evm_space/out_of_balance_test.py | 8 +- tests/evm_space/phantom_trace_test.py | 34 ++--- tests/evm_space/phantom_transaction_test.py | 20 +-- tests/evm_space/phantom_tx_hash_test.py | 10 +- tests/evm_space/rpc_error_test.py | 60 ++++---- tests/evm_space/rpc_test.py | 6 +- tests/evm_space/trace_test.py | 14 +- tests/evm_space/tx_and_receipt_test.py | 10 +- tests/issue2483_test.py | 2 +- tests/issue988_test.py | 4 +- tests/light/rpc_test.py | 8 +- tests/overlay_account_storage_test.py | 5 +- tests/pos/hard_fork_test.py | 2 +- tests/pubsub/eth_logs_test.py | 22 +-- tests/rpc/test_token_supply_info.py | 4 +- tests/storage_value_unchange_test.py | 4 - tests/test_framework/contracts.py | 7 +- .../smart_contract_bench_base.py | 2 +- tests/tools/hard_fork_tool.py | 2 +- tests/web3_test.py | 27 ++-- tests/withdraw_deposit_test.py | 12 +- 39 files changed, 300 insertions(+), 308 deletions(-) diff --git a/dev-support/dep_pip3.sh b/dev-support/dep_pip3.sh index c4a4fa8b7..8570e94d1 100755 --- a/dev-support/dep_pip3.sh +++ b/dev-support/dep_pip3.sh @@ -10,11 +10,11 @@ function install() { install cfx-account install eth-utils -install rlp==1.2.0 -install py-ecc==5.2.0 +# install rlp==1.2.0 +# install py-ecc==5.2.0 install coincurve==19.0.1 install pysha3 -install trie==1.4.0 +# install trie==1.4.0 install web3==7.4.0 install py-solc-x install jsonrpcclient==3.3.6 diff --git a/tests/cip107_test.py b/tests/cip107_test.py index 8af1318f9..6c14b5b7f 100644 --- a/tests/cip107_test.py +++ b/tests/cip107_test.py @@ -1,6 +1,3 @@ -from web3 import Web3 -from web3.contract import ContractFunction, Contract - from conflux.rpc import RpcClient from conflux.utils import * from test_framework.util import * diff --git a/tests/cip118_activation_test.py b/tests/cip118_activation_test.py index 619333364..201cbc916 100644 --- a/tests/cip118_activation_test.py +++ b/tests/cip118_activation_test.py @@ -1,7 +1,3 @@ -from web3 import Web3 -from web3.contract import ContractFunction, Contract - -from conflux.rpc import RpcClient from conflux.utils import * from test_framework.contracts import ConfluxTestFrameworkForContract, ZERO_ADDRESS from test_framework.util import * diff --git a/tests/cip98_test.py b/tests/cip98_test.py index 80c1137de..9005913fc 100644 --- a/tests/cip98_test.py +++ b/tests/cip98_test.py @@ -30,7 +30,7 @@ def run_test(self): w3 = Web3(Web3.HTTPProvider(f'http://{ip}:{port}/')) start_p2p_connection(self.nodes) priv = default_config["GENESIS_PRI_KEY"] - account = w3.eth.account.privateKeyToAccount(priv) + account = w3.eth.account.from_key(priv) # Create forks to make block number != epoch number client.generate_block() @@ -50,10 +50,10 @@ def run_test(self): hash_contract_abi_path = join(dirname(realpath(__file__)), *"contracts/cip98_test.json".split("/")) abi = json.loads(open(hash_contract_abi_path, "r").read()) - signed = 
account.signTransaction(dict(data=bytecode, gas=200000, nonce=0, gasPrice=1, chainId=EVM_CHAIN_ID)) - w3.eth.sendRawTransaction(signed["rawTransaction"]) + signed = account.sign_transaction(dict(data=bytecode, gas=200000, nonce=0, gasPrice=1, chainId=EVM_CHAIN_ID)) + w3.eth.send_raw_transaction(signed["raw_transaction"]) client.generate_blocks(20, 1) - receipt = w3.eth.waitForTransactionReceipt(signed["hash"]) + receipt = w3.eth.wait_for_transaction_receipt(signed["hash"]) contract = w3.eth.contract(abi=abi, address=receipt["contractAddress"]) assert_equal(encode_hex(contract.functions.query().call()), ZERO_HASH) diff --git a/tests/conflux/rpc.py b/tests/conflux/rpc.py index 0dfcf558d..2d6350a8d 100644 --- a/tests/conflux/rpc.py +++ b/tests/conflux/rpc.py @@ -9,6 +9,7 @@ import rlp import json + from .address import hex_to_b32_address, b32_address_to_hex, DEFAULT_PY_TEST_CHAIN_ID from .config import DEFAULT_PY_TEST_CHAIN_ID, default_config from .transactions import CONTRACT_DEFAULT_GAS, Transaction, UnsignedTransaction @@ -146,7 +147,7 @@ def generate_custom_block(self, parent_hash: str, referee: list, txs: list[Union raw_txs = [] for tx in txs: if isinstance(tx, SignedTransaction): - raw_txs.append(tx.rawTransaction) + raw_txs.append(tx.raw_transaction) elif isinstance(tx, Transaction): raw_txs.append(rlp.encode(tx)) else: @@ -159,7 +160,7 @@ def generate_custom_block(self, parent_hash: str, referee: list, txs: list[Union return block_hash def generate_block_with_fake_txs(self, txs: list, adaptive=False, tx_data_len: int = 0) -> str: - encoded_txs = eth_utils.encode_hex(rlp.encode(txs)) + encoded_txs = eth_utils.hexadecimal.encode_hex(rlp.encode(txs)) block_hash = self.node.test_generateBlockWithFakeTxs(encoded_txs, adaptive, tx_data_len) assert_is_hash_string(block_hash) return block_hash @@ -319,7 +320,7 @@ def clear_tx_pool(self): # a temporary patch for transaction compatibility def send_tx(self, tx: Union[Transaction, SignedTransaction], wait_for_receipt=False, wait_for_catchup=True) -> str: if isinstance(tx, SignedTransaction): - encoded = cast(str, tx.rawTransaction.hex()) + encoded = cast(str, tx.raw_transaction.to_0x_hex()) else: encoded = eth_utils.encode_hex(rlp.encode(tx)) tx_hash = self.send_raw_tx(encoded, wait_for_catchup=wait_for_catchup) @@ -403,7 +404,7 @@ def new_tx(self, *, sender=None, receiver=None, nonce=None, gas_price=1, gas=210 if epoch_height is None: epoch_height = self.epoch_number() - action = eth_utils.decode_hex(receiver) + action = eth_utils.hexadecimal.decode_hex(receiver) tx = UnsignedTransaction(nonce, gas_price, gas, action, value, data, storage_limit, epoch_height, chain_id) if sign: @@ -472,12 +473,12 @@ def new_contract_tx(self, receiver: Optional[str], data_hex: str = None, sender= nonce = int(nonce, 0) if receiver is not None: - action = eth_utils.decode_hex(receiver) + action = eth_utils.hexadecimal.decode_hex(receiver) else: action = b'' if data_hex is None: data_hex = "0x" - data = eth_utils.decode_hex(data_hex) + data = eth_utils.hexadecimal.decode_hex(data_hex) if type(gas) is str: gas = int(gas, 0) @@ -672,7 +673,7 @@ def wait_for_pos_register(self, priv_key=None, stake_value=2_000_000, voting_pow receiver="0x0888000000000000000000000000000000000002", gas=CONTRACT_DEFAULT_GAS) self.send_tx(stake_tx, wait_for_receipt=True) data, pos_identifier = self.node.test_posRegister(int_to_hex(voting_power), 0 if legacy else 1) - register_tx = self.new_tx(priv_key=priv_key, data=eth_utils.decode_hex(data), value=0, + register_tx = 
self.new_tx(priv_key=priv_key, data=eth_utils.hexadecimal.decode_hex(data), value=0, receiver="0x0888000000000000000000000000000000000005", gas=CONTRACT_DEFAULT_GAS, storage_limit=1024) register_tx_hash = self.send_tx(register_tx, wait_for_receipt=True) @@ -770,5 +771,5 @@ def get_contract_function_data(contract, name, args): attrs = { **REQUEST_BASE, } - tx_data = func(*args).buildTransaction(attrs) - return eth_utils.decode_hex(tx_data['data']) + tx_data = func(*args).build_transaction(attrs) + return eth_utils.hexadecimal.decode_hex(tx_data['data']) diff --git a/tests/conflux/utils.py b/tests/conflux/utils.py index 667f48e75..33bb36f58 100644 --- a/tests/conflux/utils.py +++ b/tests/conflux/utils.py @@ -1,11 +1,14 @@ import re import sha3 as _sha3 -from py_ecc.secp256k1 import privtopub, ecdsa_raw_sign, ecdsa_raw_recover +from py_ecc.secp256k1.secp256k1 import privtopub, ecdsa_raw_sign, ecdsa_raw_recover import rlp from rlp.sedes import big_endian_int, BigEndianInt, Binary -from eth_utils import encode_hex as encode_hex_0x -from eth_utils import decode_hex, int_to_big_endian, big_endian_to_int +from eth_utils.hexadecimal import ( + encode_hex as encode_hex_0x, + decode_hex +) +from eth_utils.encoding import int_to_big_endian, big_endian_to_int from rlp.utils import ALL_BYTES import random import coincurve diff --git a/tests/contract_bench_test.py b/tests/contract_bench_test.py index faf552e59..2f0740cb8 100755 --- a/tests/contract_bench_test.py +++ b/tests/contract_bench_test.py @@ -48,7 +48,7 @@ def testEventContract(self): self.tx_conf["to"] = contractAddr # interact with foo() - data = contract.functions.foo().buildTransaction(self.tx_conf)["data"] + data = contract.functions.foo().build_transaction(self.tx_conf)["data"] result = self.call_contract(self.sender, self.priv_key, contractAddr, data, storage_limit=64) logs = self.rpc.get_logs(self.filter) assert_equal(len(logs), l + 2) @@ -57,7 +57,7 @@ def testEventContract(self): # interact with goo(10), will pass modifier, emit new event - data = contract.functions.goo(10).buildTransaction(self.tx_conf)["data"] + data = contract.functions.goo(10).build_transaction(self.tx_conf)["data"] result = self.call_contract(self.sender, self.priv_key, contractAddr, data) logs = self.rpc.get_logs(self.filter) assert_equal(len(logs), l + 3) @@ -65,23 +65,23 @@ def testEventContract(self): assert_equal(logs[-1]["topics"][2], self.number_to_topic(11)) # interact with goo(10), will not pass modifier, no event emitted - data = contract.functions.goo(10).buildTransaction(self.tx_conf)["data"] + data = contract.functions.goo(10).build_transaction(self.tx_conf)["data"] result = self.call_contract(self.sender, self.priv_key, contractAddr, data) logs = self.rpc.get_logs(self.filter) assert_equal(len(logs), l + 3) # call const function hoo() - data = contract.functions.hoo().buildTransaction(self.tx_conf)["data"] + data = contract.functions.hoo().build_transaction(self.tx_conf)["data"] result = self.rpc.call(contractAddr, data) assert_equal(result, self.number_to_topic(11)) # call const function byte32oo(solution) - data = contract.functions.byte32oo(self.solution).buildTransaction(self.tx_conf)["data"] + data = contract.functions.byte32oo(self.solution).build_transaction(self.tx_conf)["data"] result = self.rpc.call(contractAddr, data) assert_equal(result, self.solution) # call const function getSha256(solution) - data = contract.functions.getSha256(self.solution).buildTransaction(self.tx_conf)["data"] + data = 
contract.functions.getSha256(self.solution).build_transaction(self.tx_conf)["data"] result = self.rpc.call(contractAddr, data) assert_equal(result, self.problem) @@ -98,20 +98,20 @@ def testBallotContract(self): ) # deploy contract - data = contract.constructor(10).buildTransaction(self.tx_conf)["data"] + data = contract.constructor(10).build_transaction(self.tx_conf)["data"] receipt, contractAddr = self.deploy_contract(self.sender, self.priv_key, data, storage_limit=(1024 + 64 * 3)) contractAddr = Web3.to_checksum_address(contractAddr) self.tx_conf["to"] = contractAddr # interact with vote() - data = contract.functions.vote(5).buildTransaction(self.tx_conf)["data"] + data = contract.functions.vote(5).build_transaction(self.tx_conf)["data"] result = self.call_contract(self.sender, self.priv_key, contractAddr, data, storage_limit=64 * 2) logs = self.rpc.get_logs(self.filter) assert_equal(len(logs), l + 1) assert_equal(logs[-1]["data"], self.number_to_topic(5)) # call const function winningProposal() - data = contract.functions.winningProposal().buildTransaction(self.tx_conf)["data"] + data = contract.functions.winningProposal().build_transaction(self.tx_conf)["data"] result = self.rpc.call(contractAddr, data) assert_equal(result, self.number_to_topic(5)) @@ -128,7 +128,7 @@ def testHTLCContract(self): ) # deploy contract - data = contract.constructor().buildTransaction(self.tx_conf)["data"] + data = contract.constructor().build_transaction(self.tx_conf)["data"] receipt, contractAddr = self.deploy_contract(self.sender, self.priv_key, data, storage_limit=2560) tx_hash = receipt['transactionHash'] contractAddr = Web3.to_checksum_address(contractAddr) @@ -139,7 +139,7 @@ def testHTLCContract(self): assert_equal(logs[-1]["topics"][2], self.number_to_topic(16)) # call getNow() - data = contract.functions.getNow().buildTransaction(self.tx_conf)["data"] + data = contract.functions.getNow().build_transaction(self.tx_conf)["data"] result = self.rpc.call(contractAddr, data) assert(int(result, 0) - int(time.time()) < 5) @@ -148,7 +148,7 @@ def testHTLCContract(self): gas = CONTRACT_DEFAULT_GAS # interact with newContract(), sender send conflux to himself time_lock = int(time.time()) + 7200 - data = contract.functions.newContract(self.sender_checksum, self.problem, time_lock).buildTransaction(self.tx_conf)["data"]; + data = contract.functions.newContract(self.sender_checksum, self.problem, time_lock).build_transaction(self.tx_conf)["data"]; cost = 5000000000000000000 result = self.call_contract(self.sender, self.priv_key, contractAddr, data, cost, storage_limit=320, gas=gas) logs = self.rpc.get_logs(self.filter) @@ -160,7 +160,7 @@ def testHTLCContract(self): # call getContract cid0 = contract_id - data = contract.functions.getContract(contract_id).buildTransaction(self.tx_conf)["data"] + data = contract.functions.getContract(contract_id).build_transaction(self.tx_conf)["data"] result = self.rpc.call(contractAddr, data) result = result[2:] res = ['0x'+result[i * 64 : (i + 1) * 64] for i in range(8)] @@ -174,7 +174,7 @@ def testHTLCContract(self): assert_equal(int(res[7], 0), 0) # interact with withdraw() - data = contract.functions.withdraw(contract_id, self.solution).buildTransaction(self.tx_conf)["data"] + data = contract.functions.withdraw(contract_id, self.solution).build_transaction(self.tx_conf)["data"] result = self.call_contract(self.sender, self.priv_key, contractAddr, data, storage_limit=128) assert_equal(self.rpc.get_balance(contractAddr), 0) c2 = 
self.rpc.get_collateral_for_storage(self.sender) @@ -184,7 +184,7 @@ def testHTLCContract(self): assert_equal(len(logs), l + 3) # call getContract - data = contract.functions.getContract(contract_id).buildTransaction(self.tx_conf)["data"] + data = contract.functions.getContract(contract_id).build_transaction(self.tx_conf)["data"] result = self.rpc.call(contractAddr, data) result = result[2:] res = ['0x'+result[i * 64 : (i + 1) * 64] for i in range(8)] @@ -211,7 +211,7 @@ def testPayContract(self): ) # deploy contract - data = contract.constructor().buildTransaction(self.tx_conf)["data"] + data = contract.constructor().build_transaction(self.tx_conf)["data"] receipt, contractAddr = self.deploy_contract(self.sender, self.priv_key, data, storage_limit=512) contractAddr = Web3.to_checksum_address(contractAddr) self.tx_conf["to"] = contractAddr @@ -220,7 +220,7 @@ def testPayContract(self): b0 = self.rpc.get_balance(self.sender) # interact with recharge() - data = contract.functions.recharge().buildTransaction(self.tx_conf)["data"] + data = contract.functions.recharge().build_transaction(self.tx_conf)["data"] cost = 5000000000000000000 result = self.call_contract(self.sender, self.priv_key, contractAddr, data, cost) b1 = self.rpc.get_balance(self.sender) @@ -228,7 +228,7 @@ def testPayContract(self): assert_equal(bc, cost) #interact with withdraw - data = contract.functions.withdraw(self.sender_checksum).buildTransaction(self.tx_conf)["data"] + data = contract.functions.withdraw(self.sender_checksum).build_transaction(self.tx_conf)["data"] result = self.call_contract(self.sender, self.priv_key, contractAddr, data, 0) b2 = self.rpc.get_balance(self.sender) bc = self.rpc.get_balance(contractAddr) @@ -248,7 +248,7 @@ def testMappingContract(self): ) # deploy contract - data = contract.constructor(1).buildTransaction(self.tx_conf)["data"] + data = contract.constructor(1).build_transaction(self.tx_conf)["data"] receipt, contractAddr = self.deploy_contract(self.sender, self.priv_key, data, storage_limit=(1536 + 64)) tx_hash = receipt['transactionHash'] contractAddr = Web3.to_checksum_address(contractAddr) @@ -256,31 +256,31 @@ def testMappingContract(self): c = "0x81f3521d71990945b99e1c592750d7157f2b545f" def check_wards(x, y, z): - data = contract.functions.wards(Web3.to_checksum_address(self.pub[0])).buildTransaction(self.tx_conf)["data"] + data = contract.functions.wards(Web3.to_checksum_address(self.pub[0])).build_transaction(self.tx_conf)["data"] result = self.rpc.call(contractAddr, data) A = int(result, 0) assert(A == x) - data = contract.functions.wards(self.sender_checksum).buildTransaction(self.tx_conf)["data"] + data = contract.functions.wards(self.sender_checksum).build_transaction(self.tx_conf)["data"] result = self.rpc.call(contractAddr, data) B = int(result, 0) assert(B == y) - data = contract.functions.wards(Web3.to_checksum_address(c)).buildTransaction(self.tx_conf)["data"] + data = contract.functions.wards(Web3.to_checksum_address(c)).build_transaction(self.tx_conf)["data"] result = self.rpc.call(contractAddr, data) C = int(result, 0) assert(C == z) # deny pub[0] check_wards(0, 2, 0) - data = contract.functions.set1(Web3.to_checksum_address(c)).buildTransaction(self.tx_conf)["data"] + data = contract.functions.set1(Web3.to_checksum_address(c)).build_transaction(self.tx_conf)["data"] result = self.call_contract(self.sender, self.priv_key, contractAddr, data, storage_limit=64) check_wards(0, 2, 1) - data = 
contract.functions.set2(Web3.to_checksum_address(self.pub[0])).buildTransaction(self.tx_conf)["data"] + data = contract.functions.set2(Web3.to_checksum_address(self.pub[0])).build_transaction(self.tx_conf)["data"] result = self.call_contract(self.sender, self.priv_key, contractAddr, data, storage_limit=64) check_wards(2, 2, 1) - data = contract.functions.set0(Web3.to_checksum_address(c)).buildTransaction(self.tx_conf)["data"] + data = contract.functions.set0(Web3.to_checksum_address(c)).build_transaction(self.tx_conf)["data"] result = self.call_contract(self.sender, self.priv_key, contractAddr, data, storage_limit=64) check_wards(2, 2, 0) - data = contract.functions.set0(Web3.to_checksum_address(self.pub[0])).buildTransaction(self.tx_conf)["data"] + data = contract.functions.set0(Web3.to_checksum_address(self.pub[0])).build_transaction(self.tx_conf)["data"] result = self.call_contract(self.sender, self.priv_key, contractAddr, data, storage_limit=64) check_wards(0, 2, 0) @@ -298,7 +298,7 @@ def testDaiContract(self): ) # deploy contract - data = contract.constructor(1).buildTransaction(self.tx_conf)["data"] + data = contract.constructor(1).build_transaction(self.tx_conf)["data"] receipt, contractAddr = self.deploy_contract(self.sender, self.priv_key, data, storage_limit=(4096 + 64 * 2)) tx_hash = receipt['transactionHash'] contractAddr = Web3.to_checksum_address(contractAddr) @@ -306,61 +306,61 @@ def testDaiContract(self): # rely [0,5) for i in range(5): - data = contract.functions.rely(Web3.to_checksum_address(self.pub[i])).buildTransaction(self.tx_conf)["data"] + data = contract.functions.rely(Web3.to_checksum_address(self.pub[i])).build_transaction(self.tx_conf)["data"] result = self.call_contract(self.sender, self.priv_key, contractAddr, data, 0, storage_limit=64) assert_equal(result["outcomeStatus"], "0x0") # deny 1, 3 for i in range(5): if (i % 2 == 1): - data = contract.functions.deny(Web3.to_checksum_address(self.pub[i])).buildTransaction(self.tx_conf)["data"] + data = contract.functions.deny(Web3.to_checksum_address(self.pub[i])).build_transaction(self.tx_conf)["data"] result = self.call_contract(self.pub[i - 1], self.pri[i - 1], contractAddr, data, 0) assert_equal(result["outcomeStatus"], "0x0") # check wards for i in range(5): - data = contract.functions.wards(Web3.to_checksum_address(self.pub[i])).buildTransaction(self.tx_conf)["data"] + data = contract.functions.wards(Web3.to_checksum_address(self.pub[i])).build_transaction(self.tx_conf)["data"] result = self.rpc.call(contractAddr, data) assert_equal(int(result, 0), int(i % 2 == 0)) # mint tokens - data = contract.functions.mint(Web3.to_checksum_address(self.pub[0]), 100000).buildTransaction(self.tx_conf)["data"] + data = contract.functions.mint(Web3.to_checksum_address(self.pub[0]), 100000).build_transaction(self.tx_conf)["data"] result = self.call_contract(self.sender, self.priv_key, contractAddr, data, 0, storage_limit=128) logs = self.rpc.get_logs(self.filter) # check balance - data = contract.functions.balanceOf(Web3.to_checksum_address(self.pub[0])).buildTransaction(self.tx_conf)["data"] + data = contract.functions.balanceOf(Web3.to_checksum_address(self.pub[0])).build_transaction(self.tx_conf)["data"] result = self.rpc.call(contractAddr, data) assert(int(result, 0) == 100000) # approve - data = contract.functions.approve(self.sender_checksum, 50000).buildTransaction(self.tx_conf)["data"] + data = contract.functions.approve(self.sender_checksum, 50000).build_transaction(self.tx_conf)["data"] result= 
self.call_contract(self.pub[0], self.pri[0], contractAddr, data, storage_limit=64) logs = self.rpc.get_logs(self.filter) # check allowance - data = contract.functions.allowance(Web3.to_checksum_address(self.pub[0]), self.sender_checksum).buildTransaction(self.tx_conf)["data"] + data = contract.functions.allowance(Web3.to_checksum_address(self.pub[0]), self.sender_checksum).build_transaction(self.tx_conf)["data"] result = self.rpc.call(contractAddr, data) assert(int(result, 0) == 50000) # insufficient balance - data = contract.functions.transfer(self.sender_checksum, 200000).buildTransaction(self.tx_conf)["data"] + data = contract.functions.transfer(self.sender_checksum, 200000).build_transaction(self.tx_conf)["data"] result = self.call_contract(self.pub[0], self.pri[0], contractAddr, data, storage_limit=128) assert(result["outcomeStatus"] != "0x0") # insuffcient allowance - data = contract.functions.transferFrom(Web3.to_checksum_address(self.pub[0]), self.sender_checksum, 10000).buildTransaction(self.tx_conf)["data"] + data = contract.functions.transferFrom(Web3.to_checksum_address(self.pub[0]), self.sender_checksum, 10000).build_transaction(self.tx_conf)["data"] result = self.call_contract(self.pub[1], self.pri[1], contractAddr, data, storage_limit=128) assert(result["outcomeStatus"] != "0x0") # transfer 50000 use allowance - data = contract.functions.transferFrom(Web3.to_checksum_address(self.pub[0]), Web3.to_checksum_address(self.pub[1]), 50000).buildTransaction(self.tx_conf)["data"] + data = contract.functions.transferFrom(Web3.to_checksum_address(self.pub[0]), Web3.to_checksum_address(self.pub[1]), 50000).build_transaction(self.tx_conf)["data"] result = self.call_contract(self.sender, self.priv_key, contractAddr, data, storage_limit=64) assert(result["outcomeStatus"] == "0x0") # get digest and sign it ts = int(time.time()) + 7200 - data = contract.functions.getHash(Web3.to_checksum_address(self.pub[0]), Web3.to_checksum_address(self.pub[1]), 0, ts, True).buildTransaction(self.tx_conf)["data"] + data = contract.functions.getHash(Web3.to_checksum_address(self.pub[0]), Web3.to_checksum_address(self.pub[1]), 0, ts, True).build_transaction(self.tx_conf)["data"] result = self.rpc.call(contractAddr, data) v, r, s = ecsign(bytes.fromhex(result[2:]), self.pri[0]) v -= 27 @@ -370,31 +370,31 @@ def testDaiContract(self): assert(len(s) == 66) # premit - data = contract.functions.permit(Web3.to_checksum_address(self.pub[0]), Web3.to_checksum_address(self.pub[1]), 0, ts, True, v, r, s).buildTransaction(self.tx_conf)["data"] + data = contract.functions.permit(Web3.to_checksum_address(self.pub[0]), Web3.to_checksum_address(self.pub[1]), 0, ts, True, v, r, s).build_transaction(self.tx_conf)["data"] result = self.call_contract(self.pub[5], self.pri[5], contractAddr, data, storage_limit=128) assert(result["outcomeStatus"] == "0x0") # check allowance - data = contract.functions.allowance(Web3.to_checksum_address(self.pub[0]), self.sender_checksum).buildTransaction(self.tx_conf)["data"] + data = contract.functions.allowance(Web3.to_checksum_address(self.pub[0]), self.sender_checksum).build_transaction(self.tx_conf)["data"] result = self.rpc.call(contractAddr, data) assert(int(result, 0) == 0) - data = contract.functions.allowance(Web3.to_checksum_address(self.pub[0]), Web3.to_checksum_address(self.pub[1])).buildTransaction(self.tx_conf)["data"] + data = contract.functions.allowance(Web3.to_checksum_address(self.pub[0]), Web3.to_checksum_address(self.pub[1])).build_transaction(self.tx_conf)["data"] result 
= self.rpc.call(contractAddr, data) assert(result == '0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff') # burn pub[0] - data = contract.functions.burn(Web3.to_checksum_address(self.pub[0]), 50000).buildTransaction(self.tx_conf)["data"] + data = contract.functions.burn(Web3.to_checksum_address(self.pub[0]), 50000).build_transaction(self.tx_conf)["data"] result = self.call_contract(self.pub[1], self.pri[1], contractAddr, data, storage_limit=64) assert(result["outcomeStatus"] == "0x0") # check balance - data = contract.functions.balanceOf(Web3.to_checksum_address(self.pub[0])).buildTransaction(self.tx_conf)["data"] + data = contract.functions.balanceOf(Web3.to_checksum_address(self.pub[0])).build_transaction(self.tx_conf)["data"] result = self.rpc.call(contractAddr, data) assert(int(result, 0) == 0) - data = contract.functions.balanceOf(Web3.to_checksum_address(self.pub[1])).buildTransaction(self.tx_conf)["data"] + data = contract.functions.balanceOf(Web3.to_checksum_address(self.pub[1])).build_transaction(self.tx_conf)["data"] result = self.rpc.call(contractAddr, data) assert(int(result, 0) == 50000) - data = contract.functions.totalSupply().buildTransaction(self.tx_conf)["data"] + data = contract.functions.totalSupply().build_transaction(self.tx_conf)["data"] result = self.rpc.call(contractAddr, data) assert(int(result, 0) == 50000) @@ -405,7 +405,7 @@ def testDaiJoinContract(self): abi_file = os.path.join(file_dir, "contracts/Dai_abi.json"), bytecode_file = os.path.join(file_dir, CONTRACT_PATH), ) - data = dai.constructor(1).buildTransaction(self.tx_conf)["data"] + data = dai.constructor(1).build_transaction(self.tx_conf)["data"] receipt, contractAddr = self.deploy_contract(self.sender, self.priv_key, data, storage_limit=(4096 + 64 * 2)) dai_addr = Web3.to_checksum_address(contractAddr) @@ -415,7 +415,7 @@ def testDaiJoinContract(self): abi_file = os.path.join(file_dir, "contracts/Vat_abi.json"), bytecode_file = os.path.join(file_dir, CONTRACT_PATH), ) - data = vat.constructor().buildTransaction(self.tx_conf)["data"] + data = vat.constructor().build_transaction(self.tx_conf)["data"] receipt, contractAddr = self.deploy_contract(self.sender, self.priv_key, data, storage_limit=(5632 + 64 * 2)) vat_addr = Web3.to_checksum_address(contractAddr) @@ -425,77 +425,77 @@ def testDaiJoinContract(self): abi_file = os.path.join(file_dir, "contracts/DaiJoin_abi.json"), bytecode_file = os.path.join(file_dir, CONTRACT_PATH), ) - data = dai_join.constructor(vat_addr, dai_addr).buildTransaction(self.tx_conf)["data"] + data = dai_join.constructor(vat_addr, dai_addr).build_transaction(self.tx_conf)["data"] receipt, contractAddr = self.deploy_contract(self.sender, self.priv_key, data, storage_limit=(1024 + 64 * 2)) dai_join_addr = Web3.to_checksum_address(contractAddr) # mint dai tokens & give approval self.tx_conf["to"] = dai_addr - data = dai.functions.mint(Web3.to_checksum_address(self.pub[0]), 100000).buildTransaction(self.tx_conf)["data"] + data = dai.functions.mint(Web3.to_checksum_address(self.pub[0]), 100000).build_transaction(self.tx_conf)["data"] result = self.call_contract(self.sender, self.priv_key, dai_addr, data, 0, storage_limit=128) assert(result["outcomeStatus"] == "0x0") - data = dai.functions.approve(dai_join_addr, 100000).buildTransaction(self.tx_conf)["data"] + data = dai.functions.approve(dai_join_addr, 100000).build_transaction(self.tx_conf)["data"] result = self.call_contract(self.pub[0], self.pri[0], dai_addr, data, 0, storage_limit=64) assert(result["outcomeStatus"] 
== "0x0") - data = dai.functions.allowance(Web3.to_checksum_address(self.pub[0]), dai_join_addr).buildTransaction(self.tx_conf)["data"] + data = dai.functions.allowance(Web3.to_checksum_address(self.pub[0]), dai_join_addr).build_transaction(self.tx_conf)["data"] result = self.rpc.call(dai_addr, data) assert_equal(int(result, 0), 100000) - data = dai.functions.rely(dai_join_addr).buildTransaction(self.tx_conf)["data"] + data = dai.functions.rely(dai_join_addr).build_transaction(self.tx_conf)["data"] result = self.call_contract(self.sender, self.priv_key, dai_addr, data, 0, storage_limit=64) assert(result["outcomeStatus"] == "0x0") # mint dai tokens for join_addr in vat & add approval self.tx_conf["to"] = vat_addr - data = vat.functions.mint(dai_join_addr, 100000000000).buildTransaction(self.tx_conf)["data"] + data = vat.functions.mint(dai_join_addr, 100000000000).build_transaction(self.tx_conf)["data"] result = self.call_contract(self.sender, self.priv_key, vat_addr, data, 0, storage_limit=128) assert(result["outcomeStatus"] == "0x0") - data = vat.functions.hope(dai_join_addr).buildTransaction(self.tx_conf)["data"] + data = vat.functions.hope(dai_join_addr).build_transaction(self.tx_conf)["data"] result = self.call_contract(self.pub[0], self.pri[0], vat_addr, data, 0, storage_limit=64) assert(result["outcomeStatus"] == "0x0") - data = vat.functions.balanceOf(dai_join_addr).buildTransaction(self.tx_conf)["data"] + data = vat.functions.balanceOf(dai_join_addr).build_transaction(self.tx_conf)["data"] result = self.rpc.call(vat_addr, data) assert_equal(int(result, 0), 100000000000) # join self.tx_conf["to"] = dai_join_addr - data = dai_join.functions.join(Web3.to_checksum_address(self.pub[0]), 50000).buildTransaction(self.tx_conf)["data"] + data = dai_join.functions.join(Web3.to_checksum_address(self.pub[0]), 50000).build_transaction(self.tx_conf)["data"] result = self.call_contract(self.pub[0], self.pri[0], dai_join_addr, data, 0, storage_limit=320) assert(result["outcomeStatus"] == "0x0") # check self.tx_conf["to"] = dai_addr - data = dai.functions.balanceOf(Web3.to_checksum_address(self.pub[0])).buildTransaction(self.tx_conf)["data"] + data = dai.functions.balanceOf(Web3.to_checksum_address(self.pub[0])).build_transaction(self.tx_conf)["data"] result = self.rpc.call(dai_addr, data) assert_equal(int(result, 0), 50000) self.tx_conf["to"] = vat_addr - data = vat.functions.can(dai_join_addr, Web3.to_checksum_address(self.pub[0])).buildTransaction(self.tx_conf)["data"] + data = vat.functions.can(dai_join_addr, Web3.to_checksum_address(self.pub[0])).build_transaction(self.tx_conf)["data"] result = self.rpc.call(vat_addr, data) assert_equal(int(result, 0), 1) - data = vat.functions.dai(Web3.to_checksum_address(self.pub[0])).buildTransaction(self.tx_conf)["data"] + data = vat.functions.dai(Web3.to_checksum_address(self.pub[0])).build_transaction(self.tx_conf)["data"] result = self.rpc.call(vat_addr, data) assert_equal(int(result, 0), 50000000000000000000000000000000) # exit self.tx_conf["to"] = dai_join_addr - data = dai_join.functions.exit(Web3.to_checksum_address(self.pub[0]), 50000).buildTransaction(self.tx_conf)["data"] + data = dai_join.functions.exit(Web3.to_checksum_address(self.pub[0]), 50000).build_transaction(self.tx_conf)["data"] result = self.call_contract(self.pub[0], self.pri[0], dai_join_addr, data, 0, storage_limit=128) assert(result["outcomeStatus"] == "0x0") # check self.tx_conf["to"] = dai_addr - data = 
dai.functions.balanceOf(Web3.to_checksum_address(self.pub[0])).buildTransaction(self.tx_conf)["data"] + data = dai.functions.balanceOf(Web3.to_checksum_address(self.pub[0])).build_transaction(self.tx_conf)["data"] result = self.rpc.call(dai_addr, data) assert_equal(int(result, 0), 100000) self.tx_conf["to"] = vat_addr - data = vat.functions.can(dai_join_addr, Web3.to_checksum_address(self.pub[0])).buildTransaction(self.tx_conf)["data"] + data = vat.functions.can(dai_join_addr, Web3.to_checksum_address(self.pub[0])).build_transaction(self.tx_conf)["data"] result = self.rpc.call(vat_addr, data) assert_equal(int(result, 0), 0) - data = vat.functions.dai(Web3.to_checksum_address(self.pub[0])).buildTransaction(self.tx_conf)["data"] + data = vat.functions.dai(Web3.to_checksum_address(self.pub[0])).build_transaction(self.tx_conf)["data"] result = self.rpc.call(vat_addr, data) assert_equal(int(result, 0), 0) @@ -515,7 +515,7 @@ def testCreate2Factory(self): fin = open(os.path.join(file_dir, "contracts/erc1820_bytecode.dat"), 'r') erc1820_bytecode = '0x' + fin.readline().strip() fin.close() - data = create2factory.functions.deploy(erc1820_bytecode, 2).buildTransaction(self.tx_conf)["data"] + data = create2factory.functions.deploy(erc1820_bytecode, 2).build_transaction(self.tx_conf)["data"] result = self.rpc.call(create2factory_addr, data) assert(len(result) == 66) erc1820_addr = "0x" + result[-40:] @@ -557,7 +557,7 @@ def run_test(self): # lock token for genesis account self.tx_conf = {"from":self.sender, "gas":int_to_hex(gas), "gasPrice":int_to_hex(gas_price), "chainId":0} self.tx_conf['to'] = staking_contract_addr - tx_data = decode_hex(staking_contract.functions.deposit(1000000 * 10 ** 18).buildTransaction(self.tx_conf)["data"]) + tx_data = decode_hex(staking_contract.functions.deposit(1000000 * 10 ** 18).build_transaction(self.tx_conf)["data"]) tx = self.rpc.new_tx(value=0, receiver=staking_contract_addr, data=tx_data, gas=gas, gas_price=gas_price) self.rpc.send_tx(tx, True) @@ -569,7 +569,7 @@ def run_test(self): transaction = self.rpc.new_tx(sender=self.sender, receiver=pub_key, value=1000000 * 10 ** 18, priv_key=self.priv_key) self.rpc.send_tx(transaction, True) # deposit 10000 tokens - tx_data = decode_hex(staking_contract.functions.deposit(10000 * 10 ** 18).buildTransaction(self.tx_conf)["data"]) + tx_data = decode_hex(staking_contract.functions.deposit(10000 * 10 ** 18).build_transaction(self.tx_conf)["data"]) tx = self.rpc.new_tx(value=0, sender=pub_key, receiver=self.tx_conf["to"], gas=gas, data=tx_data, priv_key=priv_key) self.rpc.send_tx(tx) self.tx_conf = {"from":self.sender, "gas":int_to_hex(gas), "gasPrice":int_to_hex(gas_price), "chainId":0} diff --git a/tests/erc20_test.py b/tests/erc20_test.py index 565f037dd..7c5ec74d4 100755 --- a/tests/erc20_test.py +++ b/tests/erc20_test.py @@ -38,7 +38,7 @@ def run_test(self): block_gen_thread = BlockGenThread(self.nodes, self.log) block_gen_thread.start() self.tx_conf = {"from":Web3.to_checksum_address(genesis_addr), "nonce":int_to_hex(nonce), "gas":int_to_hex(gas), "gasPrice":int_to_hex(gas_price), "chainId":0} - raw_create = erc20_contract.constructor().buildTransaction(self.tx_conf) + raw_create = erc20_contract.constructor().build_transaction(self.tx_conf) tx_data = decode_hex(raw_create["data"]) tx_create = create_transaction(pri_key=genesis_key, receiver=b'', nonce=nonce, gas_price=gas_price, data=tx_data, gas=gas, value=0, storage_limit=1920) self.client = RpcClient(self.nodes[0]) @@ -60,7 +60,7 @@ def run_test(self): value = 
int((balance_map[sender_key] - ((tx_n - i) * 21000 * gas_price)) * random.random()) receiver_sk, _ = ec_random_keys() balance_map[receiver_sk] = value - tx_data = decode_hex(erc20_contract.functions.transfer(Web3.to_checksum_address(encode_hex(priv_to_addr(receiver_sk))), value).buildTransaction(self.tx_conf)["data"]) + tx_data = decode_hex(erc20_contract.functions.transfer(Web3.to_checksum_address(encode_hex(priv_to_addr(receiver_sk))), value).build_transaction(self.tx_conf)["data"]) tx = create_transaction(pri_key=sender_key, receiver=decode_hex(self.tx_conf["to"]), value=0, nonce=nonce, gas=gas, gas_price=gas_price, data=tx_data, storage_limit=64) r = random.randint(0, self.num_nodes - 1) @@ -82,7 +82,7 @@ def run_test(self): self.log.info("Pass") def get_balance(self, contract, token_address): - tx = contract.functions.balanceOf(Web3.to_checksum_address(encode_hex(token_address))).buildTransaction(self.tx_conf) + tx = contract.functions.balanceOf(Web3.to_checksum_address(encode_hex(token_address))).build_transaction(self.tx_conf) result = self.client.call(tx["to"], tx["data"]) balance = bytes_to_int(decode_hex(result)) self.log.debug("address=%s, balance=%s", encode_hex(token_address), balance) diff --git a/tests/estimation_test.py b/tests/estimation_test.py index 682482adf..cbbd3024e 100644 --- a/tests/estimation_test.py +++ b/tests/estimation_test.py @@ -73,13 +73,13 @@ def clear_user_balance(): estimate(to=hex_to_b32_address(user), value=hex(1_000_000_000 * CFX)) # Deep Recursive and Set Entries - data = contract.encodeABI("recursive", [100]) + data = contract.encode_abi("recursive", [100]) res = estimate(to=contract_base32_address, data=data) send_tx(client.new_contract_tx(contract_address, gas=res["gasLimit"], data_hex=data, storage_limit=res["storageCollateralized"])) # Repeatedly Set the Same Entry - data = contract.encodeABI("recursive", [0]) + data = contract.encode_abi("recursive", [0]) # For a random sender, it needs to pay for the collateral for resetting entries. res = estimate(to=contract_base32_address, data=data) assert_equal(int(res["storageCollateralized"], 0), 128) @@ -111,11 +111,11 @@ def clear_user_balance(): # # Sponsor gas for this contract. send_tx(client.new_contract_tx(receiver=SPONSOR_INTERNAL_CONTRACT, - data_hex=sponsor_contract.encodeABI("setSponsorForGas", + data_hex=sponsor_contract.encode_abi("setSponsorForGas", [contract_address, 30_000_000]), value=30_000_000_000)) send_tx(client.new_contract_tx(receiver=SPONSOR_INTERNAL_CONTRACT, - data_hex=sponsor_contract.encodeABI("addPrivilegeByAdmin", + data_hex=sponsor_contract.encode_abi("addPrivilegeByAdmin", [contract_address, [ZERO_ADDR]]), storage_limit=64)) @@ -147,40 +147,40 @@ def clear_user_balance(): # Sponsor gas for this contract. send_tx(client.new_contract_tx(receiver=SPONSOR_INTERNAL_CONTRACT, - data_hex=sponsor_contract.encodeABI("setSponsorForCollateral", + data_hex=sponsor_contract.encode_abi("setSponsorForCollateral", [contract_address]), value=5 * ENTRY_COLLATERAL)) - data = contract.encodeABI("inc_prefix", [4]) + data = contract.encode_abi("inc_prefix", [4]) res = estimate(to=contract_base32_address, data=data, **from_user) send_tx(client.new_contract_tx(contract_address, gas=res["gasLimit"], data_hex=data, storage_limit=res["storageCollateralized"], priv_key=user_pri)) # The sponsor have enough balance for collateral, only pay for additional. 
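For reference, the encodeABI -> encode_abi rename that these estimation_test.py hunks apply boils down to the following minimal sketch; the ABI and arguments below are illustrative placeholders, not code from this repository, and the exact web3.py version that introduced the snake_case name is not asserted here.

from web3 import Web3

# Illustrative ABI; only name/inputs matter for calldata encoding.
abi = [{
    "type": "function", "name": "inc_prefix", "stateMutability": "nonpayable",
    "inputs": [{"name": "n", "type": "uint256"}], "outputs": [],
}]
contract = Web3().eth.contract(abi=abi)   # factory is enough, no provider needed

# old spelling: contract.encodeABI(fn_name="inc_prefix", args=[5])
data = contract.encode_abi("inc_prefix", [5])   # hex calldata string ("0x...")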
- data = contract.encodeABI("inc_prefix", [5]) + data = contract.encode_abi("inc_prefix", [5]) res = estimate(to=contract_base32_address, data=data) assert_equal(int(res["storageCollateralized"], 0), 64) # The sponsor don't have enough balance for collateral - data = contract.encodeABI("inc_prefix", [6]) + data = contract.encode_abi("inc_prefix", [6]) res = estimate(to=contract_base32_address, data=data) assert_equal(int(res["storageCollateralized"], 0), 6 * 64) # The user overcome send_tx(client.new_contract_tx(user, value=20 * ENTRY_COLLATERAL + 1_000_000)) - data = contract.encodeABI("inc_prefix", [7]) + data = contract.encode_abi("inc_prefix", [7]) res = estimate(to=contract_base32_address, data=data, **from_user) assert_equal(int(res["storageCollateralized"], 0), 7 * 64) send_tx(client.new_contract_tx(contract_address, gas=res["gasLimit"], data_hex=data, storage_limit=res["storageCollateralized"], priv_key=user_pri)) - data = contract.encodeABI("inc_prefix", [8]) + data = contract.encode_abi("inc_prefix", [8]) res = estimate(to=contract_base32_address, data=data, **from_user) assert_equal(int(res["storageCollateralized"], 0), 6 * 64) send_tx(client.new_contract_tx(contract_address, gas=res["gasLimit"], data_hex=data, storage_limit=res["storageCollateralized"], priv_key=user_pri)) - data = contract.encodeABI("inc_prefix", [3]) + data = contract.encode_abi("inc_prefix", [3]) res = estimate(to=contract_base32_address, data=data, **from_user) assert_equal(int(res["storageCollateralized"], 0), 3 * 64) diff --git a/tests/evm_full_history_state_test.py b/tests/evm_full_history_state_test.py index 155271da6..7c3b81979 100755 --- a/tests/evm_full_history_state_test.py +++ b/tests/evm_full_history_state_test.py @@ -47,7 +47,7 @@ def run_test(self): self.nodes[0].eth_call({"to": "0x0000000000000000000000000000000000000000", "data": "0x00"}, "0x33") assert_raises_rpc_error(None, None, self.nodes[0].eth_call, {"to": "0x0000000000000000000000000000000000000000", "data": "0x00"}, "0x31") - evm_genesis_account = Web3().eth.account.privateKeyToAccount(default_config["GENESIS_PRI_KEY"]).address + evm_genesis_account = Web3().eth.account.from_key(default_config["GENESIS_PRI_KEY"]).address # value = default_config["TOTAL_COIN"] value = 10 ** 18 self.cross_space_transfer(evm_genesis_account, value) diff --git a/tests/evm_space/account_pending_tx_test.py b/tests/evm_space/account_pending_tx_test.py index 76ae325e2..84f0a9db8 100755 --- a/tests/evm_space/account_pending_tx_test.py +++ b/tests/evm_space/account_pending_tx_test.py @@ -11,13 +11,13 @@ def run_test(self): self.cfxAccount = self.rpc.GENESIS_ADDR print(f'Using Conflux account {self.cfxAccount}') # initialize EVM account - self.evmAccount = self.w3.eth.account.privateKeyToAccount(self.DEFAULT_TEST_ACCOUNT_KEY) + self.evmAccount = self.w3.eth.account.from_key(self.DEFAULT_TEST_ACCOUNT_KEY) print(f'Using EVM account {self.evmAccount.address}') self.cross_space_transfer(self.evmAccount.address, 1 * 10 ** 18) assert_equal(self.nodes[0].eth_getBalance(self.evmAccount.address), hex(1 * 10 ** 18)) - signed = self.evmAccount.signTransaction({ + signed = self.evmAccount.sign_transaction({ "to": self.evmAccount.address, "value": 100, "gasPrice": 1, @@ -25,7 +25,7 @@ def run_test(self): "nonce": 2, "chainId": self.TEST_CHAIN_ID, }) - self.w3.eth.sendRawTransaction(signed["rawTransaction"]) + self.w3.eth.send_raw_transaction(signed["raw_transaction"]) pendingTx = self.nodes[0].eth_getAccountPendingTransactions(self.evmAccount.address) 
assert_equal(pendingTx["pendingCount"], "0x1") diff --git a/tests/evm_space/base.py b/tests/evm_space/base.py index b8c9b165b..bb48788db 100644 --- a/tests/evm_space/base.py +++ b/tests/evm_space/base.py @@ -45,7 +45,7 @@ def cross_space_transfer(self, to, value): self.rpc.send_tx(tx, True) def construct_evm_tx(self, receiver, data_hex, nonce): - signed = self.evmAccount.signTransaction({ + signed = self.evmAccount.sign_transaction({ "to": receiver, "value": 0, "gasPrice": 1, @@ -71,9 +71,9 @@ def deploy_conflux_space(self, bytecode_path): return addr def deploy_evm_space_by_code(self, bytecode): - nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) + nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) - signed = self.evmAccount.signTransaction({ + signed = self.evmAccount.sign_transaction({ "to": None, "value": 0, "gasPrice": 1, @@ -84,12 +84,12 @@ def deploy_evm_space_by_code(self, bytecode): }) tx_hash = signed["hash"] - return_tx_hash = self.w3.eth.sendRawTransaction(signed["rawTransaction"]) + return_tx_hash = self.w3.eth.send_raw_transaction(signed["raw_transaction"]) assert_equal(tx_hash, return_tx_hash) self.rpc.generate_block(1) self.rpc.generate_blocks(20, 1) - receipt = self.w3.eth.waitForTransactionReceipt(tx_hash) + receipt = self.w3.eth.wait_for_transaction_receipt(tx_hash) assert_equal(receipt["status"], 1) addr = receipt["contractAddress"] return addr @@ -130,7 +130,7 @@ def run_test(self): self.cfxAccount = self.rpc.GENESIS_ADDR print(f'Using Conflux account {self.cfxAccount}') # initialize EVM account - self.evmAccount = self.w3.eth.account.privateKeyToAccount(self.DEFAULT_TEST_ACCOUNT_KEY) + self.evmAccount = self.w3.eth.account.from_key(self.DEFAULT_TEST_ACCOUNT_KEY) print(f'Using EVM account {self.evmAccount.address}') self.cross_space_transfer(self.evmAccount.address, 1 * 10 ** 18) assert_equal(self.nodes[0].eth_getBalance(self.evmAccount.address), hex(1 * 10 ** 18)) diff --git a/tests/evm_space/debug_trace_tx_test.py b/tests/evm_space/debug_trace_tx_test.py index a98a527e3..0f76dbb33 100644 --- a/tests/evm_space/debug_trace_tx_test.py +++ b/tests/evm_space/debug_trace_tx_test.py @@ -7,7 +7,7 @@ from test_framework.util import * from web3 import Web3 -toHex = Web3.toHex +toHex = Web3.to_hex class DebugTraceTxTest(Web3Base): def set_test_params(self): @@ -19,7 +19,7 @@ def run_test(self): self.cfxAccount = self.rpc.GENESIS_ADDR print(f'Using Conflux account {self.cfxAccount}') # initialize EVM account - self.evmAccount = self.w3.eth.account.privateKeyToAccount(self.DEFAULT_TEST_ACCOUNT_KEY) + self.evmAccount = self.w3.eth.account.from_key(self.DEFAULT_TEST_ACCOUNT_KEY) print(f'Using EVM account {self.evmAccount.address}') self.cross_space_transfer(self.evmAccount.address, 100 * 10 ** 18) assert_equal(self.nodes[0].eth_getBalance(self.evmAccount.address), hex(100 * 10 ** 18)) @@ -37,18 +37,18 @@ def trace_tx(self, tx_hash, opts = None): return trace def common_cfx_transfer_tx_trace(self): - nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) + nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) - signed = self.evmAccount.signTransaction({ + signed = self.evmAccount.sign_transaction({ "to": self.evmAccount.address, "value": 1, "gasPrice": 1, "gas": 210000, "nonce": nonce, - "chainId": self.w3.eth.chainId, + "chainId": self.w3.eth.chain_id, }) - return_tx_hash = self.w3.eth.sendRawTransaction(signed["rawTransaction"]) + return_tx_hash = self.w3.eth.send_raw_transaction(signed["raw_transaction"]) 
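The account and signing renames applied throughout these files follow one pattern, sketched below with the newer snake_case entry points; the endpoint URL, private key, and chainId are placeholders rather than values taken from these tests.

from web3 import Web3

w3 = Web3(Web3.HTTPProvider("http://127.0.0.1:8545"))
acct = w3.eth.account.from_key("0x" + "11" * 32)           # was privateKeyToAccount
nonce = w3.eth.get_transaction_count(acct.address)         # was getTransactionCount
signed = acct.sign_transaction({                            # was signTransaction
    "to": acct.address, "value": 1, "gas": 21000,
    "gasPrice": 1, "nonce": nonce, "chainId": 10,
})
tx_hash = w3.eth.send_raw_transaction(signed.raw_transaction)   # was rawTransaction
receipt = w3.eth.wait_for_transaction_receipt(tx_hash)          # was waitForTransactionReceipt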
self.rpc.generate_blocks(20, 1) trace = self.nodes[0].ethrpc.debug_traceTransaction(toHex(return_tx_hash)) @@ -62,18 +62,18 @@ def contract_deploy_tx_trace(self): assert(os.path.isfile(bytecode_file)) bytecode = open(bytecode_file).read() - nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) - signed = self.evmAccount.signTransaction({ + nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) + signed = self.evmAccount.sign_transaction({ "to": None, "value": 0, "gasPrice": 1, "gas": 10000000, "nonce": nonce, - "chainId": self.w3.eth.chainId, + "chainId": self.w3.eth.chain_id, "data": bytecode, }) - return_tx_hash = self.w3.eth.sendRawTransaction(signed["rawTransaction"]) + return_tx_hash = self.w3.eth.send_raw_transaction(signed["raw_transaction"]) self.rpc.generate_block(1) self.rpc.generate_blocks(20, 1) @@ -97,20 +97,20 @@ def erc20_transfer_tx_trace(self, erc20_address): # balance = erc20.functions.balanceOf(self.evmAccount.address).call() target_addr = Web3.to_checksum_address("0x8b14d287b4150ff22ac73df8be720e933f659abc") - data = erc20.encodeABI(fn_name="transfer", args=[target_addr, 100]) + data = erc20.encode_abi(abi_element_identifier="transfer", args=[target_addr, 100]) - nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) - signed = self.evmAccount.signTransaction({ + nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) + signed = self.evmAccount.sign_transaction({ "to": erc20_address, "value": 0, "gasPrice": 1, "gas": 1000000, "nonce": nonce, - "chainId": self.w3.eth.chainId, + "chainId": self.w3.eth.chain_id, "data": data, }) - tx_hash = self.w3.eth.sendRawTransaction(signed["rawTransaction"]) + tx_hash = self.w3.eth.send_raw_transaction(signed["raw_transaction"]) # why this method is not working? 
# tx_hash = erc20.functions.transfer(target_addr, 100).transact() diff --git a/tests/evm_space/eip1559_test.py b/tests/evm_space/eip1559_test.py index 1188a9932..c719006ae 100755 --- a/tests/evm_space/eip1559_test.py +++ b/tests/evm_space/eip1559_test.py @@ -39,7 +39,7 @@ def run_test(self): self.log.info(f'Using Conflux account {self.cfxAccount}') # initialize EVM account - self.evmAccount = self.w3.eth.account.privateKeyToAccount(self.DEFAULT_TEST_ACCOUNT_KEY) + self.evmAccount = self.w3.eth.account.from_key(self.DEFAULT_TEST_ACCOUNT_KEY) self.log.info(f'Using EVM account {self.evmAccount.address}') self.cross_space_transfer(self.evmAccount.address, 100 * 10 ** 18) assert_equal(self.nodes[0].eth_getBalance(self.evmAccount.address), hex(100 * 10 ** 18)) @@ -49,7 +49,7 @@ def run_test(self): ret = self.nodes[0].debug_getTransactionsByEpoch("0x1") assert_equal(len(ret), 1) - self.nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) + self.nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) tx, receipt = self.send_large_transaction() self.check_node_sync(tx, receipt) @@ -64,7 +64,7 @@ def run_test(self): def send_large_transaction(self): - signed = self.evmAccount.signTransaction({ + signed = self.evmAccount.sign_transaction({ "type": "0x2", "to": self.evmAccount.address, "value": 1, @@ -76,10 +76,10 @@ def send_large_transaction(self): }) self.nonce += 1 - return_tx_hash = self.w3.eth.sendRawTransaction(signed["rawTransaction"]) + return_tx_hash = self.w3.eth.send_raw_transaction(signed["raw_transaction"]) self.rpc.generate_block(1) self.rpc.generate_blocks(20, 1) - receipt = self.w3.eth.waitForTransactionReceipt(return_tx_hash) + receipt = self.w3.eth.wait_for_transaction_receipt(return_tx_hash) assert_equal(receipt["status"], 1) # TODO check EIP1559 gas usage @@ -88,13 +88,13 @@ def send_large_transaction(self): tx = self.w3.eth.get_transaction(return_tx_hash) - assert_equal(Web3.toHex(tx["v"]), tx["yParity"]) + assert_equal(tx["v"], tx["yParity"]) return tx, receipt def send_large_cheap_transactions(self): for i in range(0, 5): - signed = self.evmAccount.signTransaction({ + signed = self.evmAccount.sign_transaction({ "type": "0x2", "to": self.evmAccount.address, "value": 1, @@ -104,12 +104,12 @@ def send_large_cheap_transactions(self): "nonce": self.nonce + i, "chainId": 10, }) - return_tx_hash = self.w3.eth.sendRawTransaction(signed["rawTransaction"]) + return_tx_hash = self.w3.eth.send_raw_transaction(signed["raw_transaction"]) self.nonce += 5 self.rpc.generate_blocks(20, 5) - receipt = self.w3.eth.waitForTransactionReceipt(return_tx_hash) + receipt = self.w3.eth.wait_for_transaction_receipt(return_tx_hash) assert_equal(receipt["status"], 1) assert_equal(receipt["txExecErrorMsg"], None) @@ -137,7 +137,7 @@ def check_fee_history(self): def send_many_transactions_in_one_block(self): for i in range(0, 10): - signed = self.evmAccount.signTransaction({ + signed = self.evmAccount.sign_transaction({ "type": "0x2", "to": self.evmAccount.address, "value": 1, @@ -147,12 +147,12 @@ def send_many_transactions_in_one_block(self): "nonce": self.nonce + i, "chainId": 10, }) - return_tx_hash = self.w3.eth.sendRawTransaction(signed["rawTransaction"]) + return_tx_hash = self.w3.eth.send_raw_transaction(signed["raw_transaction"]) self.nonce += 10 self.rpc.generate_block(10) self.rpc.generate_blocks(20, 0) - receipt = self.w3.eth.waitForTransactionReceipt(return_tx_hash) + receipt = self.w3.eth.wait_for_transaction_receipt(return_tx_hash) 
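The eip1559_test.py hunks below sign type-2 (EIP-1559) transactions through the same renamed entry points; a rough sketch of that shape, with fee values and the key as illustrative placeholders only:

from web3 import Web3

w3 = Web3(Web3.HTTPProvider("http://127.0.0.1:8545"))
acct = w3.eth.account.from_key("0x" + "22" * 32)
signed = acct.sign_transaction({
    "type": "0x2",                      # EIP-1559 dynamic-fee transaction
    "to": acct.address,
    "value": 1,
    "gas": 210000,
    "maxFeePerGas": 10 ** 10,           # placeholder fee caps
    "maxPriorityFeePerGas": 1,
    "nonce": w3.eth.get_transaction_count(acct.address),
    "chainId": 10,
})
tx_hash = w3.eth.send_raw_transaction(signed.raw_transaction)
receipt = w3.eth.wait_for_transaction_receipt(tx_hash)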
assert_equal(receipt["cumulativeGasUsed"], 21000 * 10) assert_equal(receipt["gasUsed"], 21000) diff --git a/tests/evm_space/estimate_and_call_test.py b/tests/evm_space/estimate_and_call_test.py index 4fbdf65b6..026e881cf 100644 --- a/tests/evm_space/estimate_and_call_test.py +++ b/tests/evm_space/estimate_and_call_test.py @@ -11,7 +11,7 @@ def run_test(self): self.cfxAccount = self.rpc.GENESIS_ADDR print(f'Using Conflux account {self.cfxAccount}') # initialize EVM account - self.evmAccount = self.w3.eth.account.privateKeyToAccount(self.DEFAULT_TEST_ACCOUNT_KEY) + self.evmAccount = self.w3.eth.account.from_key(self.DEFAULT_TEST_ACCOUNT_KEY) print(f'Using EVM account {self.evmAccount.address}') self.test_basic() @@ -24,7 +24,7 @@ def test_basic(self): call_request = { "to": "0x007a026f3fe3c8252f0adb915f0d924aef942f53", "value": "0x100", - "chainId": Web3.toHex(self.TEST_CHAIN_ID) + "chainId": Web3.to_hex(self.TEST_CHAIN_ID) } estimate_result = self.nodes[0].eth_estimateGas(call_request) assert_equal(estimate_result, "0x5208") @@ -43,7 +43,7 @@ def test_revert(self): addr = self.deploy_evm_space_by_code(bytecode) err_contract = self.w3.eth.contract(address=addr, abi=abi) - data = err_contract.encodeABI(fn_name="testRequire", args=[1]) + data = err_contract.encode_abi(abi_element_identifier="testRequire", args=[1]) call_request = { "to": addr, "data": data, @@ -53,7 +53,7 @@ def test_revert(self): assert_raises_rpc_error(3, err_msg, self.nodes[0].eth_estimateGas, call_request, err_data_=err_data) assert_raises_rpc_error(3, err_msg, self.nodes[0].eth_call, call_request, err_data_=err_data) - data = err_contract.encodeABI(fn_name="testRevert", args=[1]) + data = err_contract.encode_abi(abi_element_identifier="testRevert", args=[1]) call_request = { "to": addr, "data": data, @@ -61,7 +61,7 @@ def test_revert(self): assert_raises_rpc_error(3, err_msg, self.nodes[0].eth_estimateGas, call_request, err_data_=err_data) assert_raises_rpc_error(3, err_msg, self.nodes[0].eth_call, call_request, err_data_=err_data) - data = err_contract.encodeABI(fn_name="testCustomError", args=[1]) + data = err_contract.encode_abi(abi_element_identifier="testCustomError", args=[1]) call_request = { "to": addr, "data": data, diff --git a/tests/evm_space/filter_log_test.py b/tests/evm_space/filter_log_test.py index 8982674e7..7430dcccd 100644 --- a/tests/evm_space/filter_log_test.py +++ b/tests/evm_space/filter_log_test.py @@ -78,7 +78,7 @@ async def run_async(self): assert_equal(self.w3.is_connected(), True) # initialize EVM account - self.evmAccount = self.w3.eth.account.privateKeyToAccount( + self.evmAccount = self.w3.eth.account.from_key( DEFAULT_TEST_ACCOUNT_KEY ) print(f"Using EVM account {self.evmAccount.address}") @@ -175,9 +175,9 @@ def run_test(self): asyncio.get_event_loop().run_until_complete(self.run_async()) def deploy_evm_space(self, data_hex): - nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) + nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) - signed = self.evmAccount.signTransaction( + signed = self.evmAccount.sign_transaction( { "to": None, "value": 0, @@ -190,19 +190,19 @@ def deploy_evm_space(self, data_hex): ) tx_hash = signed["hash"] - return_tx_hash = self.w3.eth.sendRawTransaction(signed["rawTransaction"]) + return_tx_hash = self.w3.eth.send_raw_transaction(signed["raw_transaction"]) assert_equal(tx_hash, return_tx_hash) self.rpc[FULLNODE0].generate_block(1) self.rpc[FULLNODE0].generate_blocks(20, 1) - receipt = self.w3.eth.waitForTransactionReceipt(tx_hash) + 
receipt = self.w3.eth.wait_for_transaction_receipt(tx_hash) assert_equal(receipt["status"], 1) addr = receipt["contractAddress"] return receipt, addr def call_contract(self, contract, data_hex): - nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) - signed = self.evmAccount.signTransaction( + nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) + signed = self.evmAccount.sign_transaction( { "to": contract, "value": 0, @@ -214,7 +214,7 @@ def call_contract(self, contract, data_hex): } ) - tx = self.w3.eth.sendRawTransaction(signed["rawTransaction"]).hex() + tx = self.w3.eth.send_raw_transaction(signed["raw_transaction"]).hex() time_end = time.time() + 10 while time.time() < time_end: self.rpc[FULLNODE0].generate_block(1) diff --git a/tests/evm_space/filter_transaction_test.py b/tests/evm_space/filter_transaction_test.py index 444979e49..c1270656b 100644 --- a/tests/evm_space/filter_transaction_test.py +++ b/tests/evm_space/filter_transaction_test.py @@ -47,7 +47,7 @@ async def run_async(self): self.cfxAccount = client.GENESIS_ADDR # initialize EVM account - self.evmAccount = self.w3.eth.account.privateKeyToAccount( + self.evmAccount = self.w3.eth.account.from_key( self.DEFAULT_TEST_ACCOUNT_KEY ) self.cross_space_transfer(self.evmAccount.address, 1 * 10**18) @@ -56,7 +56,7 @@ async def run_async(self): ) # new account - account2 = self.w3.eth.account.privateKeyToAccount(hex(random.getrandbits(256))) + account2 = self.w3.eth.account.from_key(hex(random.getrandbits(256))) self.cross_space_transfer(account2.address, 1 * 10**18) assert_equal(self.nodes[0].eth_getBalance(account2.address), hex(1 * 10**18)) @@ -66,16 +66,16 @@ async def run_async(self): assert_equal(len(filter_txs), 0) # target address - to_address = self.w3.eth.account.privateKeyToAccount( + to_address = self.w3.eth.account.from_key( hex(random.getrandbits(256)) ) - nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) + nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) # create txs txs_size = 20 txs = [] for i in range(txs_size): - signed = self.evmAccount.signTransaction( + signed = self.evmAccount.sign_transaction( { "to": to_address.address, "value": 1, @@ -86,8 +86,8 @@ async def run_async(self): } ) - return_tx_hash = self.w3.eth.sendRawTransaction(signed["rawTransaction"]) - txs.append(return_tx_hash.hex()) + return_tx_hash = self.w3.eth.send_raw_transaction(signed["raw_transaction"]) + txs.append(return_tx_hash.to_0x_hex()) nonce += 1 def wait_to_pack_txs(size): @@ -109,21 +109,21 @@ def wait_to_pack_txs(size): assert_equal(filter_txs[0], txs[5]) # tx for second account - signed = account2.signTransaction( + signed = account2.sign_transaction( { "to": to_address.address, "value": 1, "gasPrice": 1, "gas": 210000, - "nonce": self.w3.eth.getTransactionCount(account2.address), + "nonce": self.w3.eth.get_transaction_count(account2.address), "chainId": 10, } ) - tx_second_account = self.w3.eth.sendRawTransaction(signed["rawTransaction"]) + tx_second_account = self.w3.eth.send_raw_transaction(signed["raw_transaction"]) filter_txs = self.nodes[0].eth_getFilterChanges(filter) assert_equal(len(filter_txs), 1) - assert_equal(filter_txs[0], tx_second_account.hex()) + assert_equal(filter_txs[0], tx_second_account.to_0x_hex()) # pack all transactons wait_until(lambda: wait_to_pack_txs(20)) diff --git a/tests/evm_space/log_filtering_test.py b/tests/evm_space/log_filtering_test.py index 0419df894..2d2cba9ff 100755 --- a/tests/evm_space/log_filtering_test.py +++ 
b/tests/evm_space/log_filtering_test.py @@ -39,7 +39,7 @@ def run_test(self): print(f'Using Conflux account {self.cfxAccount}') # initialize EVM account - self.evmAccount = self.w3.eth.account.privateKeyToAccount(self.DEFAULT_TEST_ACCOUNT_KEY) + self.evmAccount = self.w3.eth.account.from_key(self.DEFAULT_TEST_ACCOUNT_KEY) print(f'Using EVM account {self.evmAccount.address}') self.cross_space_transfer(self.evmAccount.address, 1 * 10 ** 18) assert_equal(self.nodes[0].eth_getBalance(self.evmAccount.address), hex(1 * 10 ** 18)) @@ -66,7 +66,7 @@ def run_test(self): cfx_next_nonce = self.rpc.get_nonce(self.cfxAccount) cfx_tx_hashes = [] - evm_next_nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) + evm_next_nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) evm_tx_hashes = [] def emitConflux(n): @@ -138,7 +138,7 @@ def emitEVM(n): assert_equal(receipt["outcomeStatus"], "0x0") for h in evm_tx_hashes: - receipt = self.w3.eth.waitForTransactionReceipt(h) + receipt = self.w3.eth.wait_for_transaction_receipt(h) assert_equal(receipt["status"], 1) # check Conflux events @@ -392,9 +392,9 @@ def deploy_evm_space(self, bytecode_path): assert(os.path.isfile(bytecode_file)) bytecode = open(bytecode_file).read() - nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) + nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) - signed = self.evmAccount.signTransaction({ + signed = self.evmAccount.sign_transaction({ "to": None, "value": 0, "gasPrice": 1, @@ -405,12 +405,12 @@ def deploy_evm_space(self, bytecode_path): }) tx_hash = signed["hash"] - return_tx_hash = self.w3.eth.sendRawTransaction(signed["rawTransaction"]) + return_tx_hash = self.w3.eth.send_raw_transaction(signed["raw_transaction"]) assert_equal(tx_hash, return_tx_hash) self.rpc.generate_block(1) self.rpc.generate_blocks(20, 1) - receipt = self.w3.eth.waitForTransactionReceipt(tx_hash) + receipt = self.w3.eth.wait_for_transaction_receipt(tx_hash) assert_equal(receipt["status"], 1) addr = receipt["contractAddress"] return addr diff --git a/tests/evm_space/out_of_balance_test.py b/tests/evm_space/out_of_balance_test.py index a05ce6386..3c1890aef 100755 --- a/tests/evm_space/out_of_balance_test.py +++ b/tests/evm_space/out_of_balance_test.py @@ -10,9 +10,9 @@ class OutOfBalanceTest(Web3Base): def run_test(self): - self.evmAccount = self.w3.eth.account.privateKeyToAccount(self.DEFAULT_TEST_ACCOUNT_KEY) - nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) - signed = self.evmAccount.signTransaction({ + self.evmAccount = self.w3.eth.account.from_key(self.DEFAULT_TEST_ACCOUNT_KEY) + nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) + signed = self.evmAccount.sign_transaction({ "to": self.evmAccount.address, "value": default_config["TOTAL_COIN"], "gasPrice": 2, @@ -22,7 +22,7 @@ def run_test(self): }) try: - self.w3.eth.sendRawTransaction(signed["rawTransaction"]) + self.w3.eth.send_raw_transaction(signed["raw_transaction"]) AssertionError("expect out of balance error") except Exception as e: assert_equal(str(e), "{'code': -32003, 'message': 'insufficient funds for transfer'}") diff --git a/tests/evm_space/phantom_trace_test.py b/tests/evm_space/phantom_trace_test.py index f1b1cb3f5..0d6956262 100755 --- a/tests/evm_space/phantom_trace_test.py +++ b/tests/evm_space/phantom_trace_test.py @@ -39,7 +39,7 @@ def run_test(self): print(f'Using Conflux account {self.cfxAccount}') # initialize EVM account - self.evmAccount = 
self.w3.eth.account.privateKeyToAccount(self.DEFAULT_TEST_ACCOUNT_KEY) + self.evmAccount = self.w3.eth.account.from_key(self.DEFAULT_TEST_ACCOUNT_KEY) print(f'Using EVM account {self.evmAccount.address}') self.cross_space_transfer(self.evmAccount.address, 1 * 10 ** 18) assert_equal(self.nodes[0].eth_getBalance(self.evmAccount.address), hex(1 * 10 ** 18)) @@ -80,7 +80,7 @@ def run_test(self): self.log.info("Pass") def test_callEVM(self): - data_hex = self.confluxContract.encodeABI(fn_name="callEVM", args=[self.evmContractAddr, 1]) + data_hex = self.confluxContract.encode_abi(abi_element_identifier="callEVM", args=[self.evmContractAddr, 1]) tx = self.rpc.new_contract_tx(receiver=self.confluxContractAddr, data_hex=data_hex) cfxTxHash = tx.hash_hex() assert_equal(self.rpc.send_tx(tx, True), cfxTxHash) @@ -135,7 +135,7 @@ def test_callEVM(self): assert_equal(phantom1["from"], mapped_address(self.confluxContractAddr)) assert_equal(phantom1["to"], self.evmContractAddr.lower()) - assert_equal(phantom1["input"], self.evmContract.encodeABI(fn_name="call", args=[1])), + assert_equal(phantom1["input"], self.evmContract.encode_abi(abi_element_identifier="call", args=[1])), assert_equal(phantom1["status"], "0x1") assert_equal(phantom1["blockHash"], block["hash"]) assert_equal(phantom1["blockNumber"], block["number"]) @@ -170,7 +170,7 @@ def test_callEVM(self): "callType": "call", "from": self.evmContractAddr.lower(), "to": self.evmContractAddr.lower(), - "input": self.evmContract.encodeABI(fn_name="call", args=[0]), + "input": self.evmContract.encode_abi(abi_element_identifier="call", args=[0]), "gas": "0x0", "value": "0x0", }, @@ -206,7 +206,7 @@ def test_callEVM(self): assert_equal(filtered, block_traces) def test_staticCallEVM(self): - data_hex = self.confluxContract.encodeABI(fn_name="staticCallEVM", args=[self.evmContractAddr, 1]) + data_hex = self.confluxContract.encode_abi(abi_element_identifier="staticCallEVM", args=[self.evmContractAddr, 1]) tx = self.rpc.new_contract_tx(receiver=self.confluxContractAddr, data_hex=data_hex) assert_equal(self.rpc.send_tx(tx, True), tx.hash_hex()) receipt = self.rpc.get_transaction_receipt(tx.hash_hex()) @@ -232,7 +232,7 @@ def test_createEVM(self): assert(os.path.isfile(bytecode_file)) bytecode = open(bytecode_file).read() - data_hex = self.confluxContract.encodeABI(fn_name="createEVM", args=[bytecode]) + data_hex = self.confluxContract.encode_abi(abi_element_identifier="createEVM", args=[bytes.fromhex(bytecode)]) tx = self.rpc.new_contract_tx(receiver=self.confluxContractAddr, data_hex=data_hex, gas=3_700_000) cfxTxHash = tx.hash_hex() assert_equal(self.rpc.send_tx(tx, True), cfxTxHash) @@ -334,7 +334,7 @@ def test_createEVM(self): assert_equal(filtered, block_traces) def test_transferEVM(self): - data_hex = self.confluxContract.encodeABI(fn_name="transferEVM", args=[self.evmAccount.address]) + data_hex = self.confluxContract.encode_abi(abi_element_identifier="transferEVM", args=[self.evmAccount.address]) tx = self.rpc.new_contract_tx(receiver=self.confluxContractAddr, data_hex=data_hex, value=0x222) cfxTxHash = tx.hash_hex() assert_equal(self.rpc.send_tx(tx, True), cfxTxHash) @@ -445,7 +445,7 @@ def test_transferEVM(self): def test_withdrawFromMapped(self): # withdraw with insufficient funds should fail - data_hex = self.confluxContract.encodeABI(fn_name="withdrawFromMapped", args=[0x123]) + data_hex = self.confluxContract.encode_abi(abi_element_identifier="withdrawFromMapped", args=[0x123]) tx = self.rpc.new_contract_tx(receiver=self.confluxContractAddr, 
data_hex=data_hex) cfxTxHash = tx.hash_hex() assert_equal(self.rpc.send_tx(tx, True), cfxTxHash) @@ -454,9 +454,9 @@ def test_withdrawFromMapped(self): # transfer funds to mapped account receiver = Web3.to_checksum_address(mapped_address(self.confluxContractAddr)) - nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) + nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) - signed = self.evmAccount.signTransaction({ + signed = self.evmAccount.sign_transaction({ "to": receiver, "value": 0x123, "gasPrice": 1, @@ -466,12 +466,12 @@ def test_withdrawFromMapped(self): "data": data_hex, }) - self.w3.eth.sendRawTransaction(signed["rawTransaction"]) + self.w3.eth.send_raw_transaction(signed["raw_transaction"]) self.rpc.generate_blocks(20, 1) - receipt = self.w3.eth.waitForTransactionReceipt(signed["hash"]) + receipt = self.w3.eth.wait_for_transaction_receipt(signed["hash"]) assert_equal(receipt["status"], 1) # success - data_hex = self.confluxContract.encodeABI(fn_name="withdrawFromMapped", args=[0x123]) + data_hex = self.confluxContract.encode_abi(abi_element_identifier="withdrawFromMapped", args=[0x123]) tx = self.rpc.new_contract_tx(receiver=self.confluxContractAddr, data_hex=data_hex) cfxTxHash = tx.hash_hex() assert_equal(self.rpc.send_tx(tx, True), cfxTxHash) @@ -536,7 +536,7 @@ def test_withdrawFromMapped(self): def test_fail(self): # test failing tx - data_hex = self.confluxContract.encodeABI(fn_name="fail", args=[self.evmContractAddr]) + data_hex = self.confluxContract.encode_abi(abi_element_identifier="fail", args=[self.evmContractAddr]) tx = self.rpc.new_contract_tx(receiver=self.confluxContractAddr, data_hex=data_hex) cfxTxHash = tx.hash_hex() assert_equal(self.rpc.send_tx(tx, True), cfxTxHash) @@ -555,7 +555,7 @@ def test_fail(self): assert_equal(len(block_traces), 0) # test failing subcall - data_hex = self.confluxContract.encodeABI(fn_name="subcallFail", args=[self.evmContractAddr]) + data_hex = self.confluxContract.encode_abi(abi_element_identifier="subcallFail", args=[self.evmContractAddr]) tx = self.rpc.new_contract_tx(receiver=self.confluxContractAddr, data_hex=data_hex) cfxTxHash = tx.hash_hex() assert_equal(self.rpc.send_tx(tx, True), cfxTxHash) @@ -578,7 +578,7 @@ def test_fail(self): assert_equal(filtered, None) # test insufficient storage (issue #2483) - data_hex = self.confluxContract.encodeABI(fn_name="callEVMAndSetStorage", args=[self.evmContractAddr, 1]) + data_hex = self.confluxContract.encode_abi(abi_element_identifier="callEVMAndSetStorage", args=[self.evmContractAddr, 1]) tx = self.rpc.new_contract_tx(receiver=self.confluxContractAddr, data_hex=data_hex) cfxTxHash = tx.hash_hex() assert_equal(self.rpc.send_tx(tx, True), cfxTxHash) @@ -589,7 +589,7 @@ def test_fail(self): assert_equal(len(block_traces), 0) def test_deployEip1820(self): - data_hex = self.crossSpaceContract.encodeABI(fn_name="deployEip1820", args=[]) + data_hex = self.crossSpaceContract.encode_abi(abi_element_identifier="deployEip1820", args=[]) tx = self.rpc.new_contract_tx(receiver=CROSS_SPACE_CALL_ADDRESS, data_hex=data_hex) cfxTxHash = tx.hash_hex() assert_equal(self.rpc.send_tx(tx, True), cfxTxHash) diff --git a/tests/evm_space/phantom_transaction_test.py b/tests/evm_space/phantom_transaction_test.py index 71daf4750..1fb95837f 100755 --- a/tests/evm_space/phantom_transaction_test.py +++ b/tests/evm_space/phantom_transaction_test.py @@ -55,7 +55,7 @@ def run_test(self): print(f'Using Conflux account {self.cfxAccount}') # initialize EVM account - self.evmAccount = 
self.w3.eth.account.privateKeyToAccount("0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef") + self.evmAccount = self.w3.eth.account.from_key("0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef") print(f'Using EVM account {self.evmAccount.address}') self.cross_space_transfer(self.evmAccount.address, 1 * 10 ** 18) assert_equal(self.nodes[0].eth_getBalance(self.evmAccount.address), hex(1 * 10 ** 18)) @@ -82,7 +82,7 @@ def run_test(self): cfx_next_nonce = self.rpc.get_nonce(self.cfxAccount) cfx_tx_hashes = [] - evm_next_nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) + evm_next_nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) evm_tx_hashes = [] def emitConflux(n): @@ -155,7 +155,7 @@ def emitEVM(n): assert_equal(receipt["outcomeStatus"], "0x0") for h in evm_tx_hashes: - receipt = self.w3.eth.waitForTransactionReceipt(h) + receipt = self.w3.eth.wait_for_transaction_receipt(h) assert_equal(receipt["status"], 1) # TODO: add failing tx @@ -402,7 +402,7 @@ def emitEVM(n): # make sure pending transactions can be retrieved even before execution evm_next_nonce += 1 - signed = self.evmAccount.signTransaction({ + signed = self.evmAccount.sign_transaction({ "to": evmContractAddr, "value": 0, "gasPrice": 1, @@ -412,7 +412,7 @@ def emitEVM(n): "data": "0x", }) - tx_hash = self.w3.eth.sendRawTransaction(signed["rawTransaction"]) + tx_hash = self.w3.eth.send_raw_transaction(signed["raw_transaction"]) tx = self.nodes[0].eth_getTransactionByHash(tx_hash.hex()) assert_ne(tx, None) @@ -448,9 +448,9 @@ def deploy_evm_space(self, bytecode_path): assert(os.path.isfile(bytecode_file)) bytecode = open(bytecode_file).read() - nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) + nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) - signed = self.evmAccount.signTransaction({ + signed = self.evmAccount.sign_transaction({ "to": None, "value": 0, "gasPrice": 1, @@ -461,18 +461,18 @@ def deploy_evm_space(self, bytecode_path): }) tx_hash = signed["hash"] - return_tx_hash = self.w3.eth.sendRawTransaction(signed["rawTransaction"]) + return_tx_hash = self.w3.eth.send_raw_transaction(signed["raw_transaction"]) assert_equal(tx_hash, return_tx_hash) self.rpc.generate_block(1) self.rpc.generate_blocks(20, 1) - receipt = self.w3.eth.waitForTransactionReceipt(tx_hash) + receipt = self.w3.eth.wait_for_transaction_receipt(tx_hash) assert_equal(receipt["status"], 1) addr = receipt["contractAddress"] return addr def construct_evm_tx(self, receiver, data_hex, nonce): - signed = self.evmAccount.signTransaction({ + signed = self.evmAccount.sign_transaction({ "to": receiver, "value": 0, "gasPrice": 1, diff --git a/tests/evm_space/phantom_tx_hash_test.py b/tests/evm_space/phantom_tx_hash_test.py index b2bd183f3..914279a31 100755 --- a/tests/evm_space/phantom_tx_hash_test.py +++ b/tests/evm_space/phantom_tx_hash_test.py @@ -33,7 +33,7 @@ def run_test(self): print(f'Using Conflux account {self.cfxAccount}') # initialize EVM account - self.evmAccount = self.w3.eth.account.privateKeyToAccount(self.DEFAULT_TEST_ACCOUNT_KEY) + self.evmAccount = self.w3.eth.account.from_key(self.DEFAULT_TEST_ACCOUNT_KEY) print(f'Using EVM account {self.evmAccount.address}') self.cross_space_transfer(self.evmAccount.address, 1 * 10 ** 18) assert_equal(self.nodes[0].eth_getBalance(self.evmAccount.address), hex(1 * 10 ** 18)) @@ -50,7 +50,7 @@ def run_test(self): # create and charge accounts cfx_privkey_1 = 
"0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcde0" - cfx_address_1 = self.w3.eth.account.privateKeyToAccount(cfx_privkey_1).address + cfx_address_1 = self.w3.eth.account.from_key(cfx_privkey_1).address cfx_address_1 = cfx_address_1[:2] + '1' + cfx_address_1[3:] self.cross_space_transfer(mapped_address(cfx_address_1), 1 * 10 ** 18) @@ -62,7 +62,7 @@ def run_test(self): ), True) cfx_privkey_2 = "0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcde1" - cfx_address_2 = self.w3.eth.account.privateKeyToAccount(cfx_privkey_2).address + cfx_address_2 = self.w3.eth.account.from_key(cfx_privkey_2).address cfx_address_2 = cfx_address_2[:2] + '1' + cfx_address_2[3:] self.cross_space_transfer(mapped_address(cfx_address_2), 1 * 10 ** 18) @@ -74,7 +74,7 @@ def run_test(self): ), True) # withdraw - data_hex = self.crossSpaceContract.encodeABI(fn_name="withdrawFromMapped", args=[1]) + data_hex = self.crossSpaceContract.encode_abi(abi_element_identifier="withdrawFromMapped", args=[1]) tx = self.rpc.new_contract_tx(receiver=CROSS_SPACE_CALL_ADDRESS, data_hex=data_hex, sender=cfx_address_1, priv_key=cfx_privkey_1) cfx_tx_hash_1 = tx.hash_hex() @@ -102,7 +102,7 @@ def run_test(self): # call call_hex = encode_hex_0x(keccak(b"emitEVM(uint256)"))[:10] + encode_u256(0) - data_hex = self.crossSpaceContract.encodeABI(fn_name="callEVM", args=[self.evmContractAddr, call_hex]) + data_hex = self.crossSpaceContract.encode_abi(abi_element_identifier="callEVM", args=[self.evmContractAddr, call_hex]) tx = self.rpc.new_contract_tx(receiver=CROSS_SPACE_CALL_ADDRESS, data_hex=data_hex, sender=cfx_address_1, priv_key=cfx_privkey_1) cfx_tx_hash_1 = tx.hash_hex() diff --git a/tests/evm_space/rpc_error_test.py b/tests/evm_space/rpc_error_test.py index 5eafe0c85..bdcf989a2 100644 --- a/tests/evm_space/rpc_error_test.py +++ b/tests/evm_space/rpc_error_test.py @@ -5,7 +5,7 @@ from test_framework.util import * from web3 import Web3 -toHex = Web3.toHex +toHex = Web3.to_hex class RpcErrorTest(Web3Base): def set_test_params(self): @@ -17,7 +17,7 @@ def run_test(self): self.cfxAccount = self.rpc.GENESIS_ADDR print(f'Using Conflux account {self.cfxAccount}') - self.evmAccount = self.w3.eth.account.privateKeyToAccount(self.DEFAULT_TEST_ACCOUNT_KEY) + self.evmAccount = self.w3.eth.account.from_key(self.DEFAULT_TEST_ACCOUNT_KEY) print(f'Using EVM account {self.evmAccount.address}') self.cross_space_transfer(self.evmAccount.address, 100 * 10 ** 18) @@ -33,9 +33,9 @@ def run_test(self): self.zero_gas_price() def invalid_chain_id(self): - nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) + nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) - signed = self.evmAccount.signTransaction({ + signed = self.evmAccount.sign_transaction({ "to": self.evmAccount.address, "value": 1, "gasPrice": 1, @@ -45,15 +45,15 @@ def invalid_chain_id(self): }) try: - self.w3.eth.send_raw_transaction(signed["rawTransaction"]) + self.w3.eth.send_raw_transaction(signed["raw_transaction"]) except Exception as e: assert_equal(str(e), "{'code': -32000, 'message': 'invalid chain ID'}") return def nonce_too_low(self): - nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) + nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) - signed = self.evmAccount.signTransaction({ + signed = self.evmAccount.sign_transaction({ "to": self.evmAccount.address, "value": 1, "gasPrice": 1, @@ -63,15 +63,15 @@ def nonce_too_low(self): }) try: - self.w3.eth.send_raw_transaction(signed["rawTransaction"]) + 
self.w3.eth.send_raw_transaction(signed["raw_transaction"]) except Exception as e: assert_equal(str(e), "{'code': -32003, 'message': 'nonce too low'}") return def nonce_too_high(self): - nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) + nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) - signed = self.evmAccount.signTransaction({ + signed = self.evmAccount.sign_transaction({ "to": self.evmAccount.address, "value": 1, "gasPrice": 1, @@ -81,15 +81,15 @@ def nonce_too_high(self): }) try: - self.w3.eth.send_raw_transaction(signed["rawTransaction"]) + self.w3.eth.send_raw_transaction(signed["raw_transaction"]) except Exception as e: assert_equal(str(e), "{'code': -32003, 'message': 'nonce too high'}") return def same_nonce_higher_gas_price_required(self): - nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) + nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) - signed = self.evmAccount.signTransaction({ + signed = self.evmAccount.sign_transaction({ "to": self.evmAccount.address, "value": 1, "gasPrice": 1, @@ -99,17 +99,17 @@ def same_nonce_higher_gas_price_required(self): }) try: - self.w3.eth.send_raw_transaction(signed["rawTransaction"]) + self.w3.eth.send_raw_transaction(signed["raw_transaction"]) wait_ms(1000) - self.w3.eth.send_raw_transaction(signed["rawTransaction"]) + self.w3.eth.send_raw_transaction(signed["raw_transaction"]) except Exception as e: assert_equal(str(e), "{'code': -32603, 'message': 'replacement transaction underpriced'}") return def zero_gas_price(self): - nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) + nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) - signed = self.evmAccount.signTransaction({ + signed = self.evmAccount.sign_transaction({ "to": self.evmAccount.address, "value": 1, "gasPrice": 0, @@ -119,15 +119,15 @@ def zero_gas_price(self): }) try: - self.w3.eth.send_raw_transaction(signed["rawTransaction"]) + self.w3.eth.send_raw_transaction(signed["raw_transaction"]) except Exception as e: assert_equal(str(e), "{'code': -32603, 'message': 'transaction underpriced'}") return def gas_too_low(self): - nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) + nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) - signed = self.evmAccount.signTransaction({ + signed = self.evmAccount.sign_transaction({ "to": self.evmAccount.address, "value": 1, "gasPrice": 1, @@ -137,7 +137,7 @@ def gas_too_low(self): }) try: - self.w3.eth.send_raw_transaction(signed["rawTransaction"]) + self.w3.eth.send_raw_transaction(signed["raw_transaction"]) except Exception as e: assert_equal(str(e), "{'code': -32000, 'message': 'intrinsic gas too low'}") return @@ -145,9 +145,9 @@ def gas_too_low(self): # TODO: estimate: out of gas def gas_too_high(self): - nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) + nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) - signed = self.evmAccount.signTransaction({ + signed = self.evmAccount.sign_transaction({ "to": self.evmAccount.address, "value": 1, "gasPrice": 1, @@ -157,16 +157,16 @@ def gas_too_high(self): }) try: - self.w3.eth.send_raw_transaction(signed["rawTransaction"]) + self.w3.eth.send_raw_transaction(signed["raw_transaction"]) # AssertionError("send tx failed") except Exception as e: assert_equal(str(e), "{'code': -32603, 'message': 'exceeds block gas limit'}") return def valid_tx(self): - nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) + nonce = 
self.w3.eth.get_transaction_count(self.evmAccount.address) - signed = self.evmAccount.signTransaction({ + signed = self.evmAccount.sign_transaction({ "to": self.evmAccount.address, "value": 1, "maxFeePerGas": 1, @@ -176,15 +176,15 @@ def valid_tx(self): "chainId": 10, }) - tx_hash = self.w3.eth.send_raw_transaction(signed["rawTransaction"]) + tx_hash = self.w3.eth.send_raw_transaction(signed["raw_transaction"]) self.rpc.generate_blocks(20, 1) - next_nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) + next_nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) assert_equal(next_nonce, nonce + 1) - tx = self.w3.eth.getTransaction(tx_hash) + tx = self.w3.eth.get_transaction(tx_hash) assert_equal(tx["nonce"], nonce) - assert_equal(tx["type"], "0x2") + assert_equal(tx["type"], 2) def wait_ms(ms): diff --git a/tests/evm_space/rpc_test.py b/tests/evm_space/rpc_test.py index f869b1fc2..302c727a8 100644 --- a/tests/evm_space/rpc_test.py +++ b/tests/evm_space/rpc_test.py @@ -3,7 +3,7 @@ from test_framework.util import * from web3 import Web3 -toHex = Web3.toHex +toHex = Web3.to_hex class EspaceRpcTest(Web3Base): @@ -12,7 +12,7 @@ def run_test(self): self.cfxAccount = self.rpc.GENESIS_ADDR print(f'Using Conflux account {self.cfxAccount}') - self.evmAccount = self.w3.eth.account.privateKeyToAccount(self.DEFAULT_TEST_ACCOUNT_KEY) + self.evmAccount = self.w3.eth.account.from_key(self.DEFAULT_TEST_ACCOUNT_KEY) print(f'Using EVM account {self.evmAccount.address}') self.cross_space_transfer(self.evmAccount.address, 1000 * 10 ** 18) @@ -27,7 +27,7 @@ def eth_call_test(self): erc20 = self.load_contract(addr, "erc20") - data = erc20.encodeABI(fn_name="balanceOf", args=[self.evmAccount.address]) + data = erc20.encode_abi(abi_element_identifier="balanceOf", args=[self.evmAccount.address]) res1 = self.nodes[0].ethrpc.eth_call({ "from": self.evmAccount.address, diff --git a/tests/evm_space/trace_test.py b/tests/evm_space/trace_test.py index a77f880bb..f04a9352c 100755 --- a/tests/evm_space/trace_test.py +++ b/tests/evm_space/trace_test.py @@ -34,7 +34,7 @@ def run_test(self): print(f'Using Conflux account {self.cfxAccount}') # initialize EVM account - self.evmAccount = self.w3.eth.account.privateKeyToAccount(self.DEFAULT_TEST_ACCOUNT_KEY) + self.evmAccount = self.w3.eth.account.from_key(self.DEFAULT_TEST_ACCOUNT_KEY) print(f'Using EVM account {self.evmAccount.address}') self.cross_space_transfer(self.evmAccount.address, 1 * 10 ** 18) assert_equal(self.nodes[0].eth_getBalance(self.evmAccount.address), hex(1 * 10 ** 18)) @@ -43,7 +43,7 @@ def run_test(self): evmContractAddr = self.deploy_evm_space(EVM_CONTRACT_PATH) print(f'EVM contract: {evmContractAddr}') - evm_next_nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) + evm_next_nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) evm_tx_hashes = [] def emitEVM(n): @@ -72,7 +72,7 @@ def emitEVM(n): parent_hash = block for h in evm_tx_hashes: - receipt = self.w3.eth.waitForTransactionReceipt(h) + receipt = self.w3.eth.wait_for_transaction_receipt(h) assert_equal(receipt["status"], 1) filter = { "fromBlock": epoch_a } @@ -108,9 +108,9 @@ def deploy_evm_space(self, bytecode_path): assert(os.path.isfile(bytecode_file)) bytecode = open(bytecode_file).read() - nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) + nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) - signed = self.evmAccount.signTransaction({ + signed = self.evmAccount.sign_transaction({ "to": None, "value": 0, "gasPrice": 
1, @@ -121,12 +121,12 @@ def deploy_evm_space(self, bytecode_path): }) tx_hash = signed["hash"] - return_tx_hash = self.w3.eth.sendRawTransaction(signed["rawTransaction"]) + return_tx_hash = self.w3.eth.send_raw_transaction(signed["raw_transaction"]) assert_equal(tx_hash, return_tx_hash) self.rpc.generate_block(1) self.rpc.generate_blocks(20, 1) - receipt = self.w3.eth.waitForTransactionReceipt(tx_hash) + receipt = self.w3.eth.wait_for_transaction_receipt(tx_hash) assert_equal(receipt["status"], 1) addr = receipt["contractAddress"] return addr diff --git a/tests/evm_space/tx_and_receipt_test.py b/tests/evm_space/tx_and_receipt_test.py index 4a1547e67..3110615ad 100755 --- a/tests/evm_space/tx_and_receipt_test.py +++ b/tests/evm_space/tx_and_receipt_test.py @@ -16,7 +16,7 @@ def run_test(self): self.cfxAccount = self.rpc.GENESIS_ADDR print(f'Using Conflux account {self.cfxAccount}') # initialize EVM account - self.evmAccount = self.w3.eth.account.privateKeyToAccount(self.DEFAULT_TEST_ACCOUNT_KEY) + self.evmAccount = self.w3.eth.account.from_key(self.DEFAULT_TEST_ACCOUNT_KEY) print(f'Using EVM account {self.evmAccount.address}') self.cross_space_transfer(self.evmAccount.address, 1 * 10 ** 18) assert_equal(self.nodes[0].eth_getBalance(self.evmAccount.address), hex(1 * 10 ** 18)) @@ -26,8 +26,8 @@ def run_test(self): ret = self.nodes[0].debug_getTransactionsByEpoch("0x1") assert_equal(len(ret), 1) - nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) - signed = self.evmAccount.signTransaction({ + nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) + signed = self.evmAccount.sign_transaction({ "to": self.evmAccount.address, "value": 1, "gasPrice": 1, @@ -36,10 +36,10 @@ def run_test(self): "chainId": 10, }) - return_tx_hash = self.w3.eth.sendRawTransaction(signed["rawTransaction"]) + return_tx_hash = self.w3.eth.send_raw_transaction(signed["raw_transaction"]) self.rpc.generate_block(1) self.rpc.generate_blocks(20, 1) - receipt = self.w3.eth.waitForTransactionReceipt(return_tx_hash) + receipt = self.w3.eth.wait_for_transaction_receipt(return_tx_hash) assert_equal(receipt["status"], 1) assert_equal(receipt["gasUsed"], 21000) assert_equal(receipt["txExecErrorMsg"], None) diff --git a/tests/issue2483_test.py b/tests/issue2483_test.py index fa464a967..d811bccb9 100644 --- a/tests/issue2483_test.py +++ b/tests/issue2483_test.py @@ -39,7 +39,7 @@ def run_test(self): contract_address = receipt["contractCreated"] contract = self.w3.eth.contract(abi=abi) - call_data = contract.encodeABI(fn_name="setFresh") + call_data = contract.encode_abi("setFresh") tx = self.rpc.new_contract_tx(receiver=contract_address, data_hex=call_data) assert_equal(self.rpc.send_tx(tx, True), tx.hash_hex()) diff --git a/tests/issue988_test.py b/tests/issue988_test.py index 645482b8c..40a579bb9 100755 --- a/tests/issue988_test.py +++ b/tests/issue988_test.py @@ -68,7 +68,7 @@ def call_contract_function(self, contract, name, args, sender_key, value=None, attrs['to'] = contract_addr else: attrs['receiver'] = b'' - tx_data = func(*args).buildTransaction(attrs) + tx_data = func(*args).build_transaction(attrs) tx_data['data'] = decode_hex(tx_data['data']) tx_data['pri_key'] = sender_key tx_data['gas_price'] = tx_data['gasPrice'] @@ -91,7 +91,7 @@ def call_contract_function_rpc(self, contract, name, args, contract_addr): attrs["gasPrice"] = int_to_hex(gas_price) attrs["chainId"] = 0 attrs["to"] = Web3.to_checksum_address(contract_addr) - tx = func(*args).buildTransaction(attrs) + tx = 
func(*args).build_transaction(attrs) return RpcClient(self.nodes[0]).call(contract_addr, tx["data"]) def run_test(self): diff --git a/tests/light/rpc_test.py b/tests/light/rpc_test.py index 6a0775699..afa5009bd 100755 --- a/tests/light/rpc_test.py +++ b/tests/light/rpc_test.py @@ -71,11 +71,11 @@ def _setup_stake_contract(self, addr, priv): "chainId": 0 } - tx_data = decode_hex(staking_contract.functions.deposit(10 ** 18).buildTransaction(tx_conf)["data"]) + tx_data = decode_hex(staking_contract.functions.deposit(10 ** 18).build_transaction(tx_conf)["data"]) tx = self.rpc[FULLNODE0].new_tx(value=0, sender=addr, receiver=contract_addr, gas=3_000_000, data=tx_data, priv_key=priv) assert_equal(self.rpc[FULLNODE0].send_tx(tx, True), tx.hash_hex()) - tx_data = decode_hex(staking_contract.functions.voteLock(4 * 10 ** 17, 100000).buildTransaction(tx_conf)["data"]) + tx_data = decode_hex(staking_contract.functions.voteLock(4 * 10 ** 17, 100000).build_transaction(tx_conf)["data"]) tx = self.rpc[FULLNODE0].new_tx(value=0, sender=addr, receiver=contract_addr, gas=3_000_000, data=tx_data, priv_key=priv) assert_equal(self.rpc[FULLNODE0].send_tx(tx, True), tx.hash_hex()) @@ -94,11 +94,11 @@ def _setup_sponsor(self, contractAddr): } # setSponsorForGas - data = whitelist_control.functions.setSponsorForGas(Web3.to_checksum_address(contractAddr), 2000000).buildTransaction({"to":Web3.to_checksum_address(whitelist_control_addr), **tx_conf})["data"] + data = whitelist_control.functions.setSponsorForGas(Web3.to_checksum_address(contractAddr), 2000000).build_transaction({"to":Web3.to_checksum_address(whitelist_control_addr), **tx_conf})["data"] self.call_contract(contract=whitelist_control_addr, data_hex=data, value=20000000000000000000) # setSponsorForCollateral - data = whitelist_control.functions.setSponsorForCollateral(Web3.to_checksum_address(contractAddr)).buildTransaction({"to":Web3.to_checksum_address(whitelist_control_addr), **tx_conf})["data"] + data = whitelist_control.functions.setSponsorForCollateral(Web3.to_checksum_address(contractAddr)).build_transaction({"to":Web3.to_checksum_address(whitelist_control_addr), **tx_conf})["data"] self.call_contract(contract=whitelist_control_addr, data_hex=data, value=20000000000000000000) # add to whitelist diff --git a/tests/overlay_account_storage_test.py b/tests/overlay_account_storage_test.py index 5db100f85..2cf6da43f 100644 --- a/tests/overlay_account_storage_test.py +++ b/tests/overlay_account_storage_test.py @@ -1,7 +1,4 @@ -from web3 import Web3 -from web3.contract import ContractFunction, Contract - -from conflux.rpc import RpcClient +from web3.contract.contract import ContractFunction, Contract from conflux.utils import * from test_framework.util import * from test_framework.mininode import * diff --git a/tests/pos/hard_fork_test.py b/tests/pos/hard_fork_test.py index b0f9cf0af..d66690069 100755 --- a/tests/pos/hard_fork_test.py +++ b/tests/pos/hard_fork_test.py @@ -95,7 +95,7 @@ def run_test(self): for log in logs: pos_identifier = log["topics"][1] if log["topics"][0] == REGISTER_TOPIC: - bls_pub_key, vrf_pub_key = eth_abi.decode_abi(["bytes", "bytes"], decode_hex(log["data"])) + bls_pub_key, vrf_pub_key = eth_abi.decode(["bytes", "bytes"], decode_hex(log["data"])) pub_keys_map[pos_identifier] = (encode_hex_0x(bls_pub_key), encode_hex_0x(vrf_pub_key)) elif log["topics"][0] == INCREASE_STAKE_TOPIC: assert pos_identifier in pub_keys_map diff --git a/tests/pubsub/eth_logs_test.py b/tests/pubsub/eth_logs_test.py index a58f0a6f3..a9a5c7245 100755 --- 
a/tests/pubsub/eth_logs_test.py +++ b/tests/pubsub/eth_logs_test.py @@ -85,7 +85,7 @@ async def run_async(self): assert_equal(self.w3.is_connected(), True) # initialize EVM account - self.evmAccount = self.w3.eth.account.privateKeyToAccount(DEFAULT_TEST_ACCOUNT_KEY) + self.evmAccount = self.w3.eth.account.from_key(DEFAULT_TEST_ACCOUNT_KEY) print(f'Using EVM account {self.evmAccount.address}') self.cross_space_transfer(self.evmAccount.address, 1 * 10 ** 18) assert_equal(self.nodes[0].eth_getBalance(self.evmAccount.address), hex(1 * 10 ** 18)) @@ -161,8 +161,8 @@ async def run_async(self): # create one transaction that is mined but not executed yet sync_blocks(self.nodes) - nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) - signed = self.evmAccount.signTransaction({ + nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) + signed = self.evmAccount.sign_transaction({ "to": contract1, "value": 0, "gasPrice": 1, @@ -172,7 +172,7 @@ async def run_async(self): "data": FOO_TOPIC }) - tx = self.w3.eth.sendRawTransaction(signed["rawTransaction"]).hex() + tx = self.w3.eth.send_raw_transaction(signed["raw_transaction"]).hex() assert_equal(signed.hash.hex(), tx) self.rpc[FULLNODE0].generate_block(num_txs=1) @@ -197,9 +197,9 @@ def run_test(self): asyncio.get_event_loop().run_until_complete(self.run_async()) def deploy_evm_space(self, data_hex): - nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) + nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) - signed = self.evmAccount.signTransaction({ + signed = self.evmAccount.sign_transaction({ "to": None, "value": 0, "gasPrice": 1, @@ -210,19 +210,19 @@ def deploy_evm_space(self, data_hex): }) tx_hash = signed["hash"] - return_tx_hash = self.w3.eth.sendRawTransaction(signed["rawTransaction"]) + return_tx_hash = self.w3.eth.send_raw_transaction(signed["raw_transaction"]) assert_equal(tx_hash, return_tx_hash) self.rpc[FULLNODE0].generate_block(1) self.rpc[FULLNODE0].generate_blocks(20, 1) - receipt = self.w3.eth.waitForTransactionReceipt(tx_hash) + receipt = self.w3.eth.wait_for_transaction_receipt(tx_hash) assert_equal(receipt["status"], 1) addr = receipt["contractAddress"] return receipt, addr def call_contract(self, contract, data_hex): - nonce = self.w3.eth.getTransactionCount(self.evmAccount.address) - signed = self.evmAccount.signTransaction({ + nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) + signed = self.evmAccount.sign_transaction({ "to": contract, "value": 0, "gasPrice": 1, @@ -232,7 +232,7 @@ def call_contract(self, contract, data_hex): "data": data_hex }) - tx = self.w3.eth.sendRawTransaction(signed["rawTransaction"]).hex() + tx = self.w3.eth.send_raw_transaction(signed["raw_transaction"]).hex() time_end = time.time() + 10 while time.time() < time_end: self.rpc[FULLNODE0].generate_block(1) diff --git a/tests/rpc/test_token_supply_info.py b/tests/rpc/test_token_supply_info.py index 1d92d5ac7..d6eded341 100644 --- a/tests/rpc/test_token_supply_info.py +++ b/tests/rpc/test_token_supply_info.py @@ -42,7 +42,7 @@ def test_token_supply_info(self): staking_contract_dict = json.loads(open(os.path.join(file_path), "r").read()) staking_contract = get_contract_instance(contract_dict=staking_contract_dict) tx_data = decode_hex(staking_contract.functions.deposit(10 ** 18) - .buildTransaction(tx_conf)["data"]) + .build_transaction(tx_conf)["data"]) tx = self.new_tx(data=tx_data, gas=tx_conf["gas"], receiver=tx_conf["to"], value=0) self.send_tx(tx, True) # Stake 10**18 drip, and 
generating 5 blocks does not affect rewards @@ -59,7 +59,7 @@ def test_token_supply_info(self): bytecode_file=os.path.join(file_dir, "../contracts/pay_bytecode.dat"), ) # deploy pay contract - tx_data = decode_hex(pay_contract.constructor().buildTransaction(tx_conf)["data"]) + tx_data = decode_hex(pay_contract.constructor().build_transaction(tx_conf)["data"]) tx = self.new_tx(data=tx_data, gas=tx_conf["gas"], receiver='', storage_limit=512, value=0) self.send_tx(tx, True) # Collateral for pay_contract diff --git a/tests/storage_value_unchange_test.py b/tests/storage_value_unchange_test.py index 8a4bae9cd..79f6f10b8 100644 --- a/tests/storage_value_unchange_test.py +++ b/tests/storage_value_unchange_test.py @@ -1,7 +1,3 @@ -from web3 import Web3 -from web3.contract import ContractFunction, Contract - -from conflux.rpc import RpcClient from conflux.utils import * from test_framework.util import * from test_framework.mininode import * diff --git a/tests/test_framework/contracts.py b/tests/test_framework/contracts.py index 4e6c1b401..7bc92cde2 100644 --- a/tests/test_framework/contracts.py +++ b/tests/test_framework/contracts.py @@ -6,15 +6,16 @@ from typing import Literal, Dict import types +from eth_utils.abi import get_abi_output_types from web3 import Web3 -from web3.contract import ContractFunction, Contract, ContractConstructor, get_abi_output_types +from web3.contract.contract import ContractFunction, Contract, ContractConstructor from conflux.address import b32_address_to_hex from conflux.config import default_config from conflux.utils import priv_to_addr from test_framework.blocktools import encode_hex_0x from test_framework.test_framework import ConfluxTestFramework, RpcClient, start_p2p_connection from test_framework.util import * -from eth_utils import decode_hex +from eth_utils.hexadecimal import decode_hex BASE = int(1e18) @@ -179,7 +180,7 @@ def _cfx_call(self: ContractFunction, framework=None, sender=None, raw_output=Fa if not raw_output: output_types = get_abi_output_types(self.abi) - ans = self.web3.codec.decode_abi(output_types, decode_hex(result)) + ans = self.w3.codec.decode(output_types, decode_hex(result)) if len(ans) == 0: return elif len(ans) == 1: diff --git a/tests/test_framework/smart_contract_bench_base.py b/tests/test_framework/smart_contract_bench_base.py index 66922a44e..664ead076 100644 --- a/tests/test_framework/smart_contract_bench_base.py +++ b/tests/test_framework/smart_contract_bench_base.py @@ -77,7 +77,7 @@ def call_contract_function(self, contract, name, args, sender_key, contract_addr attributes['to'] = contract_addr else: attributes['receiver'] = b'' - tx_data = func(*args).buildTransaction(attributes) + tx_data = func(*args).build_transaction(attributes) tx_data['data'] = decode_hex(tx_data['data']) tx_data['pri_key'] = sender_key tx_data['gas_price'] = tx_data['gasPrice'] diff --git a/tests/tools/hard_fork_tool.py b/tests/tools/hard_fork_tool.py index 3eafec9fd..1201107dd 100755 --- a/tests/tools/hard_fork_tool.py +++ b/tests/tools/hard_fork_tool.py @@ -44,7 +44,7 @@ for log in logs: pos_identifier = log["topics"][1] if log["topics"][0] == REGISTER_TOPIC: - bls_pub_key, vrf_pub_key = eth_abi.decode_abi(["bytes", "bytes"], decode_hex(log["data"])) + bls_pub_key, vrf_pub_key = eth_abi.decode(["bytes", "bytes"], decode_hex(log["data"])) pub_keys_map[pos_identifier] = (encode_hex_0x(bls_pub_key), encode_hex_0x(vrf_pub_key)) print(pub_keys_map[pos_identifier]) elif log["topics"][0] == INCREASE_STAKE_TOPIC: diff --git a/tests/web3_test.py 
b/tests/web3_test.py index 3ea0db418..a26f848d6 100644 --- a/tests/web3_test.py +++ b/tests/web3_test.py @@ -21,6 +21,7 @@ from conflux.utils import encode_hex, priv_to_addr, parse_as_int from web3 import Web3 +from web3.exceptions import Web3RPCError def hex256(value): if type(value) is int: @@ -117,7 +118,7 @@ def test_deploy_1820(self): eip1820 = Web3.to_checksum_address("1820a4b7618bde71dce8cdc73aab6c95905fad24") receipt = client.get_transaction_receipt(tx.hash.hex()) assert_greater_than(int(receipt['gasUsed'],16), 1_500_000 + 21_000) - assert_equal(len(self.w3.eth.getCode(eip1820)), 2501) + assert_equal(len(self.w3.eth.get_code(eip1820)), 2501) def run_test(self): time.sleep(3) @@ -127,7 +128,7 @@ def run_test(self): self.w3 = Web3(Web3.HTTPProvider(f'http://{ip}:{port}/')) assert_equal(self.w3.is_connected(), True) - account = self.w3.eth.account.privateKeyToAccount( + account = self.w3.eth.account.from_key( '0x348ce564d427a3311b6536bbcff9390d69395b06ed6c486954e971d960fe8709') sender = account.address @@ -139,28 +140,28 @@ def run_test(self): # Send eip-155 transaction receiver = Web3.to_checksum_address("10000000000000000000000000000000000000aa") - signed = account.signTransaction( + signed = account.sign_transaction( {"to": receiver, "value": 1 * 10 ** 17, "gasPrice": 1, "gas": 21000, "nonce": 0, "chainId": 10}) tx_hash = signed["hash"] - return_tx_hash = self.w3.eth.sendRawTransaction(signed["rawTransaction"]) + return_tx_hash = self.w3.eth.send_raw_transaction(signed["raw_transaction"]) assert_equal(tx_hash, return_tx_hash) client = RpcClient(self.nodes[0]) client.generate_block(1) client.generate_blocks(10) - receipt = self.w3.eth.waitForTransactionReceipt(tx_hash) + receipt = self.w3.eth.wait_for_transaction_receipt(tx_hash) assert_equal(receipt["status"], 1) # Send pre eip-155 transaction - signed = account.signTransaction( + signed = account.sign_transaction( {"to": receiver, "value": 1 * 10 ** 17, "gasPrice": 1, "gas": 21000, "nonce": 1}) tx_hash = signed["hash"] - return_tx_hash = self.w3.eth.sendRawTransaction(signed["rawTransaction"]) + return_tx_hash = self.w3.eth.send_raw_transaction(signed["raw_transaction"]) assert_equal(tx_hash, return_tx_hash) client.generate_block(1) client.generate_blocks(10) - receipt = self.w3.eth.waitForTransactionReceipt(tx_hash) + receipt = self.w3.eth.wait_for_transaction_receipt(tx_hash) assert_equal(receipt["status"], 1) assert_equal(2 * 10 ** 17, self.w3.eth.get_balance(receiver)) @@ -169,14 +170,14 @@ def run_test(self): # Send to transaction mapped_sender = keccak_256(self.genesis_addr).digest()[-20:] receiver = Web3.to_checksum_address(mapped_sender.hex()) - signed = account.signTransaction( + signed = account.sign_transaction( {"to": receiver, "value": 2 * 10 ** 17, "gasPrice": 1, "gas": 21000, "nonce": 2}) - self.w3.eth.sendRawTransaction(signed["rawTransaction"]) + self.w3.eth.send_raw_transaction(signed["raw_transaction"]) client = RpcClient(self.nodes[0]) client.generate_block(1) client.generate_blocks(10) - receipt = self.w3.eth.waitForTransactionReceipt(tx_hash) + receipt = self.w3.eth.wait_for_transaction_receipt(tx_hash) assert_equal(receipt["status"], 1) assert_equal(2 * 10 ** 17, self.w3.eth.get_balance(mapped_sender)) @@ -187,9 +188,9 @@ def run_test(self): assert_equal(1 * 10 ** 17, self.w3.eth.get_balance(mapped_sender)) # Send transaction with large chain-id, should not panic. 
- signed = account.signTransaction( + signed = account.sign_transaction( {"to": receiver, "value": 1 * 10 ** 17, "gasPrice": 1, "gas": 21000, "nonce": 3, "chainId": 2**33}) - assert_raises(ValueError, self.w3.eth.sendRawTransaction,signed["rawTransaction"]) + assert_raises(Web3RPCError, self.w3.eth.send_raw_transaction,signed["raw_transaction"]) self.nodes[0].test_stop() diff --git a/tests/withdraw_deposit_test.py b/tests/withdraw_deposit_test.py index dd58e63bc..432cd826f 100755 --- a/tests/withdraw_deposit_test.py +++ b/tests/withdraw_deposit_test.py @@ -70,7 +70,7 @@ def run_test(self): self.tx_conf["to"] = Web3.to_checksum_address("0888000000000000000000000000000000000002") # deposit 10**18 - tx_data = decode_hex(staking_contract.functions.deposit(10 ** 18).buildTransaction(self.tx_conf)["data"]) + tx_data = decode_hex(staking_contract.functions.deposit(10 ** 18).build_transaction(self.tx_conf)["data"]) tx = client.new_tx(value=0, sender=addr, receiver=self.tx_conf["to"], gas=gas, data=tx_data, priv_key=priv_key) client.send_tx(tx) self.wait_for_tx([tx]) @@ -81,7 +81,7 @@ def run_test(self): # withdraw 5 * 10**17 balance = client.get_balance(addr) capital = 5 * 10 ** 17 - tx_data = decode_hex(staking_contract.functions.withdraw(capital).buildTransaction(self.tx_conf)["data"]) + tx_data = decode_hex(staking_contract.functions.withdraw(capital).build_transaction(self.tx_conf)["data"]) tx = client.new_tx(value=0, sender=addr, receiver=self.tx_conf["to"], gas=gas, data=tx_data, priv_key=priv_key) client.send_tx(tx) self.wait_for_tx([tx]) @@ -93,7 +93,7 @@ def run_test(self): # lock 4 * 10 ** 17 until block number 100000 balance = client.get_balance(addr) - tx_data = decode_hex(staking_contract.functions.voteLock(4 * 10 ** 17, 100000).buildTransaction(self.tx_conf)["data"]) + tx_data = decode_hex(staking_contract.functions.voteLock(4 * 10 ** 17, 100000).build_transaction(self.tx_conf)["data"]) tx = client.new_tx(value=0, sender=addr, receiver=self.tx_conf["to"], gas=gas, data=tx_data, priv_key=priv_key) client.send_tx(tx) self.wait_for_tx([tx]) @@ -103,7 +103,7 @@ def run_test(self): # withdraw 5 * 10**17 and it should fail balance = client.get_balance(addr) capital = 5 * 10 ** 17 - tx_data = decode_hex(staking_contract.functions.withdraw(capital).buildTransaction(self.tx_conf)["data"]) + tx_data = decode_hex(staking_contract.functions.withdraw(capital).build_transaction(self.tx_conf)["data"]) tx = client.new_tx(value=0, sender=addr, receiver=self.tx_conf["to"], gas=gas, data=tx_data, priv_key=priv_key) client.send_tx(tx) self.wait_for_tx([tx]) @@ -112,7 +112,7 @@ def run_test(self): # withdraw 10**17 + 1 and it should fail balance = client.get_balance(addr) - tx_data = decode_hex(staking_contract.functions.withdraw(10 ** 17 + 1).buildTransaction(self.tx_conf)["data"]) + tx_data = decode_hex(staking_contract.functions.withdraw(10 ** 17 + 1).build_transaction(self.tx_conf)["data"]) tx = client.new_tx(value=0, sender=addr, receiver=self.tx_conf["to"], gas=gas, data=tx_data, priv_key=priv_key) client.send_tx(tx) self.wait_for_tx([tx]) @@ -122,7 +122,7 @@ def run_test(self): # withdraw 10**17 and it should succeed balance = client.get_balance(addr) capital = 10 ** 17 - tx_data = decode_hex(staking_contract.functions.withdraw(capital).buildTransaction(self.tx_conf)["data"]) + tx_data = decode_hex(staking_contract.functions.withdraw(capital).build_transaction(self.tx_conf)["data"]) tx = client.new_tx(value=0, sender=addr, receiver=self.tx_conf["to"], gas=gas, data=tx_data, priv_key=priv_key) 
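Note on the test changes in this patch: the renames track the web3.py 7.x API (the series pins web3==7.4.0 in dev-support/dep_pip3.sh): buildTransaction -> build_transaction, privateKeyToAccount -> from_key, signTransaction -> sign_transaction, sendRawTransaction -> send_raw_transaction, waitForTransactionReceipt -> wait_for_transaction_receipt, getTransactionCount -> get_transaction_count, the signed rawTransaction field -> raw_transaction, and eth_abi.decode_abi -> eth_abi.decode. Below is a minimal offline sketch of the new spellings, not part of the patch; it reuses the throwaway key and receiver address from web3_test.py and only signs locally, so no node is required:

    from web3 import Web3

    w3 = Web3()  # no provider attached; everything below runs offline
    # throwaway key already hard-coded in web3_test.py, not a real account
    acct = w3.eth.account.from_key(
        "0x348ce564d427a3311b6536bbcff9390d69395b06ed6c486954e971d960fe8709")

    signed = acct.sign_transaction({
        "to": Web3.to_checksum_address("10000000000000000000000000000000000000aa"),
        "value": 1 * 10 ** 17,
        "gasPrice": 1,
        "gas": 21000,
        "nonce": 0,
        "chainId": 10,
    })

    # web3.py 7.x exposes the encoded payload as raw_transaction (formerly
    # rawTransaction); broadcasting it needs a live node:
    #   tx_hash = w3.eth.send_raw_transaction(signed.raw_transaction)
    #   receipt = w3.eth.wait_for_transaction_receipt(tx_hash)
    print(signed.hash.hex())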
client.send_tx(tx) self.wait_for_tx([tx]) From 0a3c7e3532ccf720071c876004a70058c79a851b Mon Sep 17 00:00:00 2001 From: darwintree <17946284+darwintree@users.noreply.github.com> Date: Tue, 5 Nov 2024 18:32:01 +0800 Subject: [PATCH 12/31] chore(deps): update dependency version --- dev-support/dep_pip3.sh | 20 +------------------- requirements.txt | 14 -------------- 2 files changed, 1 insertion(+), 33 deletions(-) delete mode 100644 requirements.txt diff --git a/dev-support/dep_pip3.sh b/dev-support/dep_pip3.sh index 8570e94d1..797b4c6db 100755 --- a/dev-support/dep_pip3.sh +++ b/dev-support/dep_pip3.sh @@ -2,26 +2,8 @@ set -e -function install() { - if [ "`pip3 show ${1%%=*}`" = "" ]; then - pip3 install $1 - fi -} +pip3 install cfx-account eth-utils coincurve==19.0.1 pysha3 web3==7.4.0 py-solc-x jsonrpcclient==3.3.6 asyncio websockets pyyaml numpy -install cfx-account -install eth-utils -# install rlp==1.2.0 -# install py-ecc==5.2.0 -install coincurve==19.0.1 -install pysha3 -# install trie==1.4.0 -install web3==7.4.0 -install py-solc-x -install jsonrpcclient==3.3.6 -install asyncio -install websockets -install pyyaml -install numpy # python3 -m solcx.install v0.5.17 diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index f15aed897..000000000 --- a/requirements.txt +++ /dev/null @@ -1,14 +0,0 @@ -cfx-account -eth-utils -# rlp==1.2.0 -# py-ecc -coincurve==19.0.1 -pysha3 -# trie==1.4.0 -web3==7.4.0 -py-solc-x -jsonrpcclient==3.3.6 -asyncio -websockets -pyyaml -numpy From e8d7255175b89c7986ace623c0a02b466f7e5a3c Mon Sep 17 00:00:00 2001 From: darwintree <17946284+darwintree@users.noreply.github.com> Date: Wed, 6 Nov 2024 18:20:49 +0800 Subject: [PATCH 13/31] fix: missed dependency in dep_pip3 --- dev-support/dep_pip3.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/dep_pip3.sh b/dev-support/dep_pip3.sh index 797b4c6db..fd4f7599b 100755 --- a/dev-support/dep_pip3.sh +++ b/dev-support/dep_pip3.sh @@ -2,7 +2,7 @@ set -e -pip3 install cfx-account eth-utils coincurve==19.0.1 pysha3 web3==7.4.0 py-solc-x jsonrpcclient==3.3.6 asyncio websockets pyyaml numpy +pip3 install cfx-account eth-utils py-ecc trie coincurve safe-pysha3 web3==7.4.0 py-solc-x jsonrpcclient==3.3.6 asyncio websockets pyyaml numpy # python3 -m solcx.install v0.5.17 From 7f0eb25719e6fb39fa24cc6e4cd5589f885662d5 Mon Sep 17 00:00:00 2001 From: darwintree <17946284+darwintree@users.noreply.github.com> Date: Thu, 7 Nov 2024 09:47:34 +0000 Subject: [PATCH 14/31] fix dependency issues --- dev-support/dep_pip3.sh | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/dev-support/dep_pip3.sh b/dev-support/dep_pip3.sh index fd4f7599b..2d2e6fe93 100755 --- a/dev-support/dep_pip3.sh +++ b/dev-support/dep_pip3.sh @@ -2,10 +2,9 @@ set -e -pip3 install cfx-account eth-utils py-ecc trie coincurve safe-pysha3 web3==7.4.0 py-solc-x jsonrpcclient==3.3.6 asyncio websockets pyyaml numpy +pip3 install cfx-account eth-utils py-ecc rlp trie coincurve safe-pysha3 web3==7.4.0 py-solc-x jsonrpcclient==3.3.6 asyncio websockets pyyaml numpy - -# python3 -m solcx.install v0.5.17 +python3 -m solcx.install v0.5.17 # TODO cross platform #yum install clang snappy snappy-devel zlib zlib-devel bzip2 bzip2-devel lz4-devel From db87e0f08fc9a5a80d986f17ee74e50c9514d8a2 Mon Sep 17 00:00:00 2001 From: iosh Date: Fri, 8 Nov 2024 11:51:24 +0000 Subject: [PATCH 15/31] chore: migrate light_protocol error from error_chain to thiserror --- Cargo.lock | 2 +- crates/cfxcore/core/Cargo.toml | 3 +- 
crates/cfxcore/core/src/lib.rs | 2 +- .../src/light_protocol/common/ledger_info.rs | 20 +- .../core/src/light_protocol/common/mod.rs | 4 +- .../cfxcore/core/src/light_protocol/error.rs | 380 +++++++++--------- .../core/src/light_protocol/handler/mod.rs | 18 +- .../light_protocol/handler/sync/block_txs.rs | 2 +- .../src/light_protocol/handler/sync/blooms.rs | 2 +- .../handler/sync/common/ledger_proof.rs | 6 +- .../handler/sync/common/sync_manager.rs | 6 +- .../light_protocol/handler/sync/headers.rs | 4 +- .../light_protocol/handler/sync/receipts.rs | 2 +- .../handler/sync/state_entries.rs | 9 +- .../handler/sync/state_roots.rs | 6 +- .../handler/sync/storage_roots.rs | 9 +- .../light_protocol/handler/sync/tx_infos.rs | 16 +- .../src/light_protocol/handler/sync/txs.rs | 2 +- .../light_protocol/handler/sync/witnesses.rs | 4 +- crates/cfxcore/core/src/light_protocol/mod.rs | 2 +- .../core/src/light_protocol/provider.rs | 42 +- .../core/src/light_protocol/query_service.rs | 8 +- crates/client/src/rpc/impls/cfx/light.rs | 8 +- crates/util/util-macros/src/lib.rs | 10 + 24 files changed, 289 insertions(+), 278 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fae2e343a..054017925 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1585,6 +1585,7 @@ dependencies = [ "cfx-statedb", "cfx-storage", "cfx-types", + "cfx-util-macros", "cfx-utils", "cfx-vm-interpreter", "cfx-vm-tracer-derive", @@ -1607,7 +1608,6 @@ dependencies = [ "diem-temppath", "diem-types", "either", - "error-chain", "executor", "executor-types", "fail", diff --git a/crates/cfxcore/core/Cargo.toml b/crates/cfxcore/core/Cargo.toml index f6c61c5f4..87bca3126 100644 --- a/crates/cfxcore/core/Cargo.toml +++ b/crates/cfxcore/core/Cargo.toml @@ -31,7 +31,6 @@ dag = { workspace = true } derivative = "2.0.2" db = { workspace = true } either = "1.5.3" -error-chain = { version = "0.12", default-features = false } fallible-iterator = "0.2" fs_extra = "1.1.0" futures = {version="0.3.3", features = ["compat"]} @@ -143,7 +142,7 @@ cfx-rpc-cfx-types = { workspace = true } cfx-rpc-eth-types = { workspace = true } jsonrpsee = { workspace = true, features = ["jsonrpsee-types"] } cfx-rpc-utils = { workspace = true } - +cfx-util-macros= { workspace = true } [dev-dependencies] criterion = "0.3" diff --git a/crates/cfxcore/core/src/lib.rs b/crates/cfxcore/core/src/lib.rs index 7b419f776..294d6faa5 100644 --- a/crates/cfxcore/core/src/lib.rs +++ b/crates/cfxcore/core/src/lib.rs @@ -13,7 +13,7 @@ extern crate keccak_hash as hash; #[macro_use] extern crate log; #[macro_use] -extern crate error_chain; +extern crate cfx_util_macros; extern crate db as ext_db; #[macro_use] extern crate lazy_static; diff --git a/crates/cfxcore/core/src/light_protocol/common/ledger_info.rs b/crates/cfxcore/core/src/light_protocol/common/ledger_info.rs index 06cf27e95..ead4d8da4 100644 --- a/crates/cfxcore/core/src/light_protocol/common/ledger_info.rs +++ b/crates/cfxcore/core/src/light_protocol/common/ledger_info.rs @@ -4,7 +4,7 @@ use crate::{ consensus::SharedConsensusGraph, - light_protocol::{message::WitnessInfoWithHeight, Error, ErrorKind}, + light_protocol::{message::WitnessInfoWithHeight, Error}, }; use cfx_internal_common::StateRootWithAuxInfo; use cfx_parameters::consensus::DEFERRED_STATE_EPOCH_COUNT; @@ -42,7 +42,7 @@ impl LedgerInfo { .block_by_hash(&hash, false /* update_cache */) .map(|b| (*b).clone()) .ok_or_else(|| { - ErrorKind::InternalError(format!("Block {:?} not found", hash)) + Error::InternalError(format!("Block {:?} not found", hash)) .into() }) } @@ -55,7 
+55,7 @@ impl LedgerInfo { .block_header_by_hash(&hash) .map(|h| (*h).clone()) .ok_or_else(|| { - ErrorKind::InternalError(format!("Header {:?} not found", hash)) + Error::InternalError(format!("Header {:?} not found", hash)) .into() }) } @@ -96,7 +96,7 @@ impl LedgerInfo { .get_data_manager() .get_epoch_execution_commitment_with_db(&pivot) .ok_or_else(|| { - Error::from(ErrorKind::InternalError(format!( + Error::from(Error::InternalError(format!( "Execution commitments for {:?} not found", pivot ))) @@ -122,7 +122,7 @@ impl LedgerInfo { .get_data_manager() .get_epoch_execution_commitment_with_db(&pivot) .ok_or_else(|| { - Error::from(ErrorKind::InternalError(format!( + Error::from(Error::InternalError(format!( "Execution commitments for {:?} not found", pivot ))) @@ -145,7 +145,7 @@ impl LedgerInfo { .get_data_manager() .get_epoch_execution_commitment_with_db(&pivot) .ok_or_else(|| { - Error::from(ErrorKind::InternalError(format!( + Error::from(Error::InternalError(format!( "Execution commitments for {:?} not found", pivot ))) @@ -183,7 +183,7 @@ impl LedgerInfo { match state { Some(Ok(Some(state))) => Ok(state), _ => { - bail!(ErrorKind::InternalError(format!( + bail!(Error::InternalError(format!( "State of epoch {} not found", epoch ))); @@ -199,7 +199,7 @@ impl LedgerInfo { match self.state_of(epoch)?.get_state_root() { Ok(root) => Ok(root), Err(e) => { - bail!(ErrorKind::InternalError(format!( + bail!(Error::InternalError(format!( "State root of epoch {} not found: {:?}", epoch, e ))); @@ -255,7 +255,7 @@ impl LedgerInfo { ) .map(|res| (*res.block_receipts).clone()) .ok_or_else(|| { - ErrorKind::InternalError(format!( + Error::InternalError(format!( "Receipts of epoch {} not found", epoch )) @@ -286,7 +286,7 @@ impl LedgerInfo { ) .map(|res| res.bloom) .ok_or_else(|| { - ErrorKind::InternalError(format!( + Error::InternalError(format!( "Logs bloom of epoch {} not found", epoch )) diff --git a/crates/cfxcore/core/src/light_protocol/common/mod.rs b/crates/cfxcore/core/src/light_protocol/common/mod.rs index 1599dfc2f..37f6666b3 100644 --- a/crates/cfxcore/core/src/light_protocol/common/mod.rs +++ b/crates/cfxcore/core/src/light_protocol/common/mod.rs @@ -8,7 +8,7 @@ mod peers; pub use ledger_info::LedgerInfo; pub use peers::{FullPeerFilter, FullPeerState, LightPeerState, Peers}; -use super::{Error, ErrorKind}; +use super::Error; use cfx_internal_common::ChainIdParamsOneChainInner; use std::{cmp, fmt::Debug}; @@ -26,7 +26,7 @@ pub fn validate_chain_id( peer_height: u64, ) -> Result<(), Error> { if !ours.matches(&theirs, peer_height) { - let error_kind = ErrorKind::ChainIdMismatch { + let error_kind = Error::ChainIdMismatch { ours: ours.clone(), theirs, }; diff --git a/crates/cfxcore/core/src/light_protocol/error.rs b/crates/cfxcore/core/src/light_protocol/error.rs index 327ce406d..a25c60235 100644 --- a/crates/cfxcore/core/src/light_protocol/error.rs +++ b/crates/cfxcore/core/src/light_protocol/error.rs @@ -9,245 +9,224 @@ use crate::{ }; use cfx_internal_common::ChainIdParamsOneChainInner; use cfx_types::{H160, H256}; -use error_chain::ChainedError; use network::{node_table::NodeId, NetworkContext, UpdateNodeOperation}; use parking_lot::Mutex; use primitives::{account::AccountError, filter::FilterError, StateRoot}; use rlp::DecoderError; use std::sync::Arc; - -error_chain! 
{ - links { - Network(network::Error, network::ErrorKind); - StateDb(cfx_statedb::Error, cfx_statedb::ErrorKind); - Storage(cfx_storage::Error, cfx_storage::ErrorKind); - } - - foreign_links { - Decoder(DecoderError); - Filter(FilterError); - AccountError(AccountError); - } - - errors { - AlreadyThrottled(msg_name: &'static str) { - description("packet already throttled"), - display("packet already throttled: {:?}", msg_name), - } - - ChainIdMismatch{ ours: ChainIdParamsOneChainInner, theirs: ChainIdParamsOneChainInner } { - description("ChainId mismatch"), - display("ChainId mismatch, ours={:?}, theirs={:?}.", ours, theirs), - } - - ClonableErrorWrapper(error: ClonableError) { - description("Clonable error"), - display("{:?}", error.0.lock().to_string()), - } - - GenesisMismatch{ ours: H256, theirs: H256 } { - description("Genesis mismatch"), - display("Genesis mismatch, ours={:?}, theirs={:?}.", ours, theirs), - } - - InternalError(details: String) { - description("Internal error"), - display("Internal error: {:?}", details), - } - - InvalidBloom{ epoch: u64, expected: H256, received: H256 } { - description("Logs bloom hash validation failed"), - display("Logs bloom hash validation for epoch {} failed, expected={:?}, received={:?}", epoch, expected, received), - } - - InvalidHeader { - description("Header verification failed"), - display("Header verification failed"), - } - - InvalidLedgerProofSize{ hash: H256, expected: u64, received: u64 } { - description("Invalid ledger proof size"), - display("Invalid ledger proof size for header {:?}: expected={}, received={}", hash, expected, received), - } - - InvalidMessageFormat { - description("Invalid message format"), - display("Invalid message format"), - } - - InvalidPreviousStateRoot{ current_epoch: u64, snapshot_epoch_count: u64, root: Option } { - description("Invalid previous state root"), - display("Invalid previous state root for epoch {} with snapshot epoch count {}: {:?}", current_epoch, snapshot_epoch_count, root), - } - - InvalidReceipts{ epoch: u64, expected: H256, received: H256 } { - description("Receipts root validation failed"), - display("Receipts root validation for epoch {} failed, expected={:?}, received={:?}", epoch, expected, received), - } - - InvalidStateProof{ epoch: u64, key: Vec, value: Option>, reason: &'static str } { - description("Invalid state proof"), - display("Invalid state proof for key {:?} and value {:?} in epoch {}: {:?}", value, key, epoch, reason), - } - - InvalidStateRoot{ epoch: u64, expected: H256, received: H256 } { - description("State root validation failed"), - display("State root validation for epoch {} failed, expected={:?}, received={:?}", epoch, expected, received), - } - - InvalidStorageRootProof{ epoch: u64, address: H160, reason: &'static str } { - description("Invalid storage root proof"), - display("Invalid storage root proof for address {:?} in epoch {}: {}", address, epoch, reason), - } - - InvalidTxInfo{ reason: String } { - description("Invalid tx info"), - display("Invalid tx info: {:?}", reason), - } - - InvalidTxRoot{ hash: H256, expected: H256, received: H256 } { - description("Transaction root validation failed"), - display("Transaction root validation for block {:?} failed, expected={:?}, received={:?}", hash, expected, received), - } - - InvalidTxSignature{ hash: H256 } { - description("Invalid tx signature"), - display("Invalid signature for transaction {:?}", hash), - } - - InvalidWitnessRoot{ hash: H256, expected: H256, received: H256 } { - description("Witness root 
validation failed"), - display("Witness root validation for header {:?} failed, expected={:?}, received={:?}", hash, expected, received), - } - - SendStatusFailed{ peer: NodeId } { - description("Send status failed"), - display("Failed to send status to peer {:?}", peer), - } - - Timeout(details: String) { - description("Operation timeout"), - display("Operation timeout: {:?}", details), - } - - Throttled(msg_name: &'static str, response: Throttled) { - description("packet throttled"), - display("packet {:?} throttled: {:?}", msg_name, response), - } - - UnableToProduceTxInfo{ reason: String } { - description("Unable to produce tx info"), - display("Unable to produce tx info: {:?}", reason), - } - - UnexpectedMessage{ expected: Vec, received: MsgId } { - description("Unexpected message"), - display("Unexpected message id={:?}, expected one of {:?}", received, expected), - } - - UnexpectedPeerType{ node_type: NodeType } { - description("Unexpected peer type"), - display("Unexpected peer type: {:?}", node_type), - } - - UnexpectedResponse{ expected: Option, received: RequestId } { - description("Unexpected response"), - display("Unexpected response id; expected = {:?}, received = {:?}", expected, received), - } - - UnknownMessage{ id: MsgId } { - description("Unknown message"), - display("Unknown message: {:?}", id), - } - - WitnessUnavailable{ epoch: u64 } { - description("Witness unavailable"), - display("Witness for epoch {} is not available", epoch), - } - } +use thiserror::Error; + +#[derive(Debug, Error)] +pub enum Error { + #[error(transparent)] + Network(#[from] network::Error), + #[error(transparent)] + StateDb(#[from] cfx_statedb::Error), + #[error(transparent)] + Storage(#[from] cfx_storage::Error), + #[error(transparent)] + Decoder(#[from] DecoderError), + #[error(transparent)] + Filter(#[from] FilterError), + #[error(transparent)] + AccountError(#[from] AccountError), + #[error("packet already throttled: {0:?}")] + AlreadyThrottled(&'static str), + #[error("ChainId mismatch, ours={ours:?}, theirs={theirs:?}.")] + ChainIdMismatch { + ours: ChainIdParamsOneChainInner, + theirs: ChainIdParamsOneChainInner, + }, + #[error("{:?}", .0.display_error())] + ClonableErrorWrapper(ClonableError), + #[error("Genesis mismatch, ours={ours:?}, theirs={theirs:?}.")] + GenesisMismatch { ours: H256, theirs: H256 }, + #[error("Internal error: {0:?}")] + InternalError(String), + #[error("Logs bloom hash validation for epoch {epoch} failed, expected={expected:?}, received={received:?}")] + InvalidBloom { + epoch: u64, + expected: H256, + received: H256, + }, + #[error("Header verification failed")] + InvalidHeader, + #[error("Invalid ledger proof size for header {hash:?}: expected={expected}, received={received}")] + InvalidLedgerProofSize { + hash: H256, + expected: u64, + received: u64, + }, + #[error("Invalid message format")] + InvalidMessageFormat, + #[error("Invalid previous state root for epoch {current_epoch} with snapshot epoch count {snapshot_epoch_count}: {root:?}")] + InvalidPreviousStateRoot { + current_epoch: u64, + snapshot_epoch_count: u64, + root: Option, + }, + #[error("Receipts root validation for epoch {epoch} failed, expected={expected:?}, received={received:?}")] + InvalidReceipts { + epoch: u64, + expected: H256, + received: H256, + }, + #[error( + "Invalid state proof for key {value:?} and value {key:?} in epoch {epoch}: {reason:?}" + )] + InvalidStateProof { + epoch: u64, + key: Vec, + value: Option>, + reason: &'static str, + #[source] + source: Option>, + }, + 
#[error("State root validation for epoch {epoch} failed, expected={expected:?}, received={received:?}")] + InvalidStateRoot { + epoch: u64, + expected: H256, + received: H256, + }, + #[error("Invalid storage root proof for address {address:?} in epoch {epoch}: {reason}")] + InvalidStorageRootProof { + epoch: u64, + address: H160, + reason: &'static str, + #[source] + source: Option>, + }, + #[error("Invalid tx info: {reason:?}")] + InvalidTxInfo { reason: String }, + #[error("Transaction root validation for block {hash:?} failed, expected={expected:?}, received={received:?}")] + InvalidTxRoot { + hash: H256, + expected: H256, + received: H256, + }, + #[error("Invalid signature for transaction {hash:?}")] + InvalidTxSignature { hash: H256 }, + #[error("Witness root validation for header {hash:?} failed, expected={expected:?}, received={received:?}")] + InvalidWitnessRoot { + hash: H256, + expected: H256, + received: H256, + }, + #[error("Failed to send status to peer {peer:?}")] + SendStatusFailed { + peer: NodeId, + #[source] + source: Option>, + }, + #[error("Operation timeout: {0:?}")] + Timeout(String), + #[error("packet {0:?} throttled: {1:?}")] + Throttled(&'static str, Throttled), + #[error("Unable to produce tx info: {reason:?}")] + UnableToProduceTxInfo { reason: String }, + #[error( + "Unexpected message id={received:?}, expected one of {expected:?}" + )] + UnexpectedMessage { + expected: Vec, + received: MsgId, + }, + #[error("Unexpected peer type: {node_type:?}")] + UnexpectedPeerType { node_type: NodeType }, + #[error("Unexpected response id; expected = {expected:?}, received = {received:?}")] + UnexpectedResponse { + expected: Option, + received: RequestId, + }, + #[error("Unknown message: {id:?}")] + UnknownMessage { id: MsgId }, + #[error("Witness for epoch {epoch} is not available")] + WitnessUnavailable { epoch: u64 }, + #[error("{0}")] + Msg(String), } +pub type Result = std::result::Result; + pub fn handle( io: &dyn NetworkContext, peer: &NodeId, msg_id: MsgId, e: &Error, ) { // for clonable errors, we will print the error in the recursive call - if !matches!(e.0, ErrorKind::ClonableErrorWrapper(_)) { + if !matches!(e, Error::ClonableErrorWrapper(_)) { warn!( "Error while handling message, peer={}, msg_id={:?}, error={}", - peer, - msg_id, - e.display_chain().to_string(), + peer, msg_id, e, ); } let mut disconnect = true; - let reason = format!("{}", e.0); + let reason = format!("{}", e); let mut op = None; // NOTE: do not use wildcard; this way, the compiler // will help covering all the cases. - match &e.0 { + match &e { // for wrapped errors, handle based on the inner error - ErrorKind::ClonableErrorWrapper(e) => { + Error::ClonableErrorWrapper(e) => { handle(io, peer, msg_id, &*e.0.lock()); // if we need to disconnect, we will do it in the call above disconnect = false } - ErrorKind::Filter(_) - | ErrorKind::InternalError(_) + Error::Filter(_) + | Error::InternalError(_) // NOTE: we should be tolerant of non-critical errors, // e.g. 
do not disconnect on requesting non-existing epoch - | ErrorKind::Msg(_) + | Error::Msg(_) // NOTE: in order to let other protocols run, // we should not disconnect on protocol failure - | ErrorKind::SendStatusFailed{..} + | Error::SendStatusFailed{..} - | ErrorKind::Timeout(_) + | Error::Timeout(_) // if the tx requested has been removed locally, // we should not disconnect the peer - | ErrorKind::UnableToProduceTxInfo{..} + | Error::UnableToProduceTxInfo{..} // if the witness is not available, it is probably // due to the local witness sync process - | ErrorKind::WitnessUnavailable{..} + | Error::WitnessUnavailable{..} // NOTE: to help with backward-compatibility, we // should not disconnect on `UnknownMessage` - | ErrorKind::UnknownMessage{..} => disconnect = false, + | Error::UnknownMessage{..} => disconnect = false, - ErrorKind::GenesisMismatch{..} - | ErrorKind::InvalidHeader - | ErrorKind::ChainIdMismatch{..} - | ErrorKind::UnexpectedMessage{..} - | ErrorKind::UnexpectedPeerType{..} => op = Some(UpdateNodeOperation::Failure), + Error::GenesisMismatch{..} + | Error::InvalidHeader + | Error::ChainIdMismatch{..} + | Error::UnexpectedMessage{..} + | Error::UnexpectedPeerType{..} => op = Some(UpdateNodeOperation::Failure), - ErrorKind::UnexpectedResponse{..} => { + Error::UnexpectedResponse{..} => { op = Some(UpdateNodeOperation::Demotion) } - ErrorKind::InvalidBloom{..} - | ErrorKind::InvalidLedgerProofSize{..} - | ErrorKind::InvalidMessageFormat - | ErrorKind::InvalidPreviousStateRoot{..} - | ErrorKind::InvalidReceipts{..} - | ErrorKind::InvalidStateProof{..} - | ErrorKind::InvalidStateRoot{..} - | ErrorKind::InvalidStorageRootProof{..} - | ErrorKind::InvalidTxInfo{..} - | ErrorKind::InvalidTxRoot{..} - | ErrorKind::InvalidTxSignature{..} - | ErrorKind::InvalidWitnessRoot{..} - | ErrorKind::AlreadyThrottled(_) - | ErrorKind::Decoder(_) - | ErrorKind::AccountError(_) => op = Some(UpdateNodeOperation::Remove), - - ErrorKind::Throttled(_, resp) => { + Error::InvalidBloom{..} + | Error::InvalidLedgerProofSize{..} + | Error::InvalidMessageFormat + | Error::InvalidPreviousStateRoot{..} + | Error::InvalidReceipts{..} + | Error::InvalidStateProof{..} + | Error::InvalidStateRoot{..} + | Error::InvalidStorageRootProof{..} + | Error::InvalidTxInfo{..} + | Error::InvalidTxRoot{..} + | Error::InvalidTxSignature{..} + | Error::InvalidWitnessRoot{..} + | Error::AlreadyThrottled(_) + | Error::Decoder(_) + | Error::AccountError(_) => op = Some(UpdateNodeOperation::Remove), + + Error::Throttled(_, resp) => { disconnect = false; if let Err(e) = resp.send(io, peer) { @@ -257,7 +236,7 @@ pub fn handle( } // network errors - ErrorKind::Network(kind) => match kind { + Error::Network(kind) => match kind.kind() { network::ErrorKind::SendUnsupportedMessage{..} => { unreachable!("This is a bug in protocol version maintenance. 
{:?}", kind); } @@ -293,11 +272,11 @@ pub fn handle( } }, - ErrorKind::StateDb(_)| ErrorKind::Storage(_) => disconnect = false, + Error::StateDb(_)| Error::Storage(_) => disconnect = false, - ErrorKind::__Nonexhaustive {} => { - op = Some(UpdateNodeOperation::Failure) - } + // Error::__Nonexhaustive {} => { + // op = Some(UpdateNodeOperation::Failure) + // } }; if disconnect { @@ -309,7 +288,7 @@ pub fn handle( pub struct ClonableError(Arc>); impl Into for ClonableError { - fn into(self) -> Error { ErrorKind::ClonableErrorWrapper(self).into() } + fn into(self) -> Error { Error::ClonableErrorWrapper(self).into() } } impl From for ClonableError { @@ -317,3 +296,14 @@ impl From for ClonableError { ClonableError(Arc::new(Mutex::new(e))) } } + +impl ClonableError { + fn display_error(&self) -> String { self.0.lock().to_string() } +} + +impl From<&str> for Error { + fn from(e: &str) -> Self { Error::Msg(e.into()) } +} +impl From for Error { + fn from(e: String) -> Self { Error::Msg(e) } +} diff --git a/crates/cfxcore/core/src/light_protocol/handler/mod.rs b/crates/cfxcore/core/src/light_protocol/handler/mod.rs index e8ae4ab72..7bf09fbc6 100644 --- a/crates/cfxcore/core/src/light_protocol/handler/mod.rs +++ b/crates/cfxcore/core/src/light_protocol/handler/mod.rs @@ -316,7 +316,7 @@ impl Handler { None => { // NOTE: this should not happen as we register // all peers in `on_peer_connected` - bail!(ErrorKind::InternalError(format!( + bail!(Error::InternalError(format!( "Received message from unknown peer={:?}", peer ))); @@ -339,7 +339,7 @@ impl Handler { && !state.read().handshake_completed { warn!("Received msg={:?} from handshaking peer={:?}", msg_id, peer); - bail!(ErrorKind::UnexpectedMessage { + bail!(Error::UnexpectedMessage { expected: vec![ msgid::STATUS_PONG_DEPRECATED, msgid::STATUS_PONG_V2 @@ -356,7 +356,7 @@ impl Handler { match node_type { NodeType::Archive => Ok(()), NodeType::Full => Ok(()), - _ => bail!(ErrorKind::UnexpectedPeerType { node_type }), + _ => bail!(Error::UnexpectedPeerType { node_type }), } } @@ -366,7 +366,7 @@ impl Handler { let theirs = genesis; if ours != theirs { - bail!(ErrorKind::GenesisMismatch { ours, theirs }); + bail!(Error::GenesisMismatch { ours, theirs }); } Ok(()) @@ -403,7 +403,7 @@ impl Handler { // request was throttled by service provider msgid::THROTTLED => self.on_throttled(io, peer, decode_rlp_and_check_deprecation(&rlp, min_supported_ver, protocol)?), - _ => bail!(ErrorKind::UnknownMessage{id: msg_id}), + _ => bail!(Error::UnknownMessage{id: msg_id}), } } @@ -971,7 +971,7 @@ impl NetworkProtocolHandler for Handler { io, peer, msgid::INVALID, - &ErrorKind::InvalidMessageFormat.into(), + &Error::InvalidMessageFormat.into(), ) } }; @@ -1012,7 +1012,11 @@ impl NetworkProtocolHandler for Handler { io, node_id, msgid::INVALID, - &ErrorKind::SendStatusFailed { peer: *node_id }.into(), + &Error::SendStatusFailed { + peer: *node_id, + source: None, + } + .into(), ); } } diff --git a/crates/cfxcore/core/src/light_protocol/handler/sync/block_txs.rs b/crates/cfxcore/core/src/light_protocol/handler/sync/block_txs.rs index 97bdb272b..5cf194b0d 100644 --- a/crates/cfxcore/core/src/light_protocol/handler/sync/block_txs.rs +++ b/crates/cfxcore/core/src/light_protocol/handler/sync/block_txs.rs @@ -220,7 +220,7 @@ impl BlockTxs { let received = compute_transaction_root(&txs); if received != expected { - bail!(ErrorKind::InvalidTxRoot { + bail!(Error::InvalidTxRoot { hash, expected, received, diff --git a/crates/cfxcore/core/src/light_protocol/handler/sync/blooms.rs 
b/crates/cfxcore/core/src/light_protocol/handler/sync/blooms.rs index e756954d7..c6a0a50ed 100644 --- a/crates/cfxcore/core/src/light_protocol/handler/sync/blooms.rs +++ b/crates/cfxcore/core/src/light_protocol/handler/sync/blooms.rs @@ -208,7 +208,7 @@ impl Blooms { // check if received != expected { - bail!(ErrorKind::InvalidBloom { + bail!(Error::InvalidBloom { epoch, expected, received, diff --git a/crates/cfxcore/core/src/light_protocol/handler/sync/common/ledger_proof.rs b/crates/cfxcore/core/src/light_protocol/handler/sync/common/ledger_proof.rs index 2181653e0..9604df4ba 100644 --- a/crates/cfxcore/core/src/light_protocol/handler/sync/common/ledger_proof.rs +++ b/crates/cfxcore/core/src/light_protocol/handler/sync/common/ledger_proof.rs @@ -6,7 +6,7 @@ use cfx_types::H256; use primitives::{BlockHeader, BlockHeaderBuilder}; use std::ops::Index; -use crate::light_protocol::{Error, ErrorKind}; +use crate::light_protocol::Error; pub enum LedgerProof { StateRoot(Vec), @@ -48,7 +48,7 @@ impl LedgerProof { let blame = witness.blame() as u64; if hashes.len() as u64 != blame + 1 { - bail!(ErrorKind::InvalidLedgerProofSize { + bail!(Error::InvalidLedgerProofSize { hash, expected: blame + 1, received: hashes.len() as u64 @@ -66,7 +66,7 @@ impl LedgerProof { // validate against local witness deferred state root hash if received != expected { - bail!(ErrorKind::InvalidWitnessRoot { + bail!(Error::InvalidWitnessRoot { hash, expected, received, diff --git a/crates/cfxcore/core/src/light_protocol/handler/sync/common/sync_manager.rs b/crates/cfxcore/core/src/light_protocol/handler/sync/common/sync_manager.rs index 867ebcc7a..7ce8bc1fa 100644 --- a/crates/cfxcore/core/src/light_protocol/handler/sync/common/sync_manager.rs +++ b/crates/cfxcore/core/src/light_protocol/handler/sync/common/sync_manager.rs @@ -6,7 +6,7 @@ use super::{HasKey, PriorityQueue}; use crate::{ light_protocol::{ common::{FullPeerFilter, FullPeerState, Peers}, - Error, ErrorKind, + Error, }, message::{MsgId, RequestId}, }; @@ -97,7 +97,7 @@ where match self.peers.get(peer) { Some(state) => Ok(state), None => { - bail!(ErrorKind::InternalError(format!( + bail!(Error::InternalError(format!( "Received message from unknown peer={:?}", peer ))); @@ -129,7 +129,7 @@ where ThrottleResult::Success => Ok(id), ThrottleResult::Throttled(_) => Ok(id), ThrottleResult::AlreadyThrottled => { - bail!(ErrorKind::UnexpectedResponse { + bail!(Error::UnexpectedResponse { expected: id, received: request_id, }); diff --git a/crates/cfxcore/core/src/light_protocol/handler/sync/headers.rs b/crates/cfxcore/core/src/light_protocol/handler/sync/headers.rs index 0b3e22a31..5c0cab7dd 100644 --- a/crates/cfxcore/core/src/light_protocol/handler/sync/headers.rs +++ b/crates/cfxcore/core/src/light_protocol/handler/sync/headers.rs @@ -7,7 +7,7 @@ use crate::{ light_protocol::{ common::{FullPeerState, Peers}, message::{msgid, GetBlockHeaders}, - Error, ErrorKind, LightNodeConfiguration, + Error, LightNodeConfiguration, }, message::{Message, RequestId}, sync::SynchronizationGraph, @@ -257,7 +257,7 @@ impl Headers { // disconnect peers who send invalid headers if has_invalid_header { - bail!(ErrorKind::InvalidHeader); + bail!(Error::InvalidHeader); } Ok(()) diff --git a/crates/cfxcore/core/src/light_protocol/handler/sync/receipts.rs b/crates/cfxcore/core/src/light_protocol/handler/sync/receipts.rs index cb16a869a..cd7afe1af 100644 --- a/crates/cfxcore/core/src/light_protocol/handler/sync/receipts.rs +++ 
b/crates/cfxcore/core/src/light_protocol/handler/sync/receipts.rs @@ -230,7 +230,7 @@ impl Receipts { // check if received != expected { - bail!(ErrorKind::InvalidReceipts { + bail!(Error::InvalidReceipts { epoch, expected, received, diff --git a/crates/cfxcore/core/src/light_protocol/handler/sync/state_entries.rs b/crates/cfxcore/core/src/light_protocol/handler/sync/state_entries.rs index 21f31812e..3b3408c6f 100644 --- a/crates/cfxcore/core/src/light_protocol/handler/sync/state_entries.rs +++ b/crates/cfxcore/core/src/light_protocol/handler/sync/state_entries.rs @@ -224,11 +224,12 @@ impl StateEntries { self.state_roots .validate_state_root(epoch, &state_root) - .chain_err(|| ErrorKind::InvalidStateProof { + .map_err(|e| Error::InvalidStateProof { epoch, key: key.clone(), value: value.clone(), reason: "Validation of current state root failed", + source: Some(Box::new(e)), })?; // validate previous state root @@ -236,11 +237,12 @@ impl StateEntries { self.state_roots .validate_prev_snapshot_state_root(epoch, &maybe_prev_root) - .chain_err(|| ErrorKind::InvalidStateProof { + .map_err(|e| Error::InvalidStateProof { epoch, key: key.clone(), value: value.clone(), reason: "Validation of previous state root failed", + source: Some(Box::new(e)), })?; // construct padding @@ -258,11 +260,12 @@ impl StateEntries { state_root, maybe_intermediate_padding, ) { - bail!(ErrorKind::InvalidStateProof { + bail!(Error::InvalidStateProof { epoch, key: key.clone(), value: value.clone(), reason: "Validation of merkle proof failed", + source: None }); } diff --git a/crates/cfxcore/core/src/light_protocol/handler/sync/state_roots.rs b/crates/cfxcore/core/src/light_protocol/handler/sync/state_roots.rs index 015d8bfe4..e0b25fb58 100644 --- a/crates/cfxcore/core/src/light_protocol/handler/sync/state_roots.rs +++ b/crates/cfxcore/core/src/light_protocol/handler/sync/state_roots.rs @@ -229,7 +229,7 @@ impl StateRoots { // check if received != expected { - bail!(ErrorKind::InvalidStateRoot { + bail!(Error::InvalidStateRoot { epoch, expected, received, @@ -252,7 +252,7 @@ impl StateRoots { if current_epoch <= snapshot_epoch_count { // previous root should not have been provided // for the first snapshot period - bail!(ErrorKind::InvalidPreviousStateRoot { + bail!(Error::InvalidPreviousStateRoot { current_epoch, snapshot_epoch_count, root: maybe_prev_snapshot_state_root.clone() @@ -269,7 +269,7 @@ impl StateRoots { if current_epoch > snapshot_epoch_count { // previous root should have been provided // for subsequent snapshot periods - bail!(ErrorKind::InvalidPreviousStateRoot { + bail!(Error::InvalidPreviousStateRoot { current_epoch, snapshot_epoch_count, root: maybe_prev_snapshot_state_root.clone() diff --git a/crates/cfxcore/core/src/light_protocol/handler/sync/storage_roots.rs b/crates/cfxcore/core/src/light_protocol/handler/sync/storage_roots.rs index a0531b51f..95f3a27e0 100644 --- a/crates/cfxcore/core/src/light_protocol/handler/sync/storage_roots.rs +++ b/crates/cfxcore/core/src/light_protocol/handler/sync/storage_roots.rs @@ -219,10 +219,11 @@ impl StorageRoots { self.state_roots .validate_state_root(epoch, &state_root) - .chain_err(|| ErrorKind::InvalidStorageRootProof { + .map_err(|e| Error::InvalidStorageRootProof { epoch, address, reason: "Validation of current state root failed", + source: Some(Box::new(e)), })?; // validate previous state root @@ -230,10 +231,11 @@ impl StorageRoots { self.state_roots .validate_prev_snapshot_state_root(epoch, &maybe_prev_root) - .chain_err(|| 
ErrorKind::InvalidStorageRootProof { + .map_err(|e| Error::InvalidStorageRootProof { epoch, address, reason: "Validation of previous state root failed", + source: Some(Box::new(e)), })?; // construct padding @@ -253,10 +255,11 @@ impl StorageRoots { state_root, maybe_intermediate_padding, ) { - bail!(ErrorKind::InvalidStorageRootProof { + bail!(Error::InvalidStorageRootProof { epoch, address, reason: "Validation of merkle proof failed", + source: None }); } diff --git a/crates/cfxcore/core/src/light_protocol/handler/sync/tx_infos.rs b/crates/cfxcore/core/src/light_protocol/handler/sync/tx_infos.rs index 5598d8f2a..f510ce4a3 100644 --- a/crates/cfxcore/core/src/light_protocol/handler/sync/tx_infos.rs +++ b/crates/cfxcore/core/src/light_protocol/handler/sync/tx_infos.rs @@ -196,7 +196,7 @@ impl TxInfos { // quick check for well-formedness if block_index_in_epoch >= num_blocks_in_epoch { - bail!(ErrorKind::InvalidTxInfo { + bail!(Error::InvalidTxInfo { reason: format!( "Inconsisent block index: {} >= {}", block_index_in_epoch, num_blocks_in_epoch @@ -205,7 +205,7 @@ impl TxInfos { } if tx_index_in_block >= num_txs_in_block { - bail!(ErrorKind::InvalidTxInfo { + bail!(Error::InvalidTxInfo { reason: format!( "Inconsisent tx index: {} >= {}", tx_index_in_block, num_txs_in_block @@ -218,7 +218,7 @@ impl TxInfos { if receipt.outcome_status != TransactionStatus::Success && receipt.outcome_status != TransactionStatus::Failure { - bail!(ErrorKind::InvalidTxInfo { + bail!(Error::InvalidTxInfo { reason: format!( "Unexpected outcome status in tx info: {:?}", receipt.outcome_status @@ -228,7 +228,7 @@ impl TxInfos { let block_hash = match self.ledger.block_hashes_in(epoch)? { hs if hs.len() != num_blocks_in_epoch => { - bail!(ErrorKind::InvalidTxInfo { + bail!(Error::InvalidTxInfo { reason: format!( "Number of blocks in epoch mismatch: local = {}, received = {}", hs.len(), num_blocks_in_epoch), @@ -263,7 +263,7 @@ impl TxInfos { tx_hash, &tx_proof, ) { - bail!(ErrorKind::InvalidTxInfo { + bail!(Error::InvalidTxInfo { reason: "Transaction proof verification failed".to_owned() }); } @@ -302,7 +302,7 @@ impl TxInfos { &receipt, &receipt_proof, ) { - bail!(ErrorKind::InvalidTxInfo { + bail!(Error::InvalidTxInfo { reason: "Receipt proof verification failed".to_owned() }); } @@ -330,7 +330,7 @@ impl TxInfos { &prev_receipt, &prev_receipt_proof, ) { - bail!(ErrorKind::InvalidTxInfo { + bail!(Error::InvalidTxInfo { reason: "Previous receipt proof verification failed" .to_owned() }); @@ -341,7 +341,7 @@ impl TxInfos { // not the first receipt but no previous receipt was provided (_, maybe_prev_receipt, maybe_prev_receipt_proof) => { - bail!(ErrorKind::InvalidTxInfo { + bail!(Error::InvalidTxInfo { reason: format!( "Expected two receipts; received one. 
tx_index_in_block = {:?}, diff --git a/crates/cfxcore/core/src/light_protocol/handler/sync/txs.rs b/crates/cfxcore/core/src/light_protocol/handler/sync/txs.rs index 4db7ea149..faca9b3ce 100644 --- a/crates/cfxcore/core/src/light_protocol/handler/sync/txs.rs +++ b/crates/cfxcore/core/src/light_protocol/handler/sync/txs.rs @@ -197,7 +197,7 @@ impl Txs { Ok(true) => {} _ => { warn!("Tx signature verification failed for {:?}", tx); - bail!(ErrorKind::InvalidTxSignature { hash: tx.hash() }); + bail!(Error::InvalidTxSignature { hash: tx.hash() }); } } diff --git a/crates/cfxcore/core/src/light_protocol/handler/sync/witnesses.rs b/crates/cfxcore/core/src/light_protocol/handler/sync/witnesses.rs index 7f36cbf23..8f9bd2f7f 100644 --- a/crates/cfxcore/core/src/light_protocol/handler/sync/witnesses.rs +++ b/crates/cfxcore/core/src/light_protocol/handler/sync/witnesses.rs @@ -123,7 +123,7 @@ impl Witnesses { let height = epoch + DEFERRED_STATE_EPOCH_COUNT; if height > *self.height_of_latest_verified_header.read() { - bail!(ErrorKind::WitnessUnavailable { epoch }); + bail!(Error::WitnessUnavailable { epoch }); } match self.data_man.verified_blamed_roots_by_height(height) { @@ -133,7 +133,7 @@ impl Witnesses { // the response for blamed headers. thus, in some cases, `None` // might mean *haven't received yet* instead of *not blamed*. if self.in_flight.read().contains(&height) { - bail!(ErrorKind::WitnessUnavailable { epoch }); + bail!(Error::WitnessUnavailable { epoch }); } let header = self diff --git a/crates/cfxcore/core/src/light_protocol/mod.rs b/crates/cfxcore/core/src/light_protocol/mod.rs index b9b122de5..8467a281a 100644 --- a/crates/cfxcore/core/src/light_protocol/mod.rs +++ b/crates/cfxcore/core/src/light_protocol/mod.rs @@ -23,7 +23,7 @@ pub const LIGHT_PROTO_V2: ProtocolVersion = ProtocolVersion(2); use error::handle as handle_error; pub use config::Configuration as LightNodeConfiguration; -pub use error::{Error, ErrorKind}; +pub use error::Error; pub use handler::Handler; pub use provider::Provider; pub use query_service::QueryService; diff --git a/crates/cfxcore/core/src/light_protocol/provider.rs b/crates/cfxcore/core/src/light_protocol/provider.rs index 0b62e5eda..bea3217e2 100644 --- a/crates/cfxcore/core/src/light_protocol/provider.rs +++ b/crates/cfxcore/core/src/light_protocol/provider.rs @@ -142,7 +142,7 @@ impl Provider { None => { // NOTE: this should not happen as we register // all peers in `on_peer_connected` - bail!(ErrorKind::InternalError(format!( + bail!(Error::InternalError(format!( "Received message from unknown peer={:?}", peer ))) @@ -164,7 +164,7 @@ impl Provider { && !state.read().handshake_completed { warn!("Received msg={:?} from handshaking peer={:?}", msg_id, peer); - bail!(ErrorKind::UnexpectedMessage { + bail!(Error::UnexpectedMessage { expected: vec![ msgid::STATUS_PING_DEPRECATED, msgid::STATUS_PING_V2 @@ -200,7 +200,7 @@ impl Provider { msgid::GET_BLOCK_TXS => self.on_get_block_txs(io, peer, decode_rlp_and_check_deprecation(&rlp, min_supported_ver, protocol)?), msgid::GET_TX_INFOS => self.on_get_tx_infos(io, peer, decode_rlp_and_check_deprecation(&rlp, min_supported_ver, protocol)?), msgid::GET_STORAGE_ROOTS => self.on_get_storage_roots(io, peer, decode_rlp_and_check_deprecation(&rlp, min_supported_ver, protocol)?), - _ => bail!(ErrorKind::UnknownMessage{id: msg_id}), + _ => bail!(Error::UnknownMessage{id: msg_id}), } } @@ -228,7 +228,7 @@ impl Provider { let (tx, tx_index, receipt) = match self.consensus.get_transaction_info_by_hash(&hash) { None => { - 
bail!(ErrorKind::UnableToProduceTxInfo { + bail!(Error::UnableToProduceTxInfo { reason: format!("Unable to get tx info for {:?}", hash) }); } @@ -239,7 +239,7 @@ impl Provider { .. }, )) => { - bail!(ErrorKind::UnableToProduceTxInfo { + bail!(Error::UnableToProduceTxInfo { reason: format!("Unable to get receipt for {:?}", hash) }); } @@ -253,7 +253,7 @@ impl Provider { .. }, )) => { - bail!(ErrorKind::UnableToProduceTxInfo { + bail!(Error::UnableToProduceTxInfo { reason: format!( "Phantom tx not supported (hash: {:?})", hash @@ -284,7 +284,7 @@ impl Provider { let epoch = match self.consensus.get_block_epoch_number(&block_hash) { Some(epoch) => epoch, None => { - bail!(ErrorKind::UnableToProduceTxInfo { + bail!(Error::UnableToProduceTxInfo { reason: format!( "Unable to get epoch number for block {:?}", block_hash @@ -296,7 +296,7 @@ impl Provider { let epoch_hashes = match self.ledger.block_hashes_in(epoch) { Ok(hs) => hs, Err(e) => { - bail!(ErrorKind::UnableToProduceTxInfo { + bail!(Error::UnableToProduceTxInfo { reason: format!( "Unable to find epoch hashes for {}: {}", epoch, e @@ -311,7 +311,7 @@ impl Provider { match epoch_hashes.iter().position(|h| *h == block_hash) { Some(id) => id, None => { - bail!(ErrorKind::UnableToProduceTxInfo { + bail!(Error::UnableToProduceTxInfo { reason: format!( "Unable to find {:?} in epoch {}", block_hash, epoch @@ -411,7 +411,7 @@ impl Provider { fn validate_peer_type(&self, node_type: NodeType) -> Result<()> { match node_type { NodeType::Light => Ok(()), - _ => bail!(ErrorKind::UnexpectedPeerType { node_type }), + _ => bail!(Error::UnexpectedPeerType { node_type }), } } @@ -421,7 +421,7 @@ impl Provider { let theirs = genesis; if ours != theirs { - bail!(ErrorKind::GenesisMismatch { ours, theirs }); + bail!(Error::GenesisMismatch { ours, theirs }); } Ok(()) @@ -447,7 +447,10 @@ impl Provider { )?; self.send_status(io, peer) - .chain_err(|| ErrorKind::SendStatusFailed { peer: *peer })?; + .map_err(|e| Error::SendStatusFailed { + peer: *peer, + source: Some(Box::new(e)), + })?; let state = self.get_existing_peer_state(peer)?; let mut state = state.write(); @@ -615,8 +618,7 @@ impl Provider { .block_header_by_hash(&h) .map(|header_arc| header_arc.as_ref().clone()) .ok_or_else(|| { - ErrorKind::Msg(format!("Block {:?} not found", h)) - .into() + Error::Msg(format!("Block {:?} not found", h)).into() }) }); @@ -664,7 +666,7 @@ impl Provider { } _ => { // NOTE: this should not happen - bail!(ErrorKind::InternalError(format!( + bail!(Error::InternalError(format!( "insert_new_transactions failed: {:?}, {:?}", passed, failed ))) @@ -716,7 +718,7 @@ impl Provider { .take(MAX_TXS_TO_SEND) .map::, _>(|h| { self.tx_by_hash(h).ok_or_else(|| { - ErrorKind::Msg(format!("Tx {:?} not found", h)).into() + Error::Msg(format!("Tx {:?} not found", h)).into() }) }); @@ -948,7 +950,7 @@ impl Provider { let network = match self.network.upgrade() { Some(network) => network, None => { - bail!(ErrorKind::InternalError( + bail!(Error::InternalError( "Network unavailable, not relaying hashes".to_owned() )); } @@ -987,10 +989,10 @@ impl Provider { request_id: msg.get_request_id(), }; - bail!(ErrorKind::Throttled(msg.msg_name(), throttled)) + bail!(Error::Throttled(msg.msg_name(), throttled)) } ThrottleResult::AlreadyThrottled => { - bail!(ErrorKind::AlreadyThrottled(msg.msg_name())) + bail!(Error::AlreadyThrottled(msg.msg_name())) } } } @@ -1033,7 +1035,7 @@ impl NetworkProtocolHandler for Provider { io, peer, msgid::INVALID, - &ErrorKind::InvalidMessageFormat.into(), + 
&Error::InvalidMessageFormat.into(), ) } }; diff --git a/crates/cfxcore/core/src/light_protocol/query_service.rs b/crates/cfxcore/core/src/light_protocol/query_service.rs index b354f7c08..8b88cdf28 100644 --- a/crates/cfxcore/core/src/light_protocol/query_service.rs +++ b/crates/cfxcore/core/src/light_protocol/query_service.rs @@ -9,8 +9,8 @@ use crate::{ common::{FullPeerFilter, LedgerInfo}, handler::sync::TxInfoValidated, message::msgid, - Error as LightError, ErrorKind, Handler as LightHandler, - LightNodeConfiguration, LIGHT_PROTOCOL_ID, LIGHT_PROTOCOL_VERSION, + Error as LightError, Handler as LightHandler, LightNodeConfiguration, + LIGHT_PROTOCOL_ID, LIGHT_PROTOCOL_VERSION, }, sync::SynchronizationGraph, ConsensusGraph, Notifications, @@ -77,7 +77,7 @@ async fn with_timeout( // set error message with_timeout .await - .map_err(|_| LightError::from(ErrorKind::Timeout(msg)))? + .map_err(|_| LightError::from(LightError::Timeout(msg)))? } pub struct QueryService { @@ -324,7 +324,7 @@ impl QueryService { // `retrieve_block` will only return None if we do not have // the corresponding header, which should not happen in this // case. - bail!(ErrorKind::InternalError(format!( + bail!(LightError::InternalError(format!( "Block {:?} not found during gas price sampling", hash ))); diff --git a/crates/client/src/rpc/impls/cfx/light.rs b/crates/client/src/rpc/impls/cfx/light.rs index eb5a36c36..90a9101eb 100644 --- a/crates/client/src/rpc/impls/cfx/light.rs +++ b/crates/client/src/rpc/impls/cfx/light.rs @@ -10,7 +10,7 @@ use cfxcore::{ consensus::ConsensusConfig, errors::account_result_to_rpc_result, light_protocol::{ - self, query_service::TxInfo, Error as LightError, ErrorKind, + self, query_service::TxInfo, Error as LightError, }, verification::EpochReceiptProof, ConsensusGraph, ConsensusGraphTrait, LightQueryService, PeerInfo, @@ -516,7 +516,7 @@ impl RpcImpl { match /* success = */ light.send_raw_tx(raw) { true => Ok(tx.hash().into()), - false => bail!(LightProtocol(light_protocol::ErrorKind::InternalError("Unable to relay tx".into()).into())), + false => bail!(LightProtocol(light_protocol::Error::InternalError("Unable to relay tx".into()).into())), } } @@ -688,8 +688,8 @@ impl RpcImpl { // return `null` on timeout let tx_info = match light.get_tx_info(hash).await { Ok(t) => t, - Err(LightError(ErrorKind::Timeout(_), _)) => return Ok(None), - Err(LightError(e, _)) => { + Err(LightError::Timeout(_)) => return Ok(None), + Err(e) => { bail!(RpcError::invalid_params(e.to_string())) } }; diff --git a/crates/util/util-macros/src/lib.rs b/crates/util/util-macros/src/lib.rs index e64f8a9d1..717a0df37 100644 --- a/crates/util/util-macros/src/lib.rs +++ b/crates/util/util-macros/src/lib.rs @@ -7,3 +7,13 @@ macro_rules! unwrap_option_or_return_result_none { }; }; } + +#[macro_export] +macro_rules! 
bail { + ($e:expr) => { + return Err($e.into()); + }; + ($fmt:expr, $($arg:tt)+) => { + return Err(format!($fmt, $($arg)+).into()); + }; +} From abe27a061167e965b006e4313b00862b16a1ed3a Mon Sep 17 00:00:00 2001 From: Pana Date: Sun, 10 Nov 2024 18:46:51 +0800 Subject: [PATCH 16/31] add cargo check&test for no_std cfx_addr --- crates/cfx_addr/src/consts.rs | 7 +++++++ crates/cfx_addr/src/lib.rs | 7 +++++++ crates/cfx_addr/src/types.rs | 7 +++++++ crates/cfx_addr/src/utils.rs | 11 +++++++++++ crates/cfx_addr/tests/decode.rs | 7 +++++++ crates/cfx_addr/tests/encode.rs | 6 ++++++ dev-support/check-crates.sh | 1 + dev-support/test.sh | 2 +- 8 files changed, 47 insertions(+), 1 deletion(-) diff --git a/crates/cfx_addr/src/consts.rs b/crates/cfx_addr/src/consts.rs index ae8776d80..209b2f055 100644 --- a/crates/cfx_addr/src/consts.rs +++ b/crates/cfx_addr/src/consts.rs @@ -1,3 +1,10 @@ +// Copyright 2021 Conflux Foundation. All rights reserved. +// Conflux is free software and distributed under GNU General Public License. +// See http://www.gnu.org/licenses/ +// +// Modification based on https://github.com/hlb8122/rust-bitcoincash-addr in MIT License. +// A copy of the original license is included in LICENSE.rust-bitcoincash-addr. + pub const CHARSET_SIZE: usize = 32; pub const RESERVED_BITS_MASK: u8 = 0xf8; diff --git a/crates/cfx_addr/src/lib.rs b/crates/cfx_addr/src/lib.rs index 5db4046a8..0c3507f0b 100644 --- a/crates/cfx_addr/src/lib.rs +++ b/crates/cfx_addr/src/lib.rs @@ -1,3 +1,10 @@ +// Copyright 2021 Conflux Foundation. All rights reserved. +// Conflux is free software and distributed under GNU General Public License. +// See http://www.gnu.org/licenses/ +// +// Modification based on https://github.com/hlb8122/rust-bitcoincash-addr in MIT License. +// A copy of the original license is included in LICENSE.rust-bitcoincash-addr. + #![cfg_attr(not(feature = "std"), no_std)] mod consts; diff --git a/crates/cfx_addr/src/types.rs b/crates/cfx_addr/src/types.rs index c845d29e8..f96a963d5 100644 --- a/crates/cfx_addr/src/types.rs +++ b/crates/cfx_addr/src/types.rs @@ -1,3 +1,10 @@ +// Copyright 2021 Conflux Foundation. All rights reserved. +// Conflux is free software and distributed under GNU General Public License. +// See http://www.gnu.org/licenses/ +// +// Modification based on https://github.com/hlb8122/rust-bitcoincash-addr in MIT License. +// A copy of the original license is included in LICENSE.rust-bitcoincash-addr. + use crate::consts::{ ADDRESS_TYPE_BUILTIN, ADDRESS_TYPE_CONTRACT, ADDRESS_TYPE_NULL, ADDRESS_TYPE_UNKNOWN, ADDRESS_TYPE_USER, MAINNET_PREFIX, NETWORK_ID_PREFIX, diff --git a/crates/cfx_addr/src/utils.rs b/crates/cfx_addr/src/utils.rs index c71d58159..9be390b9e 100644 --- a/crates/cfx_addr/src/utils.rs +++ b/crates/cfx_addr/src/utils.rs @@ -1,3 +1,10 @@ +// Copyright 2021 Conflux Foundation. All rights reserved. +// Conflux is free software and distributed under GNU General Public License. +// See http://www.gnu.org/licenses/ +// +// Modification based on https://github.com/hlb8122/rust-bitcoincash-addr in MIT License. +// A copy of the original license is included in LICENSE.rust-bitcoincash-addr. 
+ use crate::types::DecodingError; #[cfg(not(feature = "std"))] use alloc::vec::Vec; @@ -87,6 +94,10 @@ pub fn convert_bits( #[cfg(test)] mod tests { use super::*; + #[cfg(not(feature = "std"))] + extern crate alloc; + #[cfg(not(feature = "std"))] + use alloc::vec; #[test] fn test_expand_prefix() { diff --git a/crates/cfx_addr/tests/decode.rs b/crates/cfx_addr/tests/decode.rs index 9705a0da7..57635ba30 100644 --- a/crates/cfx_addr/tests/decode.rs +++ b/crates/cfx_addr/tests/decode.rs @@ -1,3 +1,10 @@ +// Copyright 2021 Conflux Foundation. All rights reserved. +// Conflux is free software and distributed under GNU General Public License. +// See http://www.gnu.org/licenses/ +// +// Modification based on https://github.com/hlb8122/rust-bitcoincash-addr in MIT License. +// A copy of the original license is included in LICENSE.rust-bitcoincash-addr. + use cfx_addr::*; use rustc_hex::FromHex; diff --git a/crates/cfx_addr/tests/encode.rs b/crates/cfx_addr/tests/encode.rs index 15b0e984e..ef53d2c14 100644 --- a/crates/cfx_addr/tests/encode.rs +++ b/crates/cfx_addr/tests/encode.rs @@ -1,3 +1,9 @@ +// Copyright 2021 Conflux Foundation. All rights reserved. +// Conflux is free software and distributed under GNU General Public License. +// See http://www.gnu.org/licenses/ +// +// Modification based on https://github.com/hlb8122/rust-bitcoincash-addr in MIT License. +// A copy of the original license is included in LICENSE.rust-bitcoincash-addr. use cfx_addr::*; use rustc_hex::FromHex; diff --git a/dev-support/check-crates.sh b/dev-support/check-crates.sh index f043edd21..dc641512d 100755 --- a/dev-support/check-crates.sh +++ b/dev-support/check-crates.sh @@ -3,5 +3,6 @@ grep -oP '^\s*\K[\w-]+(?=\s*=\s*{ path)' Cargo.toml | \ xargs -I {} sh -c \ 'echo "\n\033[1;36m Checking individual crate {}\033[0m\n" && \ + cargo check -p cfx-addr --no-default-features --tests && \ cargo check -p {} && \ cargo check --tests -p {}' diff --git a/dev-support/test.sh b/dev-support/test.sh index 2812d0a40..955751814 100755 --- a/dev-support/test.sh +++ b/dev-support/test.sh @@ -71,7 +71,7 @@ function check_unit_tests { pushd $ROOT_DIR > /dev/null local result result=$( - cargo test --release --all | tee /dev/stderr + cargo test --release --all && cargo test -p cfx-addr --no-default-features | tee /dev/stderr ) local exit_code=$? 
popd > /dev/null From 2cf3311ce2b7aa9f1be822a8013fa376907fb48b Mon Sep 17 00:00:00 2001 From: iosh Date: Mon, 11 Nov 2024 14:14:46 +0800 Subject: [PATCH 17/31] chore: format code --- crates/client/src/rpc/impls/cfx/light.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/crates/client/src/rpc/impls/cfx/light.rs b/crates/client/src/rpc/impls/cfx/light.rs index 90a9101eb..68c7ec9a9 100644 --- a/crates/client/src/rpc/impls/cfx/light.rs +++ b/crates/client/src/rpc/impls/cfx/light.rs @@ -9,9 +9,7 @@ use cfxcore::{ block_data_manager::BlockDataManager, consensus::ConsensusConfig, errors::account_result_to_rpc_result, - light_protocol::{ - self, query_service::TxInfo, Error as LightError, - }, + light_protocol::{self, query_service::TxInfo, Error as LightError}, verification::EpochReceiptProof, ConsensusGraph, ConsensusGraphTrait, LightQueryService, PeerInfo, SharedConsensusGraph, From 1b9c11782cee939b1cfc85f545f7e6c0573fe737 Mon Sep 17 00:00:00 2001 From: Pana Date: Tue, 12 Nov 2024 10:47:03 +0800 Subject: [PATCH 18/31] unset zero gas when estimate and call --- crates/rpc/rpc-eth-types/src/transaction_request.rs | 6 +++++- crates/rpc/rpc/src/eth.rs | 4 ++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/crates/rpc/rpc-eth-types/src/transaction_request.rs b/crates/rpc/rpc-eth-types/src/transaction_request.rs index ee9f0cb1a..47b9e1279 100644 --- a/crates/rpc/rpc-eth-types/src/transaction_request.rs +++ b/crates/rpc/rpc-eth-types/src/transaction_request.rs @@ -67,10 +67,14 @@ pub struct TransactionRequest { } impl TransactionRequest { - pub fn unset_zero_gas_price(&mut self) { + pub fn unset_zero_gas_and_price(&mut self) { if self.gas_price == Some(U256::zero()) { self.gas_price = None; } + + if self.gas == Some(U256::zero()) { + self.gas = None; + } } pub fn transaction_type(&self) -> u8 { diff --git a/crates/rpc/rpc/src/eth.rs b/crates/rpc/rpc/src/eth.rs index 93b291bf4..cf2077a9a 100644 --- a/crates/rpc/rpc/src/eth.rs +++ b/crates/rpc/rpc/src/eth.rs @@ -146,8 +146,8 @@ impl EthApi { epoch => epoch.try_into()?, }; - // if gas_price is zero, it is considered as not set - request.unset_zero_gas_price(); + // if gas_price and gas is zero, it is considered as not set + request.unset_zero_gas_and_price(); let estimate_request = EstimateRequest { has_sender: request.from.is_some(), From 05304163d56ef72fa2cd65026da0d1163155aa47 Mon Sep 17 00:00:00 2001 From: Pana Date: Tue, 12 Nov 2024 14:45:39 +0800 Subject: [PATCH 19/31] change cargo check line --- dev-support/check-crates.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-support/check-crates.sh b/dev-support/check-crates.sh index dc641512d..02f55ecd5 100755 --- a/dev-support/check-crates.sh +++ b/dev-support/check-crates.sh @@ -3,6 +3,6 @@ grep -oP '^\s*\K[\w-]+(?=\s*=\s*{ path)' Cargo.toml | \ xargs -I {} sh -c \ 'echo "\n\033[1;36m Checking individual crate {}\033[0m\n" && \ - cargo check -p cfx-addr --no-default-features --tests && \ cargo check -p {} && \ cargo check --tests -p {}' +cargo check -p cfx-addr --no-default-features --tests From 16265796b40a9aebe2cd675ad6cfc328f64bc4a2 Mon Sep 17 00:00:00 2001 From: Pana Date: Tue, 12 Nov 2024 18:29:50 +0800 Subject: [PATCH 20/31] change cfx_key mod import style to mordern style --- Cargo.lock | 2 +- Cargo.toml | 4 +++- crates/cfx_key/Cargo.toml | 17 +++++++++-------- crates/cfx_key/src/brain.rs | 10 +++++----- crates/cfx_key/src/brain_prefix.rs | 3 +-- crates/cfx_key/src/brain_recover.rs | 1 + crates/cfx_key/src/crypto.rs | 13 ++++--------- 
crates/cfx_key/src/extended.rs | 9 +++------ crates/cfx_key/src/keccak.rs | 4 ++-- crates/cfx_key/src/keypair.rs | 3 +-- crates/cfx_key/src/lib.rs | 26 +++----------------------- crates/cfx_key/src/password.rs | 1 + crates/cfx_key/src/prefix.rs | 3 +-- crates/cfx_key/src/random.rs | 2 +- crates/cfx_key/src/secret.rs | 3 +-- crates/cfx_key/src/signature.rs | 14 ++++---------- 16 files changed, 41 insertions(+), 74 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2f05cc7c2..f7f6d9ebf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1748,7 +1748,7 @@ dependencies = [ "serde", "serde_derive", "threadpool", - "tiny-keccak 1.5.0", + "tiny-keccak 2.0.2", "zeroize", ] diff --git a/Cargo.toml b/Cargo.toml index 9bb2675e4..2fc70584e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -172,6 +172,7 @@ strum_macros = "0.20" # parallelism parking_lot = "0.11" rayon = "1.10" +threadpool = "1.7" # alloy & revm alloy-sol-types = "0.7.2" @@ -195,11 +196,12 @@ jsonrpsee = "0.24.4" # async async-trait = "0.1" -# crypto +# crypto & hash fixed-hash = "0.5" keccak-hash = "0.5" tiny-keccak = "2.0.2" bls-signatures = { git = "https://github.com/Conflux-Chain/bls-signatures.git", rev = "fb52187df92d27c365642cb7e7b2aaf60437cf9c", default-features = false, features = ["multicore", "blst"] } +secp256k1 = "0.30.0" # misc clap = "2" diff --git a/crates/cfx_key/Cargo.toml b/crates/cfx_key/Cargo.toml index ac694c29b..f8a9b365f 100644 --- a/crates/cfx_key/Cargo.toml +++ b/crates/cfx_key/Cargo.toml @@ -3,26 +3,27 @@ description = "Conflux Keys Generator" name = "cfxkey" version = "0.3.0" authors = ["Conflux Foundation"] +edition = "2021" [dependencies] cfx-types = { workspace = true } edit-distance = "2.0" parity-crypto = { workspace = true } parity-secp256k1 = { workspace = true } -lazy_static = "1.4" -log = "0.4" +lazy_static = { workspace = true } +log = { workspace = true } parity-wordlist = { workspace = true } quick-error = "1.2.2" -rand = "0.7" -rustc-hex = "2.1" -serde = "1.0" -serde_derive = "1.0" -tiny-keccak = "1.4" +rand = { workspace = true } +rustc-hex = { workspace = true } +serde = { workspace = true } +serde_derive = { workspace = true } +tiny-keccak = { workspace = true } zeroize = "1.0.0" malloc_size_of_derive = { workspace = true } malloc_size_of = { workspace = true } docopt = "1.0" env_logger = "0.5" panic_hook = { workspace = true } -threadpool = "1.7" +threadpool = { workspace = true } diff --git a/crates/cfx_key/src/brain.rs b/crates/cfx_key/src/brain.rs index 52070344a..f2092916f 100644 --- a/crates/cfx_key/src/brain.rs +++ b/crates/cfx_key/src/brain.rs @@ -15,7 +15,8 @@ // along with Parity Ethereum. If not, see . use super::{Generator, KeyPair, Secret}; -use keccak::Keccak256; +use crate::keccak::Keccak256; +use log::trace; use parity_wordlist; /// Simple brainwallet. 
@@ -26,13 +27,13 @@ impl Brain { pub fn validate_phrase( phrase: &str, expected_words: usize, - ) -> Result<(), ::WordlistError> { + ) -> Result<(), crate::WordlistError> { parity_wordlist::validate_phrase(phrase, expected_words) } } impl Generator for Brain { - type Error = ::Void; + type Error = crate::Void; fn generate(&mut self) -> Result { let seed = self.0.clone(); @@ -65,8 +66,7 @@ impl Generator for Brain { #[cfg(test)] mod tests { - use Brain; - use Generator; + use crate::{Brain, Generator}; #[test] fn test_brain() { diff --git a/crates/cfx_key/src/brain_prefix.rs b/crates/cfx_key/src/brain_prefix.rs index b8c58b09e..733aa07d7 100644 --- a/crates/cfx_key/src/brain_prefix.rs +++ b/crates/cfx_key/src/brain_prefix.rs @@ -57,8 +57,7 @@ impl Generator for BrainPrefix { #[cfg(test)] mod tests { - use BrainPrefix; - use Generator; + use crate::{BrainPrefix, Generator}; #[test] fn prefix_generator() { diff --git a/crates/cfx_key/src/brain_recover.rs b/crates/cfx_key/src/brain_recover.rs index 657d0e2d7..4cee87980 100644 --- a/crates/cfx_key/src/brain_recover.rs +++ b/crates/cfx_key/src/brain_recover.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . +use log::{info, trace}; use std::collections::HashSet; use edit_distance::edit_distance; diff --git a/crates/cfx_key/src/crypto.rs b/crates/cfx_key/src/crypto.rs index ec3278177..4af5c2f23 100644 --- a/crates/cfx_key/src/crypto.rs +++ b/crates/cfx_key/src/crypto.rs @@ -15,6 +15,7 @@ // along with Parity Ethereum. If not, see . use parity_crypto::error::SymmError; +use quick_error::quick_error; use secp256k1; use std::io; @@ -44,10 +45,8 @@ quick_error! { /// ECDH functions pub mod ecdh { use super::Error; + use crate::{Public, Secret, SECP256K1}; use secp256k1::{self, ecdh, key}; - use Public; - use Secret; - use SECP256K1; /// Agree on a shared secret pub fn agree(secret: &Secret, public: &Public) -> Result { @@ -70,12 +69,9 @@ pub mod ecdh { /// ECIES function pub mod ecies { use super::{ecdh, Error}; + use crate::{Generator, Public, Random, Secret}; use cfx_types::H128; use parity_crypto::{aes, digest, hmac, is_equal}; - use Generator; - use Public; - use Random; - use Secret; /// Encrypt a message with a public key, writing an HMAC covering both /// the plaintext and authenticated data. @@ -183,8 +179,7 @@ pub mod ecies { #[cfg(test)] mod tests { use super::ecies; - use Generator; - use Random; + use crate::{Generator, Random}; #[test] fn ecies_shared() { diff --git a/crates/cfx_key/src/extended.rs b/crates/cfx_key/src/extended.rs index 4d37c575d..979cd408b 100644 --- a/crates/cfx_key/src/extended.rs +++ b/crates/cfx_key/src/extended.rs @@ -17,9 +17,8 @@ //! 
Extended keys pub use self::derivation::Error as DerivationError; +use crate::{secret::Secret, Public}; use cfx_types::H256; -use secret::Secret; -use Public; /// Represents label that can be stored as a part of key derivation pub trait Label { @@ -206,13 +205,11 @@ impl ExtendedKeyPair { // https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki mod derivation { use super::{Derivation, Label}; + use crate::{keccak, math::curve_order, SECP256K1}; use cfx_types::{BigEndianHash, H256, H512, U256, U512}; - use keccak; - use math::curve_order; use parity_crypto::hmac; use secp256k1::key::{PublicKey, SecretKey}; use std::convert::TryInto; - use SECP256K1; #[derive(Debug)] pub enum Error { @@ -403,8 +400,8 @@ mod tests { use super::{ derivation, Derivation, ExtendedKeyPair, ExtendedPublic, ExtendedSecret, }; + use crate::secret::Secret; use cfx_types::{H128, H256, H512}; - use secret::Secret; use std::str::FromStr; fn master_chain_basic() -> (H256, H256) { diff --git a/crates/cfx_key/src/keccak.rs b/crates/cfx_key/src/keccak.rs index 56b2ff0ff..eec03b176 100644 --- a/crates/cfx_key/src/keccak.rs +++ b/crates/cfx_key/src/keccak.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use tiny_keccak::Keccak; +use tiny_keccak::{Keccak, Hasher}; pub trait Keccak256 { fn keccak256(&self) -> T @@ -23,7 +23,7 @@ pub trait Keccak256 { impl Keccak256<[u8; 32]> for [u8] { fn keccak256(&self) -> [u8; 32] { - let mut keccak = Keccak::new_keccak256(); + let mut keccak = Keccak::v256(); let mut result = [0u8; 32]; keccak.update(self); keccak.finalize(&mut result); diff --git a/crates/cfx_key/src/keypair.rs b/crates/cfx_key/src/keypair.rs index 0aafcfa58..e8436362c 100644 --- a/crates/cfx_key/src/keypair.rs +++ b/crates/cfx_key/src/keypair.rs @@ -103,9 +103,8 @@ impl KeyPair { #[cfg(test)] mod tests { + use crate::{KeyPair, Secret}; use std::str::FromStr; - use KeyPair; - use Secret; #[test] fn from_secret() { diff --git a/crates/cfx_key/src/lib.rs b/crates/cfx_key/src/lib.rs index 730516c99..7295bc59c 100644 --- a/crates/cfx_key/src/lib.rs +++ b/crates/cfx_key/src/lib.rs @@ -16,28 +16,6 @@ // #![warn(missing_docs)] -extern crate cfx_types; -extern crate edit_distance; -extern crate parity_crypto; -extern crate parity_wordlist; -#[macro_use] -extern crate quick_error; -extern crate malloc_size_of; -extern crate malloc_size_of_derive; -extern crate rand; -extern crate rustc_hex; -extern crate secp256k1; -extern crate serde; -extern crate tiny_keccak; -extern crate zeroize; - -#[macro_use] -extern crate lazy_static; -#[macro_use] -extern crate log; -#[macro_use] -extern crate serde_derive; - mod brain; mod brain_prefix; mod error; @@ -54,6 +32,9 @@ pub mod brain_recover; pub mod crypto; pub mod math; +use lazy_static::lazy_static; +pub use parity_wordlist::Error as WordlistError; + pub use self::{ brain::Brain, brain_prefix::BrainPrefix, @@ -64,7 +45,6 @@ pub use self::{ }, keypair::{is_compatible_public, public_to_address, KeyPair}, math::public_is_valid, - parity_wordlist::Error as WordlistError, password::Password, prefix::Prefix, random::Random, diff --git a/crates/cfx_key/src/password.rs b/crates/cfx_key/src/password.rs index b0ca63faa..7d3159450 100644 --- a/crates/cfx_key/src/password.rs +++ b/crates/cfx_key/src/password.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
+use serde::{Deserialize, Serialize}; use std::{fmt, ptr}; #[derive(Clone, PartialEq, Eq, Serialize, Deserialize)] diff --git a/crates/cfx_key/src/prefix.rs b/crates/cfx_key/src/prefix.rs index 492f45ff4..63e9435f0 100644 --- a/crates/cfx_key/src/prefix.rs +++ b/crates/cfx_key/src/prefix.rs @@ -45,8 +45,7 @@ impl Generator for Prefix { #[cfg(test)] mod tests { - use Generator; - use Prefix; + use crate::{Generator, Prefix}; #[test] fn prefix_generator() { diff --git a/crates/cfx_key/src/random.rs b/crates/cfx_key/src/random.rs index e703c1e0e..54952f371 100644 --- a/crates/cfx_key/src/random.rs +++ b/crates/cfx_key/src/random.rs @@ -32,7 +32,7 @@ impl Generator for Random { } impl Generator for OsRng { - type Error = ::Void; + type Error = crate::Void; fn generate(&mut self) -> Result { let (sec, publ) = SECP256K1 diff --git a/crates/cfx_key/src/secret.rs b/crates/cfx_key/src/secret.rs index 9f29f962e..ac17dcab0 100644 --- a/crates/cfx_key/src/secret.rs +++ b/crates/cfx_key/src/secret.rs @@ -14,13 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . +use crate::{Error, SECP256K1}; use cfx_types::H256; use malloc_size_of_derive::MallocSizeOf as DeriveMallocSizeOf; use secp256k1::{constants::SECRET_KEY_SIZE as SECP256K1_SECRET_KEY_SIZE, key}; use std::{fmt, ops::Deref, str::FromStr}; use zeroize::Zeroize; -use Error; -use SECP256K1; #[derive(Clone, PartialEq, Eq, DeriveMallocSizeOf)] pub struct Secret { diff --git a/crates/cfx_key/src/signature.rs b/crates/cfx_key/src/signature.rs index b4d5e2a25..b5e0cf24f 100644 --- a/crates/cfx_key/src/signature.rs +++ b/crates/cfx_key/src/signature.rs @@ -14,8 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . 
+use crate::{ + public_to_address, Address, Error, Message, Public, Secret, SECP256K1, +}; use cfx_types::{H256, H520}; -use public_to_address; use rustc_hex::{FromHex, ToHex}; use secp256k1::{ key::{PublicKey, SecretKey}, @@ -29,12 +31,6 @@ use std::{ ops::{Deref, DerefMut}, str::FromStr, }; -use Address; -use Error; -use Message; -use Public; -use Secret; -use SECP256K1; /// Signature encoded as RSV components #[repr(C)] @@ -258,10 +254,8 @@ pub fn recover( #[cfg(test)] mod tests { use super::{recover, sign, verify_address, verify_public, Signature}; + use crate::{Generator, Message, Random}; use std::str::FromStr; - use Generator; - use Message; - use Random; #[test] fn vrs_conversion() { From 32816cf691e80cd7db53c70d75e244325aa60e8c Mon Sep 17 00:00:00 2001 From: Pana Date: Tue, 12 Nov 2024 18:44:12 +0800 Subject: [PATCH 21/31] adapt workspace dependent style --- Cargo.lock | 4 +--- Cargo.toml | 2 ++ crates/cfx_key/Cargo.toml | 8 +++----- crates/cfx_key/src/crypto.rs | 32 ++++++++++---------------------- crates/cfx_key/src/keccak.rs | 2 +- 5 files changed, 17 insertions(+), 31 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f7f6d9ebf..9450150ee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1731,9 +1731,7 @@ name = "cfxkey" version = "0.3.0" dependencies = [ "cfx-types", - "docopt", "edit-distance", - "env_logger", "lazy_static", "log", "malloc_size_of", @@ -1742,11 +1740,11 @@ dependencies = [ "parity-crypto", "parity-secp256k1", "parity-wordlist", - "quick-error", "rand 0.7.3", "rustc-hex", "serde", "serde_derive", + "thiserror", "threadpool", "tiny-keccak 2.0.2", "zeroize", diff --git a/Cargo.toml b/Cargo.toml index 2fc70584e..754ffff52 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -212,6 +212,8 @@ itertools = "0.10.0" once_cell = "1.17.1" chrono = "=0.4.38" byteorder = "1.2.7" +edit-distance = "2" +zeroize = "1" # conflux forked crates rocksdb = { git = "https://github.com/Conflux-Chain/rust-rocksdb.git", rev = "3773afe5b953997188f37c39308105b5deb0faac" } diff --git a/crates/cfx_key/Cargo.toml b/crates/cfx_key/Cargo.toml index f8a9b365f..3bce1c2b2 100644 --- a/crates/cfx_key/Cargo.toml +++ b/crates/cfx_key/Cargo.toml @@ -7,23 +7,21 @@ edition = "2021" [dependencies] cfx-types = { workspace = true } -edit-distance = "2.0" +edit-distance = { workspace = true } parity-crypto = { workspace = true } parity-secp256k1 = { workspace = true } lazy_static = { workspace = true } log = { workspace = true } parity-wordlist = { workspace = true } -quick-error = "1.2.2" rand = { workspace = true } rustc-hex = { workspace = true } serde = { workspace = true } serde_derive = { workspace = true } tiny-keccak = { workspace = true } -zeroize = "1.0.0" +zeroize = { workspace = true } malloc_size_of_derive = { workspace = true } malloc_size_of = { workspace = true } -docopt = "1.0" -env_logger = "0.5" panic_hook = { workspace = true } threadpool = { workspace = true } +thiserror = { workspace = true } diff --git a/crates/cfx_key/src/crypto.rs b/crates/cfx_key/src/crypto.rs index 4af5c2f23..8fe362413 100644 --- a/crates/cfx_key/src/crypto.rs +++ b/crates/cfx_key/src/crypto.rs @@ -15,31 +15,19 @@ // along with Parity Ethereum. If not, see . use parity_crypto::error::SymmError; -use quick_error::quick_error; use secp256k1; use std::io; -quick_error! 
{ - #[derive(Debug)] - pub enum Error { - Secp(e: secp256k1::Error) { - display("secp256k1 error: {}", e) - cause(e) - from() - } - Io(e: io::Error) { - display("i/o error: {}", e) - cause(e) - from() - } - InvalidMessage { - display("invalid message") - } - Symm(e: SymmError) { - cause(e) - from() - } - } +#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error("secp256k1 error: {0}")] + Secp(#[from] secp256k1::Error), + #[error("i/o error: {0}")] + Io(#[from] io::Error), + #[error("invalid message")] + InvalidMessage, + #[error(transparent)] + Symm(#[from] SymmError), } /// ECDH functions diff --git a/crates/cfx_key/src/keccak.rs b/crates/cfx_key/src/keccak.rs index eec03b176..9b46af11e 100644 --- a/crates/cfx_key/src/keccak.rs +++ b/crates/cfx_key/src/keccak.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use tiny_keccak::{Keccak, Hasher}; +use tiny_keccak::{Hasher, Keccak}; pub trait Keccak256 { fn keccak256(&self) -> T From d40d3e126e6a0701d18c776cf3db77ddaaf74d48 Mon Sep 17 00:00:00 2001 From: Pana Date: Wed, 13 Nov 2024 11:26:28 +0800 Subject: [PATCH 22/31] add cfxkey unit tests --- crates/cfx_key/src/brain.rs | 6 ++--- crates/cfx_key/src/brain_prefix.rs | 6 ++--- crates/cfx_key/src/brain_recover.rs | 2 +- crates/cfx_key/src/crypto.rs | 29 +++++++++++++++++++---- crates/cfx_key/src/lib.rs | 3 ++- crates/cfx_key/src/math.rs | 2 +- crates/cfx_key/src/prefix.rs | 6 ++--- crates/cfx_key/src/random.rs | 6 ++--- crates/cfx_key/src/secret.rs | 36 ++++++++++++++++++++++++++++- crates/cfx_key/src/signature.rs | 2 +- 10 files changed, 77 insertions(+), 21 deletions(-) diff --git a/crates/cfx_key/src/brain.rs b/crates/cfx_key/src/brain.rs index f2092916f..16069fb54 100644 --- a/crates/cfx_key/src/brain.rs +++ b/crates/cfx_key/src/brain.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use super::{Generator, KeyPair, Secret}; +use super::{KeyPair, KeyPairGenerator, Secret}; use crate::keccak::Keccak256; use log::trace; use parity_wordlist; @@ -32,7 +32,7 @@ impl Brain { } } -impl Generator for Brain { +impl KeyPairGenerator for Brain { type Error = crate::Void; fn generate(&mut self) -> Result { @@ -66,7 +66,7 @@ impl Generator for Brain { #[cfg(test)] mod tests { - use crate::{Brain, Generator}; + use crate::{Brain, KeyPairGenerator}; #[test] fn test_brain() { diff --git a/crates/cfx_key/src/brain_prefix.rs b/crates/cfx_key/src/brain_prefix.rs index 733aa07d7..0ef2b38b2 100644 --- a/crates/cfx_key/src/brain_prefix.rs +++ b/crates/cfx_key/src/brain_prefix.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use super::{Brain, Error, Generator, KeyPair}; +use super::{Brain, Error, KeyPair, KeyPairGenerator}; use parity_wordlist as wordlist; /// Tries to find brain-seed keypair with address starting with given prefix. 
@@ -38,7 +38,7 @@ impl BrainPrefix { pub fn phrase(&self) -> &str { &self.last_phrase } } -impl Generator for BrainPrefix { +impl KeyPairGenerator for BrainPrefix { type Error = Error; fn generate(&mut self) -> Result { @@ -57,7 +57,7 @@ impl Generator for BrainPrefix { #[cfg(test)] mod tests { - use crate::{BrainPrefix, Generator}; + use crate::{BrainPrefix, KeyPairGenerator}; #[test] fn prefix_generator() { diff --git a/crates/cfx_key/src/brain_recover.rs b/crates/cfx_key/src/brain_recover.rs index 4cee87980..6fec697cd 100644 --- a/crates/cfx_key/src/brain_recover.rs +++ b/crates/cfx_key/src/brain_recover.rs @@ -20,7 +20,7 @@ use std::collections::HashSet; use edit_distance::edit_distance; use parity_wordlist; -use super::{Address, Brain, Generator}; +use super::{Address, Brain, KeyPairGenerator}; /// Tries to find a phrase for address, given the number /// of expected words and a partial phrase. diff --git a/crates/cfx_key/src/crypto.rs b/crates/cfx_key/src/crypto.rs index 8fe362413..832640c28 100644 --- a/crates/cfx_key/src/crypto.rs +++ b/crates/cfx_key/src/crypto.rs @@ -15,7 +15,6 @@ // along with Parity Ethereum. If not, see . use parity_crypto::error::SymmError; -use secp256k1; use std::io; #[derive(Debug, thiserror::Error)] @@ -57,7 +56,7 @@ pub mod ecdh { /// ECIES function pub mod ecies { use super::{ecdh, Error}; - use crate::{Generator, Public, Random, Secret}; + use crate::{KeyPairGenerator, Public, Random, Secret}; use cfx_types::H128; use parity_crypto::{aes, digest, hmac, is_equal}; @@ -166,8 +165,9 @@ pub mod ecies { #[cfg(test)] mod tests { - use super::ecies; - use crate::{Generator, Random}; + use super::{ecdh, ecies}; + use crate::{KeyPairGenerator, Public, Random, Secret}; + use std::str::FromStr; #[test] fn ecies_shared() { @@ -185,4 +185,25 @@ mod tests { ecies::decrypt(kp.secret(), shared, &encrypted).unwrap(); assert_eq!(decrypted[..message.len()], message[..]); } + + #[test] + fn ecdh_agree() { + /* + kp1: KeyPair { secret: 0x3d6c3a910832105febef6f8111b51b11e6cb190fb45b5fc70ee6290c411e9a09, public: 0x057c7d5b963cb4605c3e0c4d5cbefd2a31fb3877e481172d6225a77e0a5964a0112f123aaee2d42f6bec55b396564ffcbd188c799f905253c9394642447063b0 } + kp2: KeyPair { secret: 0x6da0008f5531966a9637266fd180ca66e2643920a2d60d4c34350e25f0ccda98, public: 0x4cf74522f3c86d88cd2ba56b378d3fccd4ba3fe93fe4e11ebecc24b06085fc37ee63073aa998693cf2573dc9a437ac0a94d9093054419d23390bad2329ee5eee } + */ + let secret = Secret::from_str( + "3d6c3a910832105febef6f8111b51b11e6cb190fb45b5fc70ee6290c411e9a09", + ) + .unwrap(); + let publ = Public::from_str("4cf74522f3c86d88cd2ba56b378d3fccd4ba3fe93fe4e11ebecc24b06085fc37ee63073aa998693cf2573dc9a437ac0a94d9093054419d23390bad2329ee5eee").unwrap(); + + let agree_secret = ecdh::agree(&secret, &publ).unwrap(); + + let expected = Secret::from_str( + "c6440592fa14256dbbc39639b77524e51bac84b64fa1b1726130a49263f1fb6f", + ) + .unwrap(); + assert_eq!(agree_secret, expected); + } } diff --git a/crates/cfx_key/src/lib.rs b/crates/cfx_key/src/lib.rs index 7295bc59c..eb0d9cac7 100644 --- a/crates/cfx_key/src/lib.rs +++ b/crates/cfx_key/src/lib.rs @@ -50,6 +50,7 @@ pub use self::{ random::Random, secret::Secret, signature::{recover, sign, verify_address, verify_public, Signature}, + KeyPairGenerator as Generator, }; use cfx_types::H256; @@ -67,7 +68,7 @@ lazy_static! { pub enum Void {} /// Generates new keypair. -pub trait Generator { +pub trait KeyPairGenerator { type Error; /// Should be called to generate new keypair. 
diff --git a/crates/cfx_key/src/math.rs b/crates/cfx_key/src/math.rs index 497fc3689..9199f4861 100644 --- a/crates/cfx_key/src/math.rs +++ b/crates/cfx_key/src/math.rs @@ -104,7 +104,7 @@ fn set_public(public: &mut Public, key_public: &key::PublicKey) { #[cfg(test)] mod tests { use super::{ - super::{Generator, Random}, + super::{KeyPairGenerator, Random}, public_add, public_sub, }; diff --git a/crates/cfx_key/src/prefix.rs b/crates/cfx_key/src/prefix.rs index 63e9435f0..8880af7dd 100644 --- a/crates/cfx_key/src/prefix.rs +++ b/crates/cfx_key/src/prefix.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use super::{Error, Generator, KeyPair, Random}; +use super::{Error, KeyPair, KeyPairGenerator, Random}; /// Tries to find keypair with address starting with given prefix. pub struct Prefix { @@ -28,7 +28,7 @@ impl Prefix { } } -impl Generator for Prefix { +impl KeyPairGenerator for Prefix { type Error = Error; fn generate(&mut self) -> Result { @@ -45,7 +45,7 @@ impl Generator for Prefix { #[cfg(test)] mod tests { - use crate::{Generator, Prefix}; + use crate::{KeyPairGenerator, Prefix}; #[test] fn prefix_generator() { diff --git a/crates/cfx_key/src/random.rs b/crates/cfx_key/src/random.rs index 54952f371..69f7df8f6 100644 --- a/crates/cfx_key/src/random.rs +++ b/crates/cfx_key/src/random.rs @@ -14,13 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -use super::{Generator, KeyPair, SECP256K1}; +use super::{KeyPair, KeyPairGenerator, SECP256K1}; use rand::rngs::OsRng; /// Randomly generates new keypair, instantiating the RNG each time. pub struct Random; -impl Generator for Random { +impl KeyPairGenerator for Random { type Error = ::std::io::Error; fn generate(&mut self) -> Result { @@ -31,7 +31,7 @@ impl Generator for Random { } } -impl Generator for OsRng { +impl KeyPairGenerator for OsRng { type Error = crate::Void; fn generate(&mut self) -> Result { diff --git a/crates/cfx_key/src/secret.rs b/crates/cfx_key/src/secret.rs index ac17dcab0..5a48a4bfc 100644 --- a/crates/cfx_key/src/secret.rs +++ b/crates/cfx_key/src/secret.rs @@ -253,7 +253,7 @@ impl Deref for Secret { #[cfg(test)] mod tests { use super::{ - super::{Generator, Random}, + super::{KeyPairGenerator, Random}, Secret, }; use std::str::FromStr; @@ -301,4 +301,38 @@ mod tests { pow3_expected.mul(&secret).unwrap(); assert_eq!(pow3, pow3_expected); } + + #[test] + fn secret_sub_and_add() { + let secret = Random.generate().unwrap().secret().clone(); + let secret_one = Secret::from_str( + "0000000000000000000000000000000000000000000000000000000000000001", + ) + .unwrap(); + + let mut sub1 = secret.clone(); + sub1.sub(&secret_one).unwrap(); + + let mut dec1 = secret.clone(); + dec1.dec().unwrap(); + + assert_eq!(sub1, dec1); + + let mut add1 = sub1.clone(); + add1.add(&secret_one).unwrap(); + assert_eq!(add1, secret); + } + + #[test] + fn secret_neg() { + let secret_one = Secret::from_str( + "0000000000000000000000000000000000000000000000000000000000000001", + ) + .unwrap(); + let minus_one = Secret::from(secp256k1::key::MINUS_ONE_KEY); + + let mut inv1 = secret_one.clone(); + inv1.neg().unwrap(); + assert_eq!(inv1, minus_one); + } } diff --git a/crates/cfx_key/src/signature.rs b/crates/cfx_key/src/signature.rs index b5e0cf24f..2cd9fdfde 100644 --- a/crates/cfx_key/src/signature.rs +++ b/crates/cfx_key/src/signature.rs @@ -254,7 +254,7 @@ pub fn recover( #[cfg(test)] mod tests 
{ use super::{recover, sign, verify_address, verify_public, Signature}; - use crate::{Generator, Message, Random}; + use crate::{KeyPairGenerator, Message, Random}; use std::str::FromStr; #[test] From ee3c12c8ac234d83ec684506fc1ee02572875b11 Mon Sep 17 00:00:00 2001 From: Pana Date: Wed, 13 Nov 2024 11:34:47 +0800 Subject: [PATCH 23/31] change bins/cfx_key dependency import style to mordern style --- Cargo.toml | 2 ++ bins/cfx_key/Cargo.toml | 17 +++++++++-------- {crates => bins}/cfx_key/README.md | 0 bins/cfx_key/src/main.rs | 13 +------------ 4 files changed, 12 insertions(+), 20 deletions(-) rename {crates => bins}/cfx_key/README.md (100%) diff --git a/Cargo.toml b/Cargo.toml index 754ffff52..4591d49a9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -214,6 +214,8 @@ chrono = "=0.4.38" byteorder = "1.2.7" edit-distance = "2" zeroize = "1" +docopt = "1.0" +env_logger = "0.5" # conflux forked crates rocksdb = { git = "https://github.com/Conflux-Chain/rust-rocksdb.git", rev = "3773afe5b953997188f37c39308105b5deb0faac" } diff --git a/bins/cfx_key/Cargo.toml b/bins/cfx_key/Cargo.toml index 0d77ad2d5..84b0fb0ba 100644 --- a/bins/cfx_key/Cargo.toml +++ b/bins/cfx_key/Cargo.toml @@ -1,19 +1,20 @@ [package] -description = "Parity Ethereum Keys Generator CLI" +description = "Conflux Core Space Keys Generator CLI" name = "ctxkey-cli" version = "0.1.0" authors = ["Parity Technologies "] +edition = "2021" [dependencies] -docopt = "1.0" -env_logger = "0.5" +docopt = { workspace = true } +env_logger = { workspace = true } cfxkey = { workspace = true } panic_hook = { workspace = true } -parity-wordlist={ workspace = true } -rustc-hex = "2.1" -serde = "1.0" -serde_derive = "1.0" -threadpool = "1.7" +parity-wordlist= { workspace = true } +rustc-hex = { workspace = true } +serde = { workspace = true } +serde_derive = { workspace = true } +threadpool = { workspace = true } [[bin]] name = "cfxkey" diff --git a/crates/cfx_key/README.md b/bins/cfx_key/README.md similarity index 100% rename from crates/cfx_key/README.md rename to bins/cfx_key/README.md diff --git a/bins/cfx_key/src/main.rs b/bins/cfx_key/src/main.rs index 96993f73e..64ba96175 100644 --- a/bins/cfx_key/src/main.rs +++ b/bins/cfx_key/src/main.rs @@ -18,18 +18,6 @@ // You should have received a copy of the GNU General Public License // along with Parity Ethereum. If not, see . -extern crate cfxkey; -extern crate docopt; -extern crate env_logger; -extern crate panic_hook; -extern crate parity_wordlist; -extern crate rustc_hex; -extern crate serde; -extern crate threadpool; - -#[macro_use] -extern crate serde_derive; - use std::{env, fmt, io, num::ParseIntError, process, sync}; use cfxkey::{ @@ -38,6 +26,7 @@ use cfxkey::{ }; use docopt::Docopt; use rustc_hex::{FromHex, FromHexError}; +use serde::Deserialize; const USAGE: &str = r#" Conflux keys generator. 
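The cfxkey commits above (patches 20–23) share one theme: dropping the 2015-edition `extern crate` / `#[macro_use]` declarations in favour of path-based `use` imports, and moving from the tiny-keccak 1.x constructor API to the 2.x `Hasher` trait. Below is a minimal, self-contained sketch of the resulting style; it is not code from the patches, and it assumes `tiny-keccak = { version = "2", features = ["keccak"] }` as a dependency — the helper name `keccak256` and the `main` driver are illustrative only.

use tiny_keccak::{Hasher, Keccak};

/// Keccak-256 helper in the tiny-keccak 2.x style: `Keccak::v256()` replaces
/// the 1.x `Keccak::new_keccak256()` constructor, and `update` / `finalize`
/// are provided by the `Hasher` trait, which therefore has to be imported
/// explicitly instead of being pulled in at the crate root.
fn keccak256(input: &[u8]) -> [u8; 32] {
    let mut hasher = Keccak::v256();
    let mut output = [0u8; 32];
    hasher.update(input);
    // `finalize` consumes the hasher and writes the digest into `output`.
    hasher.finalize(&mut output);
    output
}

fn main() {
    // Well-known vector: Keccak-256 of the empty input.
    let digest = keccak256(b"");
    let hex: String = digest.iter().map(|b| format!("{:02x}", b)).collect();
    assert_eq!(
        hex,
        "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
    );
    println!("keccak256(\"\") = 0x{}", hex);
}

Importing the `Hasher` trait next to the `Keccak` type is what lets such a file compile with no crate-root `extern crate` lines at all; the `use serde::Deserialize`, `use log::trace`, and `use lazy_static::lazy_static` changes in these commits follow the same pattern.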
From 4cf7e5e109b9336e34ad85c3e5795a6f3ea6dfc8 Mon Sep 17 00:00:00 2001 From: Pana Date: Thu, 14 Nov 2024 11:43:12 +0800 Subject: [PATCH 24/31] add base_fee_per_gas and custom to pubsub block header --- crates/client/src/rpc/types/cfx/block.rs | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/crates/client/src/rpc/types/cfx/block.rs b/crates/client/src/rpc/types/cfx/block.rs index 6cfa62f5f..77c60c4ad 100644 --- a/crates/client/src/rpc/types/cfx/block.rs +++ b/crates/client/src/rpc/types/cfx/block.rs @@ -427,11 +427,13 @@ pub struct Header { pub block_number: Option, /// Gas Limit pub gas_limit: U256, + /// Base fee + #[serde(skip_serializing_if = "Option::is_none")] + pub base_fee_per_gas: Option, /// Timestamp pub timestamp: U256, /// Difficulty pub difficulty: U256, - // TODO: We should change python test script and remove this field /// PoW Quality pub pow_quality: Option, /// Referee hashes @@ -440,6 +442,8 @@ pub struct Header { pub adaptive: bool, /// Nonce of the block pub nonce: U256, + /// Custom field + pub custom: Vec, /// PoS reference. pub pos_reference: Option, } @@ -458,6 +462,9 @@ impl Header { let block_number = consensus.get_block_number(&hash)?.map(Into::into); + let base_fee_per_gas: Option = + h.base_price().map(|x| x[Space::Native]).into(); + let referee_hashes = h.referee_hashes().iter().map(|x| H256::from(*x)).collect(); @@ -474,6 +481,7 @@ impl Header { epoch_number, block_number, gas_limit: h.gas_limit().into(), + base_fee_per_gas, timestamp: h.timestamp().into(), difficulty: h.difficulty().into(), adaptive: h.adaptive(), @@ -481,9 +489,9 @@ impl Header { nonce: h.nonce().into(), pow_quality: h.pow_hash.map(|pow_hash| { pow::pow_hash_to_quality(&pow_hash, &h.nonce()) - }), /* TODO(thegaram): - * include custom */ + }), pos_reference: *h.pos_reference(), + custom: h.custom().clone().into_iter().map(Into::into).collect(), }) } } @@ -619,6 +627,8 @@ mod tests { transactions_root: KECCAK_EMPTY_LIST_RLP.into(), epoch_number: None, block_number: None, + base_fee_per_gas: None, + custom: vec![], gas_limit: U256::default(), timestamp: 0.into(), difficulty: U256::default(), @@ -632,7 +642,7 @@ mod tests { assert_eq!( serialized_header, - r#"{"hash":"0x0000000000000000000000000000000000000000000000000000000000000000","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","height":"0x0","miner":"CFX:TYPE.NULL:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA0SFBNJM2","deferredStateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","deferredReceiptsRoot":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","deferredLogsBloomHash":"0xd397b3b043d87fcd6fad1291ff0bfd16401c274896d8c63a923727f077b8e0b5","blame":"0x0","transactionsRoot":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","epochNumber":null,"blockNumber":null,"gasLimit":"0x0","timestamp":"0x0","difficulty":"0x0","powQuality":null,"refereeHashes":[],"adaptive":false,"nonce":"0x0","posReference":null}"# + 
r#"{"hash":"0x0000000000000000000000000000000000000000000000000000000000000000","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000","height":"0x0","miner":"CFX:TYPE.NULL:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA0SFBNJM2","deferredStateRoot":"0x0000000000000000000000000000000000000000000000000000000000000000","deferredReceiptsRoot":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","deferredLogsBloomHash":"0xd397b3b043d87fcd6fad1291ff0bfd16401c274896d8c63a923727f077b8e0b5","blame":"0x0","transactionsRoot":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347","epochNumber":null,"blockNumber":null,"gasLimit":"0x0","timestamp":"0x0","difficulty":"0x0","powQuality":null,"refereeHashes":[],"adaptive":false,"nonce":"0x0","custom":[],"posReference":null}"# ); } } From 187ed1659922c28f7fdb816dc18a02a63cb450ae Mon Sep 17 00:00:00 2001 From: Pana Date: Thu, 14 Nov 2024 12:33:59 +0800 Subject: [PATCH 25/31] update core space block.gasUsed to return null if block is not executed --- changelogs/JSONRPC.md | 5 +++++ crates/client/Cargo.toml | 2 +- crates/client/src/rpc/types/cfx/block.rs | 4 +--- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/changelogs/JSONRPC.md b/changelogs/JSONRPC.md index 00b8b6aed..332ec7d26 100644 --- a/changelogs/JSONRPC.md +++ b/changelogs/JSONRPC.md @@ -1,5 +1,10 @@ # JSON-RPC CHANGELOG +## vNext + +1. The gasUsed field of Core Space block will return `null` if the block is not executed. +2. Core Space pubsub block header notification add two new fields: `baseFeePerGas` and `custom`. + ## v2.4.1 1. eSpace add new RPC method `eth_getBlockReceipts` diff --git a/crates/client/Cargo.toml b/crates/client/Cargo.toml index 261c2c12d..007e77607 100644 --- a/crates/client/Cargo.toml +++ b/crates/client/Cargo.toml @@ -57,7 +57,7 @@ rand_08 = {package = "rand", version = "0.8.0"} kvdb-rocksdb = { workspace = true } tempdir = "0.3" rustc-hex = { workspace = true } -threadpool = "1.0" +threadpool = { workspace = true } metrics = { workspace = true } delegate = { workspace = true } transient-hashmap = "0.4" diff --git a/crates/client/src/rpc/types/cfx/block.rs b/crates/client/src/rpc/types/cfx/block.rs index 77c60c4ad..ccaf08011 100644 --- a/crates/client/src/rpc/types/cfx/block.rs +++ b/crates/client/src/rpc/types/cfx/block.rs @@ -160,9 +160,7 @@ impl Block { // get the block.gas_used let tx_len = b.transactions.len(); - let (gas_used, transactions) = if tx_len == 0 { - (Some(U256::from(0)), BlockTransactions::Hashes(vec![])) - } else { + let (gas_used, transactions) = { let maybe_results = consensus_inner .block_execution_results_by_hash( &b.hash(), From 611958f8be856495b7d1fb0fb9d9dcf96aa3bacd Mon Sep 17 00:00:00 2001 From: Pana Date: Thu, 14 Nov 2024 15:04:39 +0800 Subject: [PATCH 26/31] set tests block.gasUsed to none to keep same with light block --- tests/light/rpc_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/light/rpc_test.py b/tests/light/rpc_test.py index 6f2216a34..3ff560b0c 100755 --- a/tests/light/rpc_test.py +++ b/tests/light/rpc_test.py @@ -472,7 +472,7 @@ def assert_blocks_equal(self, light_block, block): # full nodes will use '0x0' for empty blocks and None # for transactions not executed yet - block['gasUsed'] = '0x0' if block['gasUsed'] == '0x0' else None + block['gasUsed'] = None for tx in block['transactions']: if type(tx) is not dict: continue From 2b2c4d40443750831419ef297eb6dca01d54a90c Mon Sep 17 00:00:00 2001 From: Pana Date: Fri, 15 Nov 2024 14:34:14 +0800 
Subject: [PATCH 27/31] op collection assert equal to one by one --- tests/tx_consistency_test.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tests/tx_consistency_test.py b/tests/tx_consistency_test.py index a5db4ce78..79c0162ed 100755 --- a/tests/tx_consistency_test.py +++ b/tests/tx_consistency_test.py @@ -60,6 +60,7 @@ def test(self, num_senders, num_receivers, num_txs): self.check_with_rpc(client.epoch_number) self.check_with_rpc(client.best_block_hash) self.check_with_rpc(client.gas_price) + time.sleep(5) self.check_with_rpc(client.chain, True) # check receipt @@ -198,7 +199,12 @@ def check_with_rpc(self, client_rpc, collection_result=False): for name in dir(client): if name == client_rpc.__name__: value = getattr(client, name)() - assert_equal(value, expected_value) + if collection_result: + assert_equal(len(value), len(expected_value)) + for idy in range(len(value) - 7): + assert_equal(value[idy], expected_value[idy]) + else: + assert_equal(value, expected_value) if collection_result: self.log.info("check RPC: API = {}, Len = {}".format(client_rpc.__name__, len(expected_value))) From 679cf00e2e330db43e6abe07ada44fc2ed1cb9d1 Mon Sep 17 00:00:00 2001 From: darwintree <17946284+darwintree@users.noreply.github.com> Date: Thu, 14 Nov 2024 16:52:15 +0800 Subject: [PATCH 28/31] refactor: remove asyncore in favor of asyncio --- tests/conflux/messages.py | 1 - tests/crash_archive_era150_test.py | 1 - tests/crash_archive_test.py | 1 - tests/crash_test.py | 1 - tests/expire_block_test.py | 1 - tests/invalid_block_sync_test.py | 1 - tests/invalid_message_test.py | 1 - tests/message_test.py | 10 +- tests/network_tests/handshake.py | 3 +- tests/network_tests/node_reputation_test.py | 1 - tests/network_tests/session_ip_limit_test.py | 5 +- tests/test_framework/mininode.py | 182 ++++++------------- tests/test_framework/test_framework.py | 9 +- tests/test_framework/test_node.py | 3 +- 14 files changed, 76 insertions(+), 144 deletions(-) diff --git a/tests/conflux/messages.py b/tests/conflux/messages.py index c8c6ff8dd..31c485ed7 100644 --- a/tests/conflux/messages.py +++ b/tests/conflux/messages.py @@ -5,7 +5,6 @@ P2PInterface: A high-level interface object for communicating to a node over P2P """ -import asyncore from collections import defaultdict from io import BytesIO diff --git a/tests/crash_archive_era150_test.py b/tests/crash_archive_era150_test.py index 2c8525a2e..ca4d733ad 100755 --- a/tests/crash_archive_era150_test.py +++ b/tests/crash_archive_era150_test.py @@ -64,7 +64,6 @@ def run_test(self): self.log.info("Stopped all other nodes except node 0") genesis = self.nodes[0].cfx_getBlockByEpochNumber("0x0", False)["hash"] self.nodes[0].add_p2p_connection(P2PInterface(genesis)) - network_thread_start() self.nodes[0].p2p.wait_for_status() self.log.info("p2p connection to node 0 connected") gas_price = 1 diff --git a/tests/crash_archive_test.py b/tests/crash_archive_test.py index 9b1531e09..52aad236a 100755 --- a/tests/crash_archive_test.py +++ b/tests/crash_archive_test.py @@ -57,7 +57,6 @@ def run_test(self): self.stop_node(i) genesis = self.nodes[0].cfx_getBlockByEpochNumber("0x0", False)["hash"] self.nodes[0].add_p2p_connection(P2PInterface(genesis)) - network_thread_start() self.nodes[0].p2p.wait_for_status() client = RpcClient(self.nodes[0]) gas_price = 1 diff --git a/tests/crash_test.py b/tests/crash_test.py index 781008fa2..04df77950 100755 --- a/tests/crash_test.py +++ b/tests/crash_test.py @@ -56,7 +56,6 @@ def run_test(self): self.stop_node(i, kill=True) 
genesis = self.nodes[0].cfx_getBlockByEpochNumber("0x0", False)["hash"] self.nodes[0].add_p2p_connection(P2PInterface(genesis)) - network_thread_start() self.nodes[0].p2p.wait_for_status() client = RpcClient(self.nodes[0]) gas_price = 1 diff --git a/tests/expire_block_test.py b/tests/expire_block_test.py index b81feed42..d15da9332 100755 --- a/tests/expire_block_test.py +++ b/tests/expire_block_test.py @@ -25,7 +25,6 @@ def setup_network(self): genesis = self.nodes[0].cfx_getBlockByEpochNumber("0x0", False)["hash"] self.nodes[0].add_p2p_connection(P2PInterface(genesis)) self.nodes[1].add_p2p_connection(P2PInterface(genesis)) - network_thread_start() self.nodes[0].p2p.wait_for_status() self.nodes[1].p2p.wait_for_status() diff --git a/tests/invalid_block_sync_test.py b/tests/invalid_block_sync_test.py index 705b98bbc..589df3679 100755 --- a/tests/invalid_block_sync_test.py +++ b/tests/invalid_block_sync_test.py @@ -84,7 +84,6 @@ def run_test(self): conn0 = InvalidBodyNode(genesis) conn1 = DefaultNode(genesis) self.nodes[1].add_p2p_connection(conn1) - network_thread_start() conn1.wait_for_status() for (h, b) in conn0.block_map.items(): if h != conn0.invalid_block and h != decode_hex(genesis): diff --git a/tests/invalid_message_test.py b/tests/invalid_message_test.py index 2f7af64b5..c0bf13aef 100755 --- a/tests/invalid_message_test.py +++ b/tests/invalid_message_test.py @@ -40,7 +40,6 @@ def reconnect(self, node: TestNode): time.sleep(0.5) genesis = node.cfx_getBlockByEpochNumber("0x0", False)["hash"] node.add_p2p_connection(DefaultNode(genesis)) - network_thread_start() node.p2p.wait_for_status() def _test_invalid_packet(self): diff --git a/tests/message_test.py b/tests/message_test.py index 1abe0e681..7b1c0c66a 100755 --- a/tests/message_test.py +++ b/tests/message_test.py @@ -82,18 +82,18 @@ def test_socket_msg(self, node): # empty packet buf = struct.pack(" 0: self.recvbuf += buf self._on_data() @@ -206,34 +203,7 @@ def on_protocol_packet(self, protocol, payload): """Callback for processing a protocol-specific P2P payload. Must be overridden by derived class.""" raise NotImplementedError - # Socket write methods - - def writable(self): - """asyncore method to determine whether the handle_write() callback should be called on the next loop.""" - with mininode_lock: - pre_connection = self.state == "connecting" - length = len(self.sendbuf) - return (length > 0 or pre_connection) - - def handle_write(self): - """asyncore callback when data should be written to the socket.""" - with mininode_lock: - # asyncore does not expose socket connection, only the first read/write - # event, thus we must check connection manually here to know when we - # actually connect - if self.state == "connecting": - self.handle_connect() - if not self.writable(): - return - - try: - sent = self.send(self.sendbuf) - except: - self.handle_close() - return - self.sendbuf = self.sendbuf[sent:] - - def send_packet(self, packet_id, payload, pushbuf=False): + def send_packet(self, packet_id, payload): """Send a P2P message over the socket. 
This method takes a P2P payload, builds the P2P header and adds @@ -244,21 +214,13 @@ def send_packet(self, packet_id, payload, pushbuf=False): self.send_data(buf) - def send_data(self, data, pushbuf=False): - if self.state != "connected" and not pushbuf: + def send_data(self, data): + if not self.is_connected: raise IOError('Not connected, no pushbuf') buf = self.assemble_connection_packet(data) - with mininode_lock: - if (len(self.sendbuf) == 0 and not pushbuf): - try: - sent = self.send(buf) - self.sendbuf = buf[sent:] - except BlockingIOError: - self.sendbuf = buf - else: - self.sendbuf += buf + NetworkThread.network_event_loop.call_soon_threadsafe(lambda: self._transport and self._transport.write(buf)) def send_protocol_packet(self, payload): """Send packet of protocols""" @@ -321,7 +283,7 @@ def __init__(self, genesis: str, remote=False): self.remote = remote def peer_connect(self, *args, **kwargs): - super().peer_connect(*args, **kwargs) + return super().peer_connect(*args, **kwargs) def wait_for_status(self, timeout=60): wait_until(lambda: self.had_status, timeout=timeout, lock=mininode_lock) @@ -465,10 +427,6 @@ def on_get_block_hashes_by_epoch(self, msg): resp = BlockHashes(reqid=msg.reqid, hashes=[]) self.send_protocol_msg(resp) -# Keep our own socket map for asyncore, so that we can track disconnects -# ourselves (to work around an issue with closing an asyncore socket when -# using select) -mininode_socket_map = dict() # One lock for synchronizing all data access between the networking thread (see # NetworkThread below) and the thread running the test logic. For simplicity, @@ -483,46 +441,26 @@ def __init__(self, genesis: str, remote = False): super().__init__(genesis, remote) class NetworkThread(threading.Thread): + network_event_loop: asyncio.AbstractEventLoop = None # type: ignore def __init__(self): super().__init__(name="NetworkThread") + # There is only one event loop and no more than one thread must be created + assert not self.network_event_loop - def run(self): - while mininode_socket_map: - # We check for whether to disconnect outside of the asyncore - # loop to work around the behavior of asyncore when using - # select - disconnected = [] - for fd, obj in mininode_socket_map.items(): - if obj.disconnect: - disconnected.append(obj) - [obj.handle_close() for obj in disconnected] - asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1) - logger.debug("Network thread closing") - - -def network_thread_running(): - """Return whether the network thread is running.""" - return any([thread.name == "NetworkThread" for thread in threading.enumerate()]) - - -def network_thread_start(): - """Start the network thread.""" - assert not network_thread_running() - - NetworkThread().start() + NetworkThread.network_event_loop = asyncio.new_event_loop() + def run(self): + """Start the network thread.""" + self.network_event_loop.run_forever() -def network_thread_join(timeout=10): - """Wait timeout seconds for the network thread to terminate. 
+ def close(self, timeout=10): + """Close the connections and network event loop.""" + self.network_event_loop.call_soon_threadsafe(self.network_event_loop.stop) + wait_until(lambda: not self.network_event_loop.is_running(), timeout=timeout) + self.network_event_loop.close() + self.join(timeout) - Throw if network thread doesn't terminate in timeout seconds.""" - network_threads = [ - thread for thread in threading.enumerate() if thread.name == "NetworkThread"] - assert len(network_threads) <= 1 - for thread in network_threads: - thread.join(timeout) - assert not thread.is_alive() def start_p2p_connection(nodes: list, remote=False): if len(nodes) == 0: @@ -537,8 +475,6 @@ def start_p2p_connection(nodes: list, remote=False): p2p_connections.append(conn) node.add_p2p_connection(conn) - network_thread_start() - for p2p in p2p_connections: p2p.wait_for_status() diff --git a/tests/test_framework/test_framework.py b/tests/test_framework/test_framework.py index 1651e548e..878da6330 100644 --- a/tests/test_framework/test_framework.py +++ b/tests/test_framework/test_framework.py @@ -21,7 +21,7 @@ from .authproxy import JSONRPCException from . import coverage -from .mininode import start_p2p_connection +from .mininode import start_p2p_connection, NetworkThread from .test_node import TestNode from .util import ( CONFLUX_RPC_WAIT_TIMEOUT, @@ -197,6 +197,10 @@ def main(self): default=tempfile.mkdtemp(prefix="conflux_test_")) self._start_logging() + + self.log.debug('Setting up network thread') + self.network_thread = NetworkThread() + self.network_thread.start() success = TestStatus.FAILED @@ -232,6 +236,9 @@ def main(self): print( "Testcase failed. Attaching python debugger. Enter ? for help") pdb.set_trace() + + self.log.debug('Closing down network thread') + self.network_thread.close() self.log.debug('Closing down network thread') if not self.options.noshutdown: diff --git a/tests/test_framework/test_node.py b/tests/test_framework/test_node.py index 91425a049..e48d43208 100644 --- a/tests/test_framework/test_node.py +++ b/tests/test_framework/test_node.py @@ -24,7 +24,6 @@ from .authproxy import JSONRPCException from .util import * - class FailedToStartError(Exception): """Raised when a node fails to start correctly.""" @@ -389,7 +388,7 @@ def add_p2p_connection(self, p2p_conn, *args, **kwargs): # if self.ip is not None: # kwargs['dstaddr'] = self.ip # print(args, kwargs) - p2p_conn.peer_connect(*args, **kwargs) + p2p_conn.peer_connect(*args, **kwargs)() self.p2ps.append(p2p_conn) return p2p_conn From 82d2787ecaa477ca8242d86c4da1b7d03dec5e7e Mon Sep 17 00:00:00 2001 From: darwintree <17946284+darwintree@users.noreply.github.com> Date: Fri, 15 Nov 2024 10:06:24 +0800 Subject: [PATCH 29/31] refactor(tests): reduce deprecation warnings in higher version python --- tests/evm_space/filter_block_test.py | 2 +- tests/evm_space/filter_fork_finalize_state_after_fork_test.py | 2 +- tests/evm_space/filter_fork_finalize_state_in_fork_test.py | 2 +- tests/evm_space/filter_lifetime_test.py | 2 +- tests/evm_space/filter_log_test.py | 2 +- tests/evm_space/filter_transaction_test.py | 2 +- tests/evm_space/ws_test.py | 2 +- tests/issue2159_test.py | 2 ++ tests/pubsub/epochs_test.py | 4 ++-- tests/pubsub/eth_logs_test.py | 2 +- tests/pubsub/eth_newHeads_test.py | 2 +- tests/pubsub/logs_test.py | 2 +- tests/pubsub/newHeads_test.py | 2 +- tests/rpc/filter_block_test.py | 2 +- tests/rpc/filter_fork_finalize_state_after_fork_test.py | 2 +- tests/rpc/filter_fork_finalize_state_in_fork_test.py | 2 +- 
tests/rpc/filter_lifetime_test.py | 2 +- tests/rpc/filter_log_test.py | 2 +- tests/rpc/filter_transaction_test.py | 2 +- 19 files changed, 21 insertions(+), 19 deletions(-) diff --git a/tests/evm_space/filter_block_test.py b/tests/evm_space/filter_block_test.py index 1b0bcfda3..00a087dc0 100644 --- a/tests/evm_space/filter_block_test.py +++ b/tests/evm_space/filter_block_test.py @@ -124,7 +124,7 @@ async def run_async(self): def run_test(self): - asyncio.get_event_loop().run_until_complete(self.run_async()) + asyncio.run(self.run_async()) if __name__ == "__main__": diff --git a/tests/evm_space/filter_fork_finalize_state_after_fork_test.py b/tests/evm_space/filter_fork_finalize_state_after_fork_test.py index b9c432f37..fe6778588 100644 --- a/tests/evm_space/filter_fork_finalize_state_after_fork_test.py +++ b/tests/evm_space/filter_fork_finalize_state_after_fork_test.py @@ -117,7 +117,7 @@ async def run_async(self): idx -= 1 def run_test(self): - asyncio.get_event_loop().run_until_complete(self.run_async()) + asyncio.run(self.run_async()) if __name__ == "__main__": diff --git a/tests/evm_space/filter_fork_finalize_state_in_fork_test.py b/tests/evm_space/filter_fork_finalize_state_in_fork_test.py index 708078bbc..596e833c5 100644 --- a/tests/evm_space/filter_fork_finalize_state_in_fork_test.py +++ b/tests/evm_space/filter_fork_finalize_state_in_fork_test.py @@ -119,7 +119,7 @@ async def run_async(self): idx -= 1 def run_test(self): - asyncio.get_event_loop().run_until_complete(self.run_async()) + asyncio.run(self.run_async()) if __name__ == "__main__": diff --git a/tests/evm_space/filter_lifetime_test.py b/tests/evm_space/filter_lifetime_test.py index 0a01b6dda..cf03b4f13 100644 --- a/tests/evm_space/filter_lifetime_test.py +++ b/tests/evm_space/filter_lifetime_test.py @@ -81,7 +81,7 @@ async def run_async(self): raise AssertionError("Expected exception") def run_test(self): - asyncio.get_event_loop().run_until_complete(self.run_async()) + asyncio.run(self.run_async()) if __name__ == "__main__": diff --git a/tests/evm_space/filter_log_test.py b/tests/evm_space/filter_log_test.py index 7430dcccd..cfe0364c0 100644 --- a/tests/evm_space/filter_log_test.py +++ b/tests/evm_space/filter_log_test.py @@ -172,7 +172,7 @@ async def run_async(self): assert_equal(len(logs2), 2 * NUM_CALLS + 2) def run_test(self): - asyncio.get_event_loop().run_until_complete(self.run_async()) + asyncio.run(self.run_async()) def deploy_evm_space(self, data_hex): nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) diff --git a/tests/evm_space/filter_transaction_test.py b/tests/evm_space/filter_transaction_test.py index c1270656b..b663a1aa1 100644 --- a/tests/evm_space/filter_transaction_test.py +++ b/tests/evm_space/filter_transaction_test.py @@ -132,7 +132,7 @@ def wait_to_pack_txs(size): assert_equal(len(filter_txs), 0) def run_test(self): - asyncio.get_event_loop().run_until_complete(self.run_async()) + asyncio.run(self.run_async()) if __name__ == "__main__": diff --git a/tests/evm_space/ws_test.py b/tests/evm_space/ws_test.py index 0817ee3c2..62b4b2b5e 100755 --- a/tests/evm_space/ws_test.py +++ b/tests/evm_space/ws_test.py @@ -46,7 +46,7 @@ async def test_ws(self): self.log.info("Pass") def run_test(self): - asyncio.get_event_loop().run_until_complete(self.test_ws()) + asyncio.run(self.test_ws()) if __name__ == "__main__": EthWsTest().main() diff --git a/tests/issue2159_test.py b/tests/issue2159_test.py index 9a4843f74..eb4266241 100755 --- a/tests/issue2159_test.py +++ b/tests/issue2159_test.py @@ 
-13,6 +13,8 @@ FULLNODE = 0 def block_on(op): + # will raise DeprecationWarning in python higher version (e.g. 3.13) + # ignore it so far return asyncio.get_event_loop().run_until_complete(op) class Issue2159Test(ConfluxTestFramework): diff --git a/tests/pubsub/epochs_test.py b/tests/pubsub/epochs_test.py index 563cbfe91..43e87c821 100755 --- a/tests/pubsub/epochs_test.py +++ b/tests/pubsub/epochs_test.py @@ -199,8 +199,8 @@ async def test_latest_state(self): def run_test(self): assert(SHORT_FORK_LEN < LONG_FORK_LEN) - asyncio.get_event_loop().run_until_complete(self.test_forks()) - asyncio.get_event_loop().run_until_complete(self.test_latest_state()) + asyncio.run(self.test_forks()) + asyncio.run(self.test_latest_state()) def generate_chain(self, parent, len): hashes = [parent] diff --git a/tests/pubsub/eth_logs_test.py b/tests/pubsub/eth_logs_test.py index a9a5c7245..e67eda5e9 100755 --- a/tests/pubsub/eth_logs_test.py +++ b/tests/pubsub/eth_logs_test.py @@ -194,7 +194,7 @@ async def run_async(self): self.log.info(f"Pass -- test #1989 fix") def run_test(self): - asyncio.get_event_loop().run_until_complete(self.run_async()) + asyncio.run(self.run_async()) def deploy_evm_space(self, data_hex): nonce = self.w3.eth.get_transaction_count(self.evmAccount.address) diff --git a/tests/pubsub/eth_newHeads_test.py b/tests/pubsub/eth_newHeads_test.py index 0afbf76ab..bb42d2ca5 100644 --- a/tests/pubsub/eth_newHeads_test.py +++ b/tests/pubsub/eth_newHeads_test.py @@ -75,7 +75,7 @@ async def run_async(self): self.log.info("Pass -- 2") def run_test(self): - asyncio.get_event_loop().run_until_complete(self.run_async()) + asyncio.run(self.run_async()) if __name__ == "__main__": PubSubTest().main() diff --git a/tests/pubsub/logs_test.py b/tests/pubsub/logs_test.py index af8ebc099..dc66a1929 100755 --- a/tests/pubsub/logs_test.py +++ b/tests/pubsub/logs_test.py @@ -152,7 +152,7 @@ async def run_async(self): self.log.info(f"Pass -- test #1989 fix") def run_test(self): - asyncio.get_event_loop().run_until_complete(self.run_async()) + asyncio.run(self.run_async()) def deploy_contract(self, sender, priv_key, data_hex): tx = self.rpc[FULLNODE0].new_contract_tx(receiver="", data_hex=data_hex, sender=sender, priv_key=priv_key, storage_limit=20000) diff --git a/tests/pubsub/newHeads_test.py b/tests/pubsub/newHeads_test.py index 96bcf09b4..a56bab2e7 100644 --- a/tests/pubsub/newHeads_test.py +++ b/tests/pubsub/newHeads_test.py @@ -86,7 +86,7 @@ async def run_async(self): await sub_light.unsubscribe() def run_test(self): - asyncio.get_event_loop().run_until_complete(self.run_async()) + asyncio.run(self.run_async()) if __name__ == "__main__": PubSubTest().main() diff --git a/tests/rpc/filter_block_test.py b/tests/rpc/filter_block_test.py index 04b97cc54..57098054c 100644 --- a/tests/rpc/filter_block_test.py +++ b/tests/rpc/filter_block_test.py @@ -123,7 +123,7 @@ async def run_async(self): idx -= 1 def run_test(self): - asyncio.get_event_loop().run_until_complete(self.run_async()) + asyncio.run(self.run_async()) if __name__ == "__main__": diff --git a/tests/rpc/filter_fork_finalize_state_after_fork_test.py b/tests/rpc/filter_fork_finalize_state_after_fork_test.py index 114392004..a691cee78 100644 --- a/tests/rpc/filter_fork_finalize_state_after_fork_test.py +++ b/tests/rpc/filter_fork_finalize_state_after_fork_test.py @@ -121,7 +121,7 @@ async def run_async(self): idx -= 1 def run_test(self): - asyncio.get_event_loop().run_until_complete(self.run_async()) + asyncio.run(self.run_async()) if __name__ == "__main__": 
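All the hunks in this patch make the same mechanical substitution: `asyncio.get_event_loop().run_until_complete(...)`, which emits a DeprecationWarning on newer Python (3.12+) when no event loop is running, is replaced by `asyncio.run(...)`, which creates a fresh loop, drives the coroutine to completion, and closes the loop. A minimal stand-alone sketch of the before/after pattern follows; the names `run_async`/`run_test_*` mirror the tests, but the snippet itself is illustrative and not code from the repository.

import asyncio

async def run_async():
    # stand-in for the per-test coroutine each test defines
    await asyncio.sleep(0)

def run_test_before():
    # old pattern: implicitly fetch (or create) an event loop, then drive it
    asyncio.get_event_loop().run_until_complete(run_async())

def run_test_after():
    # pattern applied by these hunks: own a fresh loop and close it afterwards
    asyncio.run(run_async())

if __name__ == "__main__":
    run_test_after()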
diff --git a/tests/rpc/filter_fork_finalize_state_in_fork_test.py b/tests/rpc/filter_fork_finalize_state_in_fork_test.py index 99fefe82c..9a6cfa4fa 100644 --- a/tests/rpc/filter_fork_finalize_state_in_fork_test.py +++ b/tests/rpc/filter_fork_finalize_state_in_fork_test.py @@ -114,7 +114,7 @@ async def run_async(self): idx -= 1 def run_test(self): - asyncio.get_event_loop().run_until_complete(self.run_async()) + asyncio.run(self.run_async()) if __name__ == "__main__": diff --git a/tests/rpc/filter_lifetime_test.py b/tests/rpc/filter_lifetime_test.py index 0309eb823..7de2d309a 100644 --- a/tests/rpc/filter_lifetime_test.py +++ b/tests/rpc/filter_lifetime_test.py @@ -70,7 +70,7 @@ async def run_async(self): raise AssertionError("Expected exception") def run_test(self): - asyncio.get_event_loop().run_until_complete(self.run_async()) + asyncio.run(self.run_async()) if __name__ == "__main__": diff --git a/tests/rpc/filter_log_test.py b/tests/rpc/filter_log_test.py index 4fea2358c..fe08f2f3c 100644 --- a/tests/rpc/filter_log_test.py +++ b/tests/rpc/filter_log_test.py @@ -151,7 +151,7 @@ async def run_async(self): assert_equal(len(logs2), 2 * NUM_CALLS + 2) def run_test(self): - asyncio.get_event_loop().run_until_complete(self.run_async()) + asyncio.run(self.run_async()) def deploy_contract(self, client, data_hex): tx = client.new_contract_tx("", data_hex, storage_limit=200000) diff --git a/tests/rpc/filter_transaction_test.py b/tests/rpc/filter_transaction_test.py index a1e0e3ea8..68ab35419 100644 --- a/tests/rpc/filter_transaction_test.py +++ b/tests/rpc/filter_transaction_test.py @@ -101,7 +101,7 @@ def wait_to_pack_txs(size): assert_equal(len(filter_txs), 0) def run_test(self): - asyncio.get_event_loop().run_until_complete(self.run_async()) + asyncio.run(self.run_async()) if __name__ == "__main__": From 59929697de05193c299a6715e13f1a23ef1cc391 Mon Sep 17 00:00:00 2001 From: iosh Date: Sat, 16 Nov 2024 17:59:35 +0800 Subject: [PATCH 30/31] chore: migrate dbs error from error_chain to thiserror --- Cargo.lock | 7 +- .../consensus_executor/epoch_execution.rs | 2 +- .../src/state/overlay_account/ext_fields.rs | 2 +- .../src/state/state_object/cache_layer.rs | 2 +- crates/dbs/db-errors/Cargo.toml | 5 +- crates/dbs/db-errors/src/lib.rs | 5 - crates/dbs/db-errors/src/statedb.rs | 52 ++-- crates/dbs/db-errors/src/storage.rs | 286 ++++++++---------- crates/dbs/statedb/Cargo.toml | 2 +- crates/dbs/statedb/src/in_memory_storage.rs | 4 +- crates/dbs/statedb/src/lib.rs | 8 +- crates/dbs/statedb/src/tests.rs | 4 +- crates/dbs/storage/Cargo.toml | 4 +- .../cache/algorithm/removable_heap.rs | 2 +- .../delta_mpt/mem_optimized_trie_node.rs | 26 +- crates/dbs/storage/src/impls/delta_mpt/mod.rs | 2 +- .../storage/src/impls/delta_mpt/row_number.rs | 2 +- .../storage/src/impls/delta_mpt/slab/mod.rs | 6 +- .../impls/merkle_patricia_trie/mpt_cursor.rs | 10 +- .../impls/merkle_patricia_trie/trie_proof.rs | 2 +- .../dbs/storage/src/impls/single_mpt_state.rs | 2 +- .../restoration/full_sync_verifier.rs | 10 +- crates/dbs/storage/src/impls/state.rs | 4 +- .../storage_db/delta_db_manager_rocksdb.rs | 2 +- .../storage_db/delta_db_manager_sqlite.rs | 2 +- .../src/impls/storage_db/kvdb_sqlite.rs | 18 +- .../impls/storage_db/kvdb_sqlite_sharded.rs | 10 +- .../storage_db/snapshot_db_manager_sqlite.rs | 24 +- .../storage/src/impls/storage_db/sqlite.rs | 2 +- .../single_mpt_storage_manager.rs | 2 +- .../impls/storage_manager/storage_manager.rs | 18 +- crates/dbs/storage/src/lib.rs | 6 +- 32 files changed, 252 insertions(+), 281 
deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7cec58e11..087941fcb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1132,11 +1132,12 @@ name = "cfx-db-errors" version = "2.0.2" dependencies = [ "cfx-types", - "error-chain", + "cfx-util-macros", "primitives", "rlp 0.4.6", "sqlite", "strfmt", + "thiserror", ] [[package]] @@ -1432,7 +1433,7 @@ dependencies = [ "cfx-parameters", "cfx-storage", "cfx-types", - "error-chain", + "cfx-util-macros", "hashbrown 0.7.2", "lazy_static", "log", @@ -1451,9 +1452,9 @@ dependencies = [ "cfx-db-errors", "cfx-internal-common", "cfx-types", + "cfx-util-macros", "delegate 0.5.2", "derivative", - "error-chain", "fallible-iterator", "fs_extra", "futures 0.3.27", diff --git a/crates/cfxcore/core/src/consensus/consensus_inner/consensus_executor/epoch_execution.rs b/crates/cfxcore/core/src/consensus/consensus_inner/consensus_executor/epoch_execution.rs index bbd222777..b19aca092 100644 --- a/crates/cfxcore/core/src/consensus/consensus_inner/consensus_executor/epoch_execution.rs +++ b/crates/cfxcore/core/src/consensus/consensus_inner/consensus_executor/epoch_execution.rs @@ -6,7 +6,7 @@ use cfx_parameters::genesis::GENESIS_ACCOUNT_ADDRESS; use geth_tracer::{GethTraceWithHash, GethTracer, TxExecContext}; use pow_types::StakingEvent; -use cfx_statedb::{ErrorKind as DbErrorKind, Result as DbResult}; +use cfx_statedb::{Error as DbErrorKind, Result as DbResult}; use cfx_types::{AddressSpaceUtil, Space, SpaceMap, H256, U256}; use primitives::{ receipt::BlockReceipts, Action, Block, BlockNumber, Receipt, diff --git a/crates/cfxcore/executor/src/state/overlay_account/ext_fields.rs b/crates/cfxcore/executor/src/state/overlay_account/ext_fields.rs index f0b12be3d..f8f086d74 100644 --- a/crates/cfxcore/executor/src/state/overlay_account/ext_fields.rs +++ b/crates/cfxcore/executor/src/state/overlay_account/ext_fields.rs @@ -1,7 +1,7 @@ use super::OverlayAccount; use cfx_bytes::Bytes; use cfx_statedb::{ - ErrorKind as DbErrorKind, Result as DbResult, StateDbExt, StateDbGeneric, + Error as DbErrorKind, Result as DbResult, StateDbExt, StateDbGeneric, }; use cfx_types::Address; use keccak_hash::KECCAK_EMPTY; diff --git a/crates/cfxcore/executor/src/state/state_object/cache_layer.rs b/crates/cfxcore/executor/src/state/state_object/cache_layer.rs index ae857f82b..6afae73e7 100644 --- a/crates/cfxcore/executor/src/state/state_object/cache_layer.rs +++ b/crates/cfxcore/executor/src/state/state_object/cache_layer.rs @@ -5,7 +5,7 @@ use super::{AccountEntry, OverlayAccount, RequireFields, State}; use crate::unwrap_or_return; use cfx_statedb::{ - ErrorKind as DbErrorKind, Result as DbResult, StateDb, StateDbExt, + Error as DbErrorKind, Result as DbResult, StateDb, StateDbExt, }; use cfx_types::{Address, AddressSpaceUtil, AddressWithSpace, U256}; use parking_lot::{ diff --git a/crates/dbs/db-errors/Cargo.toml b/crates/dbs/db-errors/Cargo.toml index 1b322148f..aef4e1ecd 100644 --- a/crates/dbs/db-errors/Cargo.toml +++ b/crates/dbs/db-errors/Cargo.toml @@ -10,6 +10,7 @@ edition = "2021" primitives = { workspace = true } cfx-types = { workspace = true } rlp = { workspace = true } -error-chain = { version = "0.12", default-features = false } sqlite = "0.25" -strfmt = "0.1" \ No newline at end of file +strfmt = "0.1" +thiserror ={ workspace = true } +cfx-util-macros ={ workspace = true} \ No newline at end of file diff --git a/crates/dbs/db-errors/src/lib.rs b/crates/dbs/db-errors/src/lib.rs index e687b53f1..47a16921e 100644 --- a/crates/dbs/db-errors/src/lib.rs +++ 
b/crates/dbs/db-errors/src/lib.rs @@ -1,7 +1,2 @@ -#![recursion_limit = "512"] - -#[macro_use] -extern crate error_chain; - pub mod statedb; pub mod storage; diff --git a/crates/dbs/db-errors/src/statedb.rs b/crates/dbs/db-errors/src/statedb.rs index f7f8cb3be..7177bc0ca 100644 --- a/crates/dbs/db-errors/src/statedb.rs +++ b/crates/dbs/db-errors/src/statedb.rs @@ -7,25 +7,35 @@ use cfx_types::Address; use primitives::account::AccountError; use rlp::DecoderError; -error_chain! { - links { - } - - foreign_links { - Account(AccountError); - Storage(StorageError); - Decoder(DecoderError); - } - - errors { - IncompleteDatabase(address: Address) { - description("incomplete database") - display("incomplete database: address={:?}", address) - } - - PosDatabaseError(err: String) { - description("PoS database error") - display("PoS database error, err={:?}", err) - } - } +use thiserror::Error; + +#[derive(Error, Debug)] +pub enum Error { + #[error(transparent)] + Account(#[from] AccountError), + + #[error(transparent)] + Storage(#[from] StorageError), + + #[error(transparent)] + Decoder(#[from] DecoderError), + + #[error("incomplete database: address={0:?}")] + IncompleteDatabase(Address), + + #[error("PoS database error, err={0:?}")] + PosDatabaseError(String), + + #[error("{0}")] + Msg(String), +} + +pub type Result = std::result::Result; + +impl From for Error { + fn from(e: String) -> Self { Error::Msg(e) } +} + +impl From<&str> for Error { + fn from(e: &str) -> Self { Error::Msg(e.into()) } } diff --git a/crates/dbs/db-errors/src/storage.rs b/crates/dbs/db-errors/src/storage.rs index 046a8d439..8f91b16ee 100644 --- a/crates/dbs/db-errors/src/storage.rs +++ b/crates/dbs/db-errors/src/storage.rs @@ -4,166 +4,132 @@ use primitives::account::AccountError; use std::{io, num}; - +use thiserror::Error; type DeltaMptId = u16; -error_chain! { - links { - } - - foreign_links { - Account(AccountError); - Io(io::Error); - IntegerConversionError(std::num::TryFromIntError); - ParseIntError(num::ParseIntError); - RlpDecodeError(rlp::DecoderError); - SqliteError(sqlite::Error); - StrfmtFmtError(strfmt::FmtError); - } - - errors { - OutOfCapacity { - description("Out of capacity"), - display("Out of capacity"), - } - - OutOfMem { - description("Out of memory."), - display("Out of memory."), - } - - SlabKeyError { - description("Slab: invalid position accessed"), - display("Slab: invalid position accessed"), - } - - MPTKeyNotFound { - description("Key not found."), - display("Key not found."), - } - - MPTInvalidKeyLength(length: usize, length_limit: usize){ - description("Invalid key length."), - display( - "Invalid key length {}. length must be within [1, {}].", - length, length_limit), - } - - MPTInvalidValueLength(length: usize, length_limit: usize) { - description("Invalid value length."), - display( - "Invalid value length {}. Length must be less than {}", - length, length_limit), - } - - MPTTooManyNodes { - description("Too many nodes."), - display("Too many nodes."), - } - - StateCommitWithoutMerkleHash { - description("State commit called before computing Merkle hash."), - display("State commit called before computing Merkle hash."), - } - - DbNotExist { - description("Not allowed to operate on an readonly empty db."), - display("Not allowed to operate on an readonly empty db."), - } - - // TODO(yz): add error details. 
- DbValueError { - description("Unexpected result from db query."), - display("Unexpected result from db query."), - } - - DbIsUnclean { - description("Db is unclean."), - display("Db is unclean."), - } - - SnapshotCowCreation { - description("Failed to create new snapshot by COW."), - display("Failed to create new snapshot by COW. Use XFS on linux or APFS on Mac."), - } - - SnapshotCopyFailure { - description("Failed to directly copy a snapshot."), - display("Failed to copy a snapshot."), - } - - SnapshotNotFound { - description("Snapshot file not found."), - display("Snapshot file not found."), - } - - SnapshotAlreadyExists { - description("Attempting to create or modify a Snapshot which already exists."), - display("Attempting to create or modify a Snapshot which already exists."), - } - - SnapshotMPTTrieNodeNotFound { - description("Trie node not found when loading Snapshot MPT."), - display("Trie node not found when loading Snapshot MPT."), - } - - TooManyDeltaMPT { - description("Too many Delta MPTs created."), - display("Too many Delta MPTs created ({}).", DeltaMptId::max_value()), - } - - DeltaMPTAlreadyExists { - description("Attempting to create a Delta MPT which already exists."), - display("Attempting to create a Delta MPT which already exists."), - } - - DeltaMPTEntryNotFound { - description("Can't find requested Delta MPT in registry."), - display("Can't find requested Delta MPT in registry."), - } - - DeltaMPTDestroyErrors(e1: Option>, e2: Option>) { - description("Error(s) happened in Delta MPT destroy"), - display( - "Error(s) happened in Delta MPT destroy, error_1: {:?}, error_2: {:?}", - e1.as_ref().map(|x| format!("{}", &**x)), - e2.as_ref().map(|x| format!("{}", &**x)), - ), - } - - UnsupportedByFreshlySyncedSnapshot(op: &'static str) { - description("The operation isn't possible on freshly synced snapshot."), - display("The operation \"{}\" isn't possible on freshly synced snapshot.", op), - } - - InvalidTrieProof { - description("Trie proof is invalid."), - display("Trie proof is invalid."), - } - - InvalidSnapshotSyncProof { - description("Snapshot sync proof is invalid"), - display("Snapshot sync proof is invalid"), - } - - FailedToCreateUnitTestDataDir { - description("Failed to create unit test data dir."), - display("Failed to create unit test data dir."), - } - - ThreadPanicked(msg: String) { - description("Thread panicked."), - display("Thread panicked with message {:?}.", msg), - } - - MpscError { - description("Error from std::sync::mpsc."), - display("Error from std::sync::mpsc."), - } - - SemaphoreTryAcquireError { - description("tokio::sync::Semaphore::try_acquire(): the semaphore is unavailable."), - display("tokio::sync::Semaphore::try_acquire(): the semaphore is unavailable."), - } - } +#[derive(Debug, Error)] +pub enum Error { + #[error(transparent)] + Account(#[from] AccountError), + + #[error(transparent)] + Io(#[from] io::Error), + + #[error(transparent)] + IntegerConversionError(#[from] std::num::TryFromIntError), + + #[error(transparent)] + ParseIntError(#[from] num::ParseIntError), + + #[error(transparent)] + RlpDecodeError(#[from] rlp::DecoderError), + + #[error(transparent)] + SqliteError(#[from] sqlite::Error), + + #[error(transparent)] + StrfmtFmtError(#[from] strfmt::FmtError), + + #[error("Out of capacity")] + OutOfCapacity, + + #[error("Out of memory.")] + OutOfMem, + + #[error("Slab: invalid position accessed")] + SlabKeyError, + + #[error("Key not found.")] + MPTKeyNotFound, + + #[error("Invalid key length {length}. 
length must be within [1, {length_limit}].")] + MPTInvalidKeyLength { length: usize, length_limit: usize }, + + #[error("Invalid value length {length}. Length must be less than {length_limit}")] + MPTInvalidValueLength { length: usize, length_limit: usize }, + + #[error("Too many nodes.")] + MPTTooManyNodes, + + #[error("State commit called before computing Merkle hash.")] + StateCommitWithoutMerkleHash, + + #[error("Not allowed to operate on an readonly empty db.")] + DbNotExist, + + // TODO(yz): add error details. + #[error("Unexpected result from db query.")] + DbValueError, + + #[error("Db is unclean.")] + DbIsUnclean, + + #[error("Failed to create new snapshot by COW. Use XFS on linux or APFS on Mac.")] + SnapshotCowCreation, + + #[error("Failed to copy a snapshot.")] + SnapshotCopyFailure, + + #[error("Snapshot file not found.")] + SnapshotNotFound, + + #[error("Attempting to create or modify a Snapshot which already exists.")] + SnapshotAlreadyExists, + + #[error("Trie node not found when loading Snapshot MPT.")] + SnapshotMPTTrieNodeNotFound, + + #[error("Too many Delta MPTs created ({}).", DeltaMptId::max_value())] + TooManyDeltaMPT, + + #[error("Attempting to create a Delta MPT which already exists.")] + DeltaMPTAlreadyExists, + + #[error("Can't find requested Delta MPT in registry.")] + DeltaMPTEntryNotFound, + + #[error( + "Error(s) happened in Delta MPT destroy, error_1: {e1:?}, error_2: {e2:?}" + )] + DeltaMPTDestroyErrors { + e1: Option>, + e2: Option>, + }, + + #[error( + "The operation \"{0}\" isn't possible on freshly synced snapshot." + )] + UnsupportedByFreshlySyncedSnapshot(&'static str), + + #[error("Trie proof is invalid.")] + InvalidTrieProof, + + #[error("Snapshot sync proof is invalid")] + InvalidSnapshotSyncProof, + + #[error("Failed to create unit test data dir.")] + FailedToCreateUnitTestDataDir, + + #[error("Thread panicked with message {0:?}.")] + ThreadPanicked(String), + + #[error("Error from std::sync::mpsc.")] + MpscError, + + #[error( + "tokio::sync::Semaphore::try_acquire(): the semaphore is unavailable." 
+ )] + SemaphoreTryAcquireError, + + #[error("{0}")] + Msg(String), +} + +pub type Result = std::result::Result; + +impl From for Error { + fn from(e: String) -> Self { Error::Msg(e) } +} +impl From<&str> for Error { + fn from(e: &str) -> Self { Error::Msg(e.into()) } } diff --git a/crates/dbs/statedb/Cargo.toml b/crates/dbs/statedb/Cargo.toml index cc9430961..21d144b7f 100644 --- a/crates/dbs/statedb/Cargo.toml +++ b/crates/dbs/statedb/Cargo.toml @@ -12,7 +12,6 @@ cfx-parameters = { workspace = true } cfx-db-errors = { workspace = true } cfx-storage = { workspace = true } cfx-types = { workspace = true } -error-chain = { version = "0.12", default-features = false } hashbrown = "0.7.1" log = "0.4" parking_lot = { workspace = true } @@ -20,6 +19,7 @@ primitives = { workspace = true, optional = true } rlp = { workspace = true } tiny-keccak = { workspace = true } lazy_static = "1.4.0" +cfx-util-macros ={ workspace = true} [dev-dependencies] primitives = { workspace = true, features = ["test_no_account_length_check"] } diff --git a/crates/dbs/statedb/src/in_memory_storage.rs b/crates/dbs/statedb/src/in_memory_storage.rs index 3bdbd9f60..5191e8b40 100644 --- a/crates/dbs/statedb/src/in_memory_storage.rs +++ b/crates/dbs/statedb/src/in_memory_storage.rs @@ -4,7 +4,7 @@ use std::{ }; use cfx_internal_common::StateRootWithAuxInfo; -use cfx_storage::{state::StateTrait as StorageTrait, ErrorKind, Result}; +use cfx_storage::{state::StateTrait as StorageTrait, Error, Result}; use cfx_types::H256; use primitives::StorageKeyWithSpace; use tiny_keccak::{Hasher, Keccak}; @@ -96,7 +96,7 @@ impl StorageTrait for InmemoryStorage { fn get_state_root(&self) -> Result { self.cached_state_root .clone() - .ok_or(ErrorKind::Msg("No state root".to_owned()).into()) + .ok_or(Error::Msg("No state root".to_owned()).into()) } fn commit( diff --git a/crates/dbs/statedb/src/lib.rs b/crates/dbs/statedb/src/lib.rs index eb497bc0c..543128a09 100644 --- a/crates/dbs/statedb/src/lib.rs +++ b/crates/dbs/statedb/src/lib.rs @@ -3,7 +3,7 @@ // See http://www.gnu.org/licenses/ #[macro_use] -extern crate error_chain; +extern crate cfx_util_macros; #[macro_use] extern crate log; @@ -18,7 +18,7 @@ use cfx_db_errors::statedb as error; mod tests; pub use self::{ - error::{Error, ErrorKind, Result}, + error::{Error, Result}, impls::StateDb as StateDbGeneric, statedb_ext::StateDbExt, }; @@ -289,7 +289,7 @@ mod impls { // This is defensive checking, against certain // cases when we are not deleting the account // for sure. - bail!(ErrorKind::IncompleteDatabase( + bail!(Error::IncompleteDatabase( Address::from_slice(address) )); } @@ -300,7 +300,7 @@ mod impls { None => match storage.get(storage_layout_key)? { // A new account must set StorageLayout before accessing // the storage. 
- None => bail!(ErrorKind::IncompleteDatabase( + None => bail!(Error::IncompleteDatabase( Address::from_slice(address) )), Some(raw) => StorageLayout::from_bytes(raw.as_ref())?, diff --git a/crates/dbs/statedb/src/tests.rs b/crates/dbs/statedb/src/tests.rs index 4cd1d44df..380665f2c 100644 --- a/crates/dbs/statedb/src/tests.rs +++ b/crates/dbs/statedb/src/tests.rs @@ -5,7 +5,7 @@ use super::StateDbGeneric; use cfx_internal_common::StateRootWithAuxInfo; use cfx_storage::{ - utils::access_mode, ErrorKind, MptKeyValue, Result, StorageStateTrait, + utils::access_mode, Error, MptKeyValue, Result, StorageStateTrait, }; use parking_lot::Mutex; use primitives::{EpochId, StorageKey, StorageKeyWithSpace, MERKLE_NULL_NODE}; @@ -103,7 +103,7 @@ impl StorageStateTrait for MockStorage { } fn get_state_root(&self) -> Result { - Err(ErrorKind::Msg("No state root".to_owned()).into()) + Err(Error::Msg("No state root".to_owned()).into()) } fn set( diff --git a/crates/dbs/storage/Cargo.toml b/crates/dbs/storage/Cargo.toml index 87a0cff15..fc05365a7 100644 --- a/crates/dbs/storage/Cargo.toml +++ b/crates/dbs/storage/Cargo.toml @@ -14,7 +14,6 @@ cfx-internal-common = { workspace = true } cfx-types = { workspace = true } delegate = "0.5.0" derivative = "2.0.2" -error-chain = { version = "0.12", default-features = false } fallible-iterator = "0.2" fs_extra = "1.1.0" futures = { version = "0.3.3", features = ["compat"] } @@ -31,7 +30,7 @@ memoffset = "0.5.1" parking_lot = { workspace = true } primitives = { workspace = true, optional = true } rand = "0.7" -rand_chacha="0.2.1" +rand_chacha = "0.2.1" random-crash = { workspace = true } rlp = { workspace = true } rlp_derive = { workspace = true } @@ -44,6 +43,7 @@ strfmt = "0.1" tokio = { version = "0.2", features = ["full"] } once_cell = "1.10.0" parity-util-mem = { workspace = true, default-features = false } +cfx-util-macros = { workspace = true } [dev-dependencies] primitives = { workspace = true, features = ["test_no_account_length_check"] } diff --git a/crates/dbs/storage/src/impls/delta_mpt/cache/algorithm/removable_heap.rs b/crates/dbs/storage/src/impls/delta_mpt/cache/algorithm/removable_heap.rs index 2109978e0..b529c82f1 100644 --- a/crates/dbs/storage/src/impls/delta_mpt/cache/algorithm/removable_heap.rs +++ b/crates/dbs/storage/src/impls/delta_mpt/cache/algorithm/removable_heap.rs @@ -522,7 +522,7 @@ impl RemovableHeap { &mut self, value: ValueType, value_util: &mut ValueUtilT, ) -> Result { if self.array.capacity() == self.array.len() { - return Err(ErrorKind::OutOfCapacity.into()); + return Err(Error::OutOfCapacity.into()); } let mut hole: Hole = unsafe { mem::uninitialized() }; diff --git a/crates/dbs/storage/src/impls/delta_mpt/mem_optimized_trie_node.rs b/crates/dbs/storage/src/impls/delta_mpt/mem_optimized_trie_node.rs index 10480ebce..807ba2fad 100644 --- a/crates/dbs/storage/src/impls/delta_mpt/mem_optimized_trie_node.rs +++ b/crates/dbs/storage/src/impls/delta_mpt/mem_optimized_trie_node.rs @@ -245,10 +245,10 @@ impl MemOptimizedTrieNode { pub fn check_value_size(value: &[u8]) -> Result<()> { let value_size = value.len(); if ValueSizeFieldConverter::is_size_over_limit(value_size) { - return Err(Error::from_kind(ErrorKind::MPTInvalidValueLength( - value_size, - ValueSizeFieldConverter::max_size(), - ))); + return Err(Error::MPTInvalidValueLength { + length: value_size, + length_limit: ValueSizeFieldConverter::max_size(), + }); } // We may use empty value to represent special state, such as tombstone. // Therefore We don't check for emptiness. 
@@ -259,16 +259,16 @@ impl MemOptimizedTrieNode { pub fn check_key_size(access_key: &[u8]) -> Result<()> { let key_size = access_key.len(); if TrivialSizeFieldConverterU16::is_size_over_limit(key_size) { - return Err(Error::from_kind(ErrorKind::MPTInvalidKeyLength( - key_size, - TrivialSizeFieldConverterU16::max_size(), - ))); + return Err(Error::MPTInvalidKeyLength { + length: key_size, + length_limit: TrivialSizeFieldConverterU16::max_size(), + }); } if key_size == 0 { - return Err(Error::from_kind(ErrorKind::MPTInvalidKeyLength( - key_size, - TrivialSizeFieldConverterU16::max_size(), - ))); + return Err(Error::MPTInvalidKeyLength { + length: key_size, + length_limit: TrivialSizeFieldConverterU16::max_size(), + }); } Ok(()) @@ -461,7 +461,7 @@ impl MemOptimizedTrieNode { _ => TrieNodeAction::Modify, }) } else { - Err(ErrorKind::MPTKeyNotFound.into()) + Err(Error::MPTKeyNotFound.into()) } } diff --git a/crates/dbs/storage/src/impls/delta_mpt/mod.rs b/crates/dbs/storage/src/impls/delta_mpt/mod.rs index 2863eba11..f22ab6ed8 100644 --- a/crates/dbs/storage/src/impls/delta_mpt/mod.rs +++ b/crates/dbs/storage/src/impls/delta_mpt/mod.rs @@ -425,7 +425,7 @@ impl DeltaMptIdGen { id = Ok(self.id_limit); self.id_limit += 1; } else { - id = Err(ErrorKind::TooManyDeltaMPT.into()) + id = Err(Error::TooManyDeltaMPT.into()) } } Some(x) => id = Ok(x), diff --git a/crates/dbs/storage/src/impls/delta_mpt/row_number.rs b/crates/dbs/storage/src/impls/delta_mpt/row_number.rs index fe113270f..8be328859 100644 --- a/crates/dbs/storage/src/impls/delta_mpt/row_number.rs +++ b/crates/dbs/storage/src/impls/delta_mpt/row_number.rs @@ -32,7 +32,7 @@ impl RowNumber { value: self.value + 1, }) } else { - Err(ErrorKind::MPTTooManyNodes.into()) + Err(Error::MPTTooManyNodes.into()) } } } diff --git a/crates/dbs/storage/src/impls/delta_mpt/slab/mod.rs b/crates/dbs/storage/src/impls/delta_mpt/slab/mod.rs index 33a19c548..c3f7083ae 100644 --- a/crates/dbs/storage/src/impls/delta_mpt/slab/mod.rs +++ b/crates/dbs/storage/src/impls/delta_mpt/slab/mod.rs @@ -769,7 +769,7 @@ impl> Slab { let mut alloc_fields = self.alloc_fields.lock(); let key = alloc_fields.next; if key == self.entries.capacity() { - Err(Error::from_kind(ErrorKind::OutOfMem)) + Err(Error::OutOfMem) } else { alloc_fields.used += 1; if key == alloc_fields.size_initialized { @@ -848,14 +848,14 @@ impl> Slab { pub fn remove(&self, key: usize) -> Result { if key > self.entries.len() { // Index out of range. - return Err(Error::from_kind(ErrorKind::SlabKeyError)); + return Err(Error::SlabKeyError); } let mut alloc_fields = self.alloc_fields.lock(); let next = alloc_fields.next; let entry = self.cast_entry_ref_mut(key); if entry.is_vacant() { // Trying to free unallocated space. - Err(Error::from_kind(ErrorKind::SlabKeyError)) + Err(Error::SlabKeyError) } else { alloc_fields.used -= 1; alloc_fields.next = key; diff --git a/crates/dbs/storage/src/impls/merkle_patricia_trie/mpt_cursor.rs b/crates/dbs/storage/src/impls/merkle_patricia_trie/mpt_cursor.rs index 63bb3b33c..ad9503e69 100644 --- a/crates/dbs/storage/src/impls/merkle_patricia_trie/mpt_cursor.rs +++ b/crates/dbs/storage/src/impls/merkle_patricia_trie/mpt_cursor.rs @@ -591,7 +591,7 @@ impl CursorLoadNodeWrapper for BasicPathNode { ) -> Result { mpt.get_read_mpt() .load_node(path)? - .ok_or(Error::from(ErrorKind::SnapshotMPTTrieNodeNotFound)) + .ok_or(Error::from(Error::SnapshotMPTTrieNodeNotFound)) } } @@ -603,7 +603,7 @@ impl CursorLoadNodeWrapper ) -> Result { mpt.get_read_mpt() .load_node(path)? 
- .ok_or(Error::from(ErrorKind::SnapshotMPTTrieNodeNotFound)) + .ok_or(Error::from(Error::SnapshotMPTTrieNodeNotFound)) } } @@ -624,7 +624,7 @@ impl> Ok(None) => { self.set_has_io_error(); - Err(Error::from(ErrorKind::SnapshotMPTTrieNodeNotFound)) + Err(Error::from(Error::SnapshotMPTTrieNodeNotFound)) } } } @@ -819,8 +819,8 @@ pub trait PathNodeTrait: node } - Err(e) => match e.kind() { - ErrorKind::SnapshotMPTTrieNodeNotFound => { + Err(e) => match e { + Error::SnapshotMPTTrieNodeNotFound => { mpt_is_empty = true; SnapshotMptNode(VanillaTrieNode::new( diff --git a/crates/dbs/storage/src/impls/merkle_patricia_trie/trie_proof.rs b/crates/dbs/storage/src/impls/merkle_patricia_trie/trie_proof.rs index 9468b8ed2..0ea7a812a 100644 --- a/crates/dbs/storage/src/impls/merkle_patricia_trie/trie_proof.rs +++ b/crates/dbs/storage/src/impls/merkle_patricia_trie/trie_proof.rs @@ -70,7 +70,7 @@ impl TrieProof { for (node_index, node) in nodes.iter().enumerate() { if !connected_child_parent_map.contains_key(node.get_merkle()) { // Not connected. - bail!(ErrorKind::InvalidTrieProof); + bail!(Error::InvalidTrieProof); } for (child_index, child_merkle) in node.get_children_table_ref().iter() diff --git a/crates/dbs/storage/src/impls/single_mpt_state.rs b/crates/dbs/storage/src/impls/single_mpt_state.rs index bfafc2f43..eb8df8ab1 100644 --- a/crates/dbs/storage/src/impls/single_mpt_state.rs +++ b/crates/dbs/storage/src/impls/single_mpt_state.rs @@ -95,7 +95,7 @@ impl SingleMptState { Some(merkle_hash) => { // Non-empty state if merkle_hash.is_zero() { - Err(ErrorKind::StateCommitWithoutMerkleHash.into()) + Err(Error::StateCommitWithoutMerkleHash.into()) } else { Ok(merkle_hash) } diff --git a/crates/dbs/storage/src/impls/snapshot_sync/restoration/full_sync_verifier.rs b/crates/dbs/storage/src/impls/snapshot_sync/restoration/full_sync_verifier.rs index 1ffc4a416..b45bf7721 100644 --- a/crates/dbs/storage/src/impls/snapshot_sync/restoration/full_sync_verifier.rs +++ b/crates/dbs/storage/src/impls/snapshot_sync/restoration/full_sync_verifier.rs @@ -27,10 +27,10 @@ impl epoch_height: u64, ) -> Result { if number_chunks != chunk_boundaries.len() + 1 { - bail!(ErrorKind::InvalidSnapshotSyncProof) + bail!(Error::InvalidSnapshotSyncProof) } if number_chunks != chunk_boundary_proofs.len() + 1 { - bail!(ErrorKind::InvalidSnapshotSyncProof) + bail!(Error::InvalidSnapshotSyncProof) } let mut chunk_index_by_upper_key = HashMap::new(); for (chunk_index, (chunk_boundary, proof)) in chunk_boundaries @@ -39,16 +39,16 @@ impl .enumerate() { if merkle_root.ne(proof.get_merkle_root()) { - bail!(ErrorKind::InvalidSnapshotSyncProof) + bail!(Error::InvalidSnapshotSyncProof) } // We don't want the proof to carry extra nodes. 
if proof.number_leaf_nodes() != 1 { - bail!(ErrorKind::InvalidSnapshotSyncProof) + bail!(Error::InvalidSnapshotSyncProof) } if proof.if_proves_key(&*chunk_boundary) != (true, proof.get_proof_nodes().last()) { - bail!(ErrorKind::InvalidSnapshotSyncProof) + bail!(Error::InvalidSnapshotSyncProof) } chunk_index_by_upper_key .insert(chunk_boundary.clone(), chunk_index); diff --git a/crates/dbs/storage/src/impls/state.rs b/crates/dbs/storage/src/impls/state.rs index bc709ec32..f9a87b6a2 100644 --- a/crates/dbs/storage/src/impls/state.rs +++ b/crates/dbs/storage/src/impls/state.rs @@ -94,7 +94,7 @@ impl State { { Ok(()) } else { - Err(ErrorKind::UnsupportedByFreshlySyncedSnapshot(op).into()) + Err(Error::UnsupportedByFreshlySyncedSnapshot(op).into()) } } @@ -775,7 +775,7 @@ impl State { Some(merkle_hash) => { // Non-empty state if merkle_hash.is_zero() { - Err(ErrorKind::StateCommitWithoutMerkleHash.into()) + Err(Error::StateCommitWithoutMerkleHash.into()) } else { Ok(merkle_hash) } diff --git a/crates/dbs/storage/src/impls/storage_db/delta_db_manager_rocksdb.rs b/crates/dbs/storage/src/impls/storage_db/delta_db_manager_rocksdb.rs index 5ac35e3a9..f8daea4a9 100644 --- a/crates/dbs/storage/src/impls/storage_db/delta_db_manager_rocksdb.rs +++ b/crates/dbs/storage/src/impls/storage_db/delta_db_manager_rocksdb.rs @@ -52,7 +52,7 @@ impl DeltaDbManagerTrait for DeltaDbManagerRocksdb { let path = self.get_delta_db_path(delta_db_name); if path.exists() { - Err(ErrorKind::DeltaMPTAlreadyExists.into()) + Err(Error::DeltaMPTAlreadyExists.into()) } else { Ok(KvdbRocksdb { kvdb: Arc::new(Database::open( diff --git a/crates/dbs/storage/src/impls/storage_db/delta_db_manager_sqlite.rs b/crates/dbs/storage/src/impls/storage_db/delta_db_manager_sqlite.rs index fdca56e78..ccabc0db0 100644 --- a/crates/dbs/storage/src/impls/storage_db/delta_db_manager_sqlite.rs +++ b/crates/dbs/storage/src/impls/storage_db/delta_db_manager_sqlite.rs @@ -59,7 +59,7 @@ impl DeltaDbManagerTrait for DeltaDbManagerSqlite { let path_str = self.get_delta_db_path(delta_db_name); if Path::new(&path_str).exists() { - Err(ErrorKind::DeltaMPTAlreadyExists.into()) + Err(Error::DeltaMPTAlreadyExists.into()) } else { fs::create_dir_all(&path_str).ok(); KvdbSqliteSharded::create_and_open( diff --git a/crates/dbs/storage/src/impls/storage_db/kvdb_sqlite.rs b/crates/dbs/storage/src/impls/storage_db/kvdb_sqlite.rs index a62e79164..516fbeaa7 100644 --- a/crates/dbs/storage/src/impls/storage_db/kvdb_sqlite.rs +++ b/crates/dbs/storage/src/impls/storage_db/kvdb_sqlite.rs @@ -464,7 +464,7 @@ where ValueType::Type: SqlBindableValue &self, immediate_write: bool, ) -> Result> { if self.connection.is_none() { - bail!(ErrorKind::DbNotExist); + bail!(Error::DbNotExist); } KvdbSqliteTransaction::new(self.try_clone()?, immediate_write) @@ -773,7 +773,7 @@ where ValueType::Type: SqlBindableValue ) -> Result>> { let (connection, statements) = self.destructure_mut(); match connection { - None => Err(Error::from(ErrorKind::DbNotExist)), + None => Err(Error::from(Error::DbNotExist)), Some(conn) => { conn.execute( &statements.stmts_bytes_key_table.delete, @@ -790,7 +790,7 @@ where ValueType::Type: SqlBindableValue ) -> Result::ValueType>>> { let (connection, statements) = self.destructure_mut(); match connection { - None => Err(Error::from(ErrorKind::DbNotExist)), + None => Err(Error::from(Error::DbNotExist)), Some(conn) => { conn.execute( &statements.stmts_main_table.delete, @@ -808,7 +808,7 @@ where ValueType::Type: SqlBindableValue random_crash_if_enabled("sqlite 
put"); let (connection, statements) = self.destructure_mut(); match connection { - None => Err(Error::from(ErrorKind::DbNotExist)), + None => Err(Error::from(Error::DbNotExist)), Some(conn) => { let mut bind_list = Vec::::new(); bind_list.push(Box::new(&key)); @@ -831,7 +831,7 @@ where ValueType::Type: SqlBindableValue random_crash_if_enabled("sqlite put_with_number_key"); let (connection, statements) = self.destructure_mut(); match connection { - None => Err(Error::from(ErrorKind::DbNotExist)), + None => Err(Error::from(Error::DbNotExist)), Some(conn) => { let mut bind_list = Vec::::new(); bind_list.push(Box::new(key)); @@ -861,7 +861,7 @@ where ValueType::Type: SqlBindableValue random_crash_if_enabled("sqlite delete"); let (connection, statements) = self.destructure(); match connection { - None => Err(Error::from(ErrorKind::DbNotExist)), + None => Err(Error::from(Error::DbNotExist)), Some(conn) => { let mut db = conn.lock_db(); let mut statement_cache = conn.lock_statement_cache(); @@ -887,7 +887,7 @@ where ValueType::Type: SqlBindableValue random_crash_if_enabled("sqlite delete_with_number_key"); let (connection, statements) = self.destructure(); match connection { - None => Err(Error::from(ErrorKind::DbNotExist)), + None => Err(Error::from(Error::DbNotExist)), Some(conn) => { let mut db = conn.lock_db(); let mut statement_cache = conn.lock_statement_cache(); @@ -912,7 +912,7 @@ where ValueType::Type: SqlBindableValue ) -> Result>> { let (connection, statements) = self.destructure(); match connection { - None => Err(Error::from(ErrorKind::DbNotExist)), + None => Err(Error::from(Error::DbNotExist)), Some(conn) => { let mut bind_list = Vec::::new(); bind_list.push(Box::new(&key)); @@ -941,7 +941,7 @@ where ValueType::Type: SqlBindableValue ) -> Result>> { let (connection, statements) = self.destructure(); match connection { - None => Err(Error::from(ErrorKind::DbNotExist)), + None => Err(Error::from(Error::DbNotExist)), Some(conn) => { let mut bind_list = Vec::::new(); bind_list.push(Box::new(key)); diff --git a/crates/dbs/storage/src/impls/storage_db/kvdb_sqlite_sharded.rs b/crates/dbs/storage/src/impls/storage_db/kvdb_sqlite_sharded.rs index f2e973c02..016ebf2a6 100644 --- a/crates/dbs/storage/src/impls/storage_db/kvdb_sqlite_sharded.rs +++ b/crates/dbs/storage/src/impls/storage_db/kvdb_sqlite_sharded.rs @@ -405,7 +405,7 @@ where ValueType::Type: SqlBindableValue ) -> Result>> { let (maybe_connections, statements) = self.destructure_mut(); match maybe_connections { - None => Err(Error::from(ErrorKind::DbNotExist)), + None => Err(Error::from(Error::DbNotExist)), Some(connections) => KvdbSqliteBorrowMut::::new(( connections.get_mut(key_to_shard_id(key, connections.len())), statements, @@ -419,7 +419,7 @@ where ValueType::Type: SqlBindableValue ) -> Result>> { let (maybe_connections, statements) = self.destructure_mut(); match maybe_connections { - None => Err(Error::from(ErrorKind::DbNotExist)), + None => Err(Error::from(Error::DbNotExist)), Some(connections) => KvdbSqliteBorrowMut::new(( connections .get_mut(number_key_to_shard_id(key, connections.len())), @@ -434,7 +434,7 @@ where ValueType::Type: SqlBindableValue ) -> Result>> { let (maybe_connections, statements) = self.destructure_mut(); match maybe_connections { - None => Err(Error::from(ErrorKind::DbNotExist)), + None => Err(Error::from(Error::DbNotExist)), Some(connections) => KvdbSqliteBorrowMut::::new(( connections.get_mut(key_to_shard_id(key, connections.len())), statements, @@ -448,7 +448,7 @@ where ValueType::Type: 
SqlBindableValue ) -> Result>> { let (maybe_connections, statements) = self.destructure_mut(); match maybe_connections { - None => Err(Error::from(ErrorKind::DbNotExist)), + None => Err(Error::from(Error::DbNotExist)), Some(connections) => KvdbSqliteBorrowMut::new(( connections .get_mut(number_key_to_shard_id(key, connections.len())), @@ -902,7 +902,7 @@ where ValueType::Type: SqlBindableValue &self, immediate_write: bool, ) -> Result> { if self.shards_connections.is_none() { - bail!(ErrorKind::DbNotExist); + bail!(Error::DbNotExist); } KvdbSqliteShardedTransaction::new(self.try_clone()?, immediate_write) diff --git a/crates/dbs/storage/src/impls/storage_db/snapshot_db_manager_sqlite.rs b/crates/dbs/storage/src/impls/storage_db/snapshot_db_manager_sqlite.rs index 8a79cca04..89f0ca52f 100644 --- a/crates/dbs/storage/src/impls/storage_db/snapshot_db_manager_sqlite.rs +++ b/crates/dbs/storage/src/impls/storage_db/snapshot_db_manager_sqlite.rs @@ -289,7 +289,7 @@ impl SnapshotDbManagerSqlite { .try_acquire() // Unfortunately we have to use map_error because the // TryAcquireError isn't public. - .map_err(|_err| ErrorKind::SemaphoreTryAcquireError)? + .map_err(|_err| Error::SemaphoreTryAcquireError)? } else { executor::block_on(self.open_snapshot_semaphore.acquire()) }; @@ -406,7 +406,7 @@ impl SnapshotDbManagerSqlite { .get(&snapshot_path) .is_some() { - bail!(ErrorKind::SnapshotAlreadyExists) + bail!(Error::SnapshotAlreadyExists) } let semaphore_permit = @@ -421,7 +421,7 @@ impl SnapshotDbManagerSqlite { .get(&snapshot_path) .is_some() { - bail!(ErrorKind::SnapshotAlreadyExists) + bail!(Error::SnapshotAlreadyExists) } let mpt_table_in_current_db = @@ -450,7 +450,7 @@ impl SnapshotDbManagerSqlite { Ok(db) } else { - bail!(ErrorKind::SnapshotNotFound); + bail!(Error::SnapshotNotFound); } }?; @@ -494,7 +494,7 @@ impl SnapshotDbManagerSqlite { .try_acquire() // Unfortunately we have to use map_error because the // TryAcquireError isn't public. - .map_err(|_err| ErrorKind::SemaphoreTryAcquireError)? + .map_err(|_err| Error::SemaphoreTryAcquireError)? } else { executor::block_on(self.mpt_open_snapshot_semaphore.acquire()) }; @@ -580,7 +580,7 @@ impl SnapshotDbManagerSqlite { .get(&snapshot_path) .is_some() { - bail!(ErrorKind::SnapshotAlreadyExists) + bail!(Error::SnapshotAlreadyExists) } let semaphore_permit = @@ -604,7 +604,7 @@ impl SnapshotDbManagerSqlite { Some(self.latest_mpt_snapshot_semaphore.clone()), ) } else { - bail!(ErrorKind::SnapshotNotFound); + bail!(Error::SnapshotNotFound); } }?; @@ -803,7 +803,7 @@ impl SnapshotDbManagerSqlite { "COW copy failed, check file system support. Command {:?}", command, ); - Err(ErrorKind::SnapshotCowCreation.into()) + Err(Error::SnapshotCowCreation.into()) } else { info!( "COW copy failed, check file system support. Command {:?}", @@ -836,7 +836,7 @@ impl SnapshotDbManagerSqlite { "Fail to copy snapshot {:?}, err={:?}", old_snapshot_path, e, ); - ErrorKind::SnapshotCopyFailure.into() + Error::SnapshotCopyFailure.into() }) } } @@ -862,7 +862,7 @@ impl SnapshotDbManagerSqlite { "Failed to create a new snapshot by COW. 
\ Use XFS on linux or APFS on Mac" ); - Err(ErrorKind::SnapshotCowCreation.into()) + Err(Error::SnapshotCowCreation.into()) } else { Ok(false) } @@ -885,7 +885,7 @@ impl SnapshotDbManagerSqlite { false, )?; let old_snapshot_db = maybe_old_snapshot_db - .ok_or(Error::from(ErrorKind::SnapshotNotFound))?; + .ok_or(Error::from(Error::SnapshotNotFound))?; temp_snapshot_db.copy_and_merge( &old_snapshot_db.snapshot_db, mpt_snapshot_db, @@ -1158,7 +1158,7 @@ impl SnapshotDbManagerTrait for SnapshotDbManagerSqlite { false, )?; old_snapshot = maybe_old_snapshot_db - .ok_or(Error::from(ErrorKind::SnapshotNotFound))?; + .ok_or(Error::from(Error::SnapshotNotFound))?; if old_snapshot.is_mpt_table_in_current_db() { Some(&old_snapshot.snapshot_db) } else { diff --git a/crates/dbs/storage/src/impls/storage_db/sqlite.rs b/crates/dbs/storage/src/impls/storage_db/sqlite.rs index 42d24ac94..8733b5174 100644 --- a/crates/dbs/storage/src/impls/storage_db/sqlite.rs +++ b/crates/dbs/storage/src/impls/storage_db/sqlite.rs @@ -377,7 +377,7 @@ impl<'db, Item, F: FnMut(&Statement<'db>) -> Item> MappedRows<'db, F> { if MaybeRows::next(&mut self.maybe_rows)?.is_none() { Ok(Some(row_mapped)) } else { - bail!(ErrorKind::DbValueError) + bail!(Error::DbValueError) } } } diff --git a/crates/dbs/storage/src/impls/storage_manager/single_mpt_storage_manager.rs b/crates/dbs/storage/src/impls/storage_manager/single_mpt_storage_manager.rs index 0062700ce..b0ba68a05 100644 --- a/crates/dbs/storage/src/impls/storage_manager/single_mpt_storage_manager.rs +++ b/crates/dbs/storage/src/impls/storage_manager/single_mpt_storage_manager.rs @@ -128,7 +128,7 @@ impl OpenableOnDemandOpenDeltaDbTrait for SingleMptDbManager { *maybe_mpt = Some(mpt.clone()); Ok(mpt) } else { - Err(ErrorKind::DbNotExist.into()) + Err(Error::DbNotExist.into()) } } } diff --git a/crates/dbs/storage/src/impls/storage_manager/storage_manager.rs b/crates/dbs/storage/src/impls/storage_manager/storage_manager.rs index a90aea85d..95485dbad 100644 --- a/crates/dbs/storage/src/impls/storage_manager/storage_manager.rs +++ b/crates/dbs/storage/src/impls/storage_manager/storage_manager.rs @@ -188,7 +188,7 @@ impl InProgressSnapshotTask { if let Some(join_handle) = self.thread_handle.take() { match join_handle.join() { Ok(task_result) => Some(task_result), - Err(_) => Some(Err(ErrorKind::ThreadPanicked(format!( + Err(_) => Some(Err(Error::ThreadPanicked(format!( "Background Snapshotting for {:?} panicked.", self.snapshot_info )) @@ -417,7 +417,7 @@ impl StorageManager { let snapshot_associated_mpts_locked = self.snapshot_associated_mpts_by_epoch.read(); match snapshot_associated_mpts_locked.get(snapshot_epoch_id) { - None => bail!(ErrorKind::DeltaMPTEntryNotFound), + None => bail!(Error::DeltaMPTEntryNotFound), Some(delta_mpts) => { if delta_mpts.1.is_some() { return Ok(delta_mpts.1.as_ref().unwrap().clone()); @@ -441,7 +441,7 @@ impl StorageManager { .read() .get(snapshot_epoch_id) { - None => bail!(ErrorKind::DeltaMPTEntryNotFound), + None => bail!(Error::DeltaMPTEntryNotFound), Some(mpts) => Ok(mpts.0.clone()), } } @@ -461,7 +461,7 @@ impl StorageManager { let mut maybe_snapshot_entry = snapshot_associated_mpts_mut.get_mut(snapshot_epoch_id); if maybe_snapshot_entry.is_none() { - bail!(ErrorKind::SnapshotNotFound); + bail!(Error::SnapshotNotFound); }; // DeltaMpt already exists if maybe_snapshot_entry.as_ref().unwrap().1.is_some() { @@ -591,7 +591,7 @@ impl StorageManager { let delta_db = maybe_delta_db.as_ref().unwrap(); while delta_height > 0 { epoch_id = match 
delta_db.mpt.get_parent_epoch(&epoch_id)? {
-                None => bail!(ErrorKind::DbValueError),
+                None => bail!(Error::DbValueError),
                 Some(epoch_id) => epoch_id,
             };
             delta_height -= 1;
@@ -664,7 +664,7 @@ impl StorageManager {
             }
             task_finished_sender_cloned.lock().send(Some(snapshot_epoch_id))
-                .or(Err(Error::from(ErrorKind::MpscError)))?;
+                .or(Err(Error::from(Error::MpscError)))?;
             drop(snapshot_info_map_locked);
             let debug_snapshot_checkers =
@@ -1377,8 +1377,8 @@ impl StorageManager {
                     .delta_db_manager
                     .get_delta_db_name(&snapshot_epoch_id),
             )
-            .or_else(|e| match e.kind() {
-                ErrorKind::Io(io_err) => match io_err.kind() {
+            .or_else(|e| match &e {
+                Error::Io(io_err) => match io_err.kind() {
                     std::io::ErrorKind::NotFound => Ok(()),
                     _ => Err(e),
                 },
@@ -1557,7 +1557,7 @@ impl MaybeDeltaTrieDestroyErrors {
         let e1 = self.delta_trie_destroy_error_1.take().map(|e| Box::new(e));
         let e2 = self.delta_trie_destroy_error_2.take().map(|e| Box::new(e));
         if e1.is_some() || e2.is_some() {
-            Err(ErrorKind::DeltaMPTDestroyErrors(e1, e2).into())
+            Err(Error::DeltaMPTDestroyErrors { e1, e2 }.into())
         } else {
             Ok(())
         }
diff --git a/crates/dbs/storage/src/lib.rs b/crates/dbs/storage/src/lib.rs
index ad4691eea..4da34d680 100644
--- a/crates/dbs/storage/src/lib.rs
+++ b/crates/dbs/storage/src/lib.rs
@@ -4,13 +4,11 @@
 // TODO: check them again and reason about the safety of each usage.
 #![allow(clippy::mut_from_ref, clippy::cast_ref_to_mut, clippy::drop_ref)]
-// Recursion limit raised for error_chain
-#![recursion_limit = "512"]
 #![allow(deprecated)]
 //extern crate futures;
 #[macro_use]
-extern crate error_chain;
+extern crate cfx_util_macros;
 #[macro_use]
 extern crate lazy_static;
 #[macro_use]
@@ -200,7 +198,7 @@ pub use self::{
     impls::{
         defaults,
         delta_mpt::*,
-        errors::{Error, ErrorKind, Result},
+        errors::{Error, Result},
         merkle_patricia_trie::{
             mpt_cursor::rlp_key_value_len, simple_mpt::*,
             trie_proof::TrieProofNode, CompressedPathRaw, KVInserter,

From 23a586365a3ce9b410926c2630481ddb4243deb4 Mon Sep 17 00:00:00 2001
From: Pana
Date: Tue, 19 Nov 2024 17:14:46 +0800
Subject: [PATCH 31/31] fix broken test

---
 tests/full_node_tests/remove_old_eras_test.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/full_node_tests/remove_old_eras_test.py b/tests/full_node_tests/remove_old_eras_test.py
index e7d91631d..541826d7d 100755
--- a/tests/full_node_tests/remove_old_eras_test.py
+++ b/tests/full_node_tests/remove_old_eras_test.py
@@ -71,9 +71,9 @@ def run_test(self):
         self.log.info(f"checking existing blocks...")
         # TODO We can use `num_blocks` here if we can guarantee all blocks form a chain.
-        latest_epoch = self.rpc[ARCHIVE_NODE].epoch_number()
+        latest_epoch = self.rpc[ARCHIVE_NODE].epoch_number("latest_state")
         # we expect the last few eras are not removed
-        for epoch in range(8 * ERA_EPOCH_COUNT, latest_epoch+1):
+        for epoch in range(8 * ERA_EPOCH_COUNT, latest_epoch):
             archive_block = self.rpc[ARCHIVE_NODE].block_by_epoch(hex(epoch), include_txs=True)
             assert(archive_block != None)
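One helper this series leaves untouched is `block_on` in tests/issue2159_test.py, which still goes through `asyncio.get_event_loop()` and only documents the resulting DeprecationWarning. Purely as an illustration of a possible follow-up (not part of these patches), the helper could own its event loop explicitly; `_loop` and `close_loop` below are hypothetical names.

import asyncio

# Hypothetical follow-up sketch, not part of this patch series: keep one
# explicit event loop instead of relying on the deprecated implicit
# asyncio.get_event_loop() fallback.
_loop = asyncio.new_event_loop()

def block_on(op):
    """Run the coroutine `op` to completion on the helper's own loop."""
    return _loop.run_until_complete(op)

def close_loop():
    """Release the loop when the test framework shuts down."""
    _loop.close()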