From ccd7faa01f28bbb8eee04f045883870c8a15e364 Mon Sep 17 00:00:00 2001
From: Fuyao Zhao
Date: Sat, 2 Mar 2024 12:05:30 -0700
Subject: [PATCH] disable storage range for now

---
 runtime/astar/src/lib.rs         |  22 +-
 runtime/local/src/lib.rs         |  22 +-
 runtime/shibuya/src/lib.rs       |  22 +-
 runtime/shiden/src/lib.rs        |  22 +-
 vendor/rpc-core/debug/src/lib.rs |  18 +-
 vendor/rpc/debug/src/lib.rs      | 356 +++++++++++++++----------------
 6 files changed, 231 insertions(+), 231 deletions(-)

diff --git a/runtime/astar/src/lib.rs b/runtime/astar/src/lib.rs
index f4c1e94dc8..6cadb8a5eb 100644
--- a/runtime/astar/src/lib.rs
+++ b/runtime/astar/src/lib.rs
@@ -1413,17 +1413,17 @@ impl_runtime_apis! {
             pallet_evm::AccountStorages::<Runtime>::get(address, H256::from_slice(&tmp[..]))
         }
 
-        fn storage_range_at(address: H160, start_key: H256, limit: u64) -> (Vec<(H256, H256)>, Option<H256>) {
-            let iter = pallet_evm::AccountStorages::<Runtime>::iter_prefix_from(address, start_key.as_bytes().to_vec());
-            let mut res: Vec<(H256, H256)> = vec![];
-            for (key, value) in iter {
-                if res.len() == limit as usize {
-                    return (res, Some(key));
-                }
-                res.push((key, value));
-            }
-            return (res, None);
-        }
+        // fn storage_range_at(address: H160, start_key: H256, limit: u64) -> (Vec<(H256, H256)>, Option<H256>) {
+        //     let iter = pallet_evm::AccountStorages::<Runtime>::iter_prefix_from(address, start_key.as_bytes().to_vec());
+        //     let mut res: Vec<(H256, H256)> = vec![];
+        //     for (key, value) in iter {
+        //         if res.len() == limit as usize {
+        //             return (res, Some(key));
+        //         }
+        //         res.push((key, value));
+        //     }
+        //     return (res, None);
+        // }
 
         fn call(
             from: H160,
diff --git a/runtime/local/src/lib.rs b/runtime/local/src/lib.rs
index a965987ca2..674b3e2ef2 100644
--- a/runtime/local/src/lib.rs
+++ b/runtime/local/src/lib.rs
@@ -1451,17 +1451,17 @@ impl_runtime_apis! {
             pallet_evm::AccountStorages::<Runtime>::get(address, H256::from_slice(&tmp[..]))
         }
 
-        fn storage_range_at(address: H160, start_key: H256, limit: u64) -> (Vec<(H256, H256)>, Option<H256>) {
-            let iter = pallet_evm::AccountStorages::<Runtime>::iter_prefix_from(address, start_key.as_bytes().to_vec());
-            let mut res: Vec<(H256, H256)> = vec![];
-            for (key, value) in iter {
-                if res.len() == limit as usize {
-                    return (res, Some(key));
-                }
-                res.push((key, value));
-            }
-            return (res, None);
-        }
+        // fn storage_range_at(address: H160, start_key: H256, limit: u64) -> (Vec<(H256, H256)>, Option<H256>) {
+        //     let iter = pallet_evm::AccountStorages::<Runtime>::iter_prefix_from(address, start_key.as_bytes().to_vec());
+        //     let mut res: Vec<(H256, H256)> = vec![];
+        //     for (key, value) in iter {
+        //         if res.len() == limit as usize {
+        //             return (res, Some(key));
+        //         }
+        //         res.push((key, value));
+        //     }
+        //     return (res, None);
+        // }
 
         fn call(
             from: H160,
diff --git a/runtime/shibuya/src/lib.rs b/runtime/shibuya/src/lib.rs
index 2f9dc6c8a8..531193b46c 100644
--- a/runtime/shibuya/src/lib.rs
+++ b/runtime/shibuya/src/lib.rs
@@ -1629,17 +1629,17 @@ impl_runtime_apis! {
             pallet_evm::AccountStorages::<Runtime>::get(address, H256::from_slice(&tmp[..]))
         }
 
-        fn storage_range_at(address: H160, start_key: H256, limit: u64) -> (Vec<(H256, H256)>, Option<H256>) {
-            let iter = pallet_evm::AccountStorages::<Runtime>::iter_prefix_from(address, start_key.as_bytes().to_vec());
-            let mut res: Vec<(H256, H256)> = vec![];
-            for (key, value) in iter {
-                if res.len() == limit as usize {
-                    return (res, Some(key));
-                }
-                res.push((key, value));
-            }
-            return (res, None);
-        }
+        // fn storage_range_at(address: H160, start_key: H256, limit: u64) -> (Vec<(H256, H256)>, Option<H256>) {
+        //     let iter = pallet_evm::AccountStorages::<Runtime>::iter_prefix_from(address, start_key.as_bytes().to_vec());
+        //     let mut res: Vec<(H256, H256)> = vec![];
+        //     for (key, value) in iter {
+        //         if res.len() == limit as usize {
+        //             return (res, Some(key));
+        //         }
+        //         res.push((key, value));
+        //     }
+        //     return (res, None);
+        // }
 
         fn call(
             from: H160,
diff --git a/runtime/shiden/src/lib.rs b/runtime/shiden/src/lib.rs
index 881ff8620b..53fec46638 100644
--- a/runtime/shiden/src/lib.rs
+++ b/runtime/shiden/src/lib.rs
@@ -1391,17 +1391,17 @@ impl_runtime_apis! {
             pallet_evm::AccountStorages::<Runtime>::get(address, H256::from_slice(&tmp[..]))
         }
 
-        fn storage_range_at(address: H160, start_key: H256, limit: u64) -> (Vec<(H256, H256)>, Option<H256>) {
-            let iter = pallet_evm::AccountStorages::<Runtime>::iter_prefix_from(address, start_key.as_bytes().to_vec());
-            let mut res: Vec<(H256, H256)> = vec![];
-            for (key, value) in iter {
-                if res.len() == limit as usize {
-                    return (res, Some(key));
-                }
-                res.push((key, value));
-            }
-            return (res, None);
-        }
+        // fn storage_range_at(address: H160, start_key: H256, limit: u64) -> (Vec<(H256, H256)>, Option<H256>) {
+        //     let iter = pallet_evm::AccountStorages::<Runtime>::iter_prefix_from(address, start_key.as_bytes().to_vec());
+        //     let mut res: Vec<(H256, H256)> = vec![];
+        //     for (key, value) in iter {
+        //         if res.len() == limit as usize {
+        //             return (res, Some(key));
+        //         }
+        //         res.push((key, value));
+        //     }
+        //     return (res, None);
+        // }
 
         fn call(
             from: H160,
diff --git a/vendor/rpc-core/debug/src/lib.rs b/vendor/rpc-core/debug/src/lib.rs
index b031d4a339..f54eb81f9e 100644
--- a/vendor/rpc-core/debug/src/lib.rs
+++ b/vendor/rpc-core/debug/src/lib.rs
@@ -63,13 +63,13 @@ pub trait Debug {
         id: RequestBlockId,
         params: Option<TraceParams>,
     ) -> RpcResult<Vec<single::TransactionTrace>>;
-    #[method(name = "debug_storageRangeAt")]
-    async fn storage_range_at(
-        &self,
-        block_hash: H256,
-        tx_index: u64,
-        address: H160,
-        start_key: H256,
-        limit: u64,
-    ) -> RpcResult<StorageRangeResult>;
+    // #[method(name = "debug_storageRangeAt")]
+    // async fn storage_range_at(
+    //     &self,
+    //     block_hash: H256,
+    //     tx_index: u64,
+    //     address: H160,
+    //     start_key: H256,
+    //     limit: u64,
+    // ) -> RpcResult<StorageRangeResult>;
 }
diff --git a/vendor/rpc/debug/src/lib.rs b/vendor/rpc/debug/src/lib.rs
index 3528193cd9..161696d603 100644
--- a/vendor/rpc/debug/src/lib.rs
+++ b/vendor/rpc/debug/src/lib.rs
@@ -133,47 +133,47 @@ impl DebugServer for Debug {
         })
     }
 
-    async fn storage_range_at(
-        &self,
-        block_hash: H256,
-        tx_index: u64,
-        address: H160,
-        start_key: H256,
-        limit: u64,
-    ) -> RpcResult<StorageRangeResult> {
-        let requester = self.requester.clone();
-
-        let (tx, rx) = oneshot::channel();
-        // Send a message from the rpc handler to the service level task.
-        requester
-            .unbounded_send((
-                (
-                    RequesterInput::StorageRange(StorageRangeParam {
-                        block_hash,
-                        tx_index,
-                        address,
-                        start_key,
-                        limit,
-                    }),
-                    None,
-                ),
-                tx,
-            ))
-            .map_err(|err| {
-                internal_err(format!(
-                    "failed to send request to debug service : {:?}",
-                    err
-                ))
-            })?;
-
-        // Receive a message from the service level task and send the rpc response.
-        rx.await
-            .map_err(|err| internal_err(format!("debug service dropped the channel : {:?}", err)))?
-            .map(|res| match res {
-                Response::StorageRange(res) => res,
-                _ => unreachable!(),
-            })
-    }
+    // async fn storage_range_at(
+    //     &self,
+    //     block_hash: H256,
+    //     tx_index: u64,
+    //     address: H160,
+    //     start_key: H256,
+    //     limit: u64,
+    // ) -> RpcResult<StorageRangeResult> {
+    //     let requester = self.requester.clone();
+
+    //     let (tx, rx) = oneshot::channel();
+    //     // Send a message from the rpc handler to the service level task.
+    //     requester
+    //         .unbounded_send((
+    //             (
+    //                 RequesterInput::StorageRange(StorageRangeParam {
+    //                     block_hash,
+    //                     tx_index,
+    //                     address,
+    //                     start_key,
+    //                     limit,
+    //                 }),
+    //                 None,
+    //             ),
+    //             tx,
+    //         ))
+    //         .map_err(|err| {
+    //             internal_err(format!(
+    //                 "failed to send request to debug service : {:?}",
+    //                 err
+    //             ))
+    //         })?;
+
+    //     // Receive a message from the service level task and send the rpc response.
+    //     rx.await
+    //         .map_err(|err| internal_err(format!("debug service dropped the channel : {:?}", err)))?
+    //         .map(|res| match res {
+    //             Response::StorageRange(res) => res,
+    //             _ => unreachable!(),
+    //         })
+    // }
 }
 
 pub struct DebugHandler<B, C, BE>(PhantomData<(B, C, BE)>);
@@ -279,39 +279,39 @@ where
                         );
                     });
                 }
-                Some(((RequesterInput::StorageRange(storage_range), ..), response_tx)) => {
-                    let client = client.clone();
-                    let backend = backend.clone();
-                    let frontier_backend = frontier_backend.clone();
-                    let permit_pool = permit_pool.clone();
-                    let overrides = overrides.clone();
-
-                    tokio::task::spawn(async move {
-                        let _ = response_tx.send(
-                            async {
-                                let _permit = permit_pool.acquire().await;
-
-                                tokio::task::spawn_blocking(move || {
-                                    Self::handle_storage_range_request(
-                                        client.clone(),
-                                        backend.clone(),
-                                        frontier_backend.clone(),
-                                        storage_range,
-                                        overrides.clone(),
-                                    )
-                                })
-                                .await
-                                .map_err(|e| {
-                                    internal_err(format!(
-                                        "Internal error on spawned task : {:?}",
-                                        e
-                                    ))
-                                })?
-                            }
-                            .await,
-                        );
-                    });
-                }
+                // Some(((RequesterInput::StorageRange(storage_range), ..), response_tx)) => {
+                //     let client = client.clone();
+                //     let backend = backend.clone();
+                //     let frontier_backend = frontier_backend.clone();
+                //     let permit_pool = permit_pool.clone();
+                //     let overrides = overrides.clone();
+
+                //     tokio::task::spawn(async move {
+                //         let _ = response_tx.send(
+                //             async {
+                //                 let _permit = permit_pool.acquire().await;
+
+                //                 tokio::task::spawn_blocking(move || {
+                //                     Self::handle_storage_range_request(
+                //                         client.clone(),
+                //                         backend.clone(),
+                //                         frontier_backend.clone(),
+                //                         storage_range,
+                //                         overrides.clone(),
+                //                     )
+                //                 })
+                //                 .await
+                //                 .map_err(|e| {
+                //                     internal_err(format!(
+                //                         "Internal error on spawned task : {:?}",
+                //                         e
+                //                     ))
+                //                 })?
+                //             }
+                //             .await,
+                //         );
+                //     });
+                // }
                 _ => {}
             }
         }
@@ -761,108 +761,108 @@ where
         Err(internal_err("Runtime block call failed".to_string()))
     }
 
-    fn handle_storage_range_request(
-        client: Arc<C>,
-        backend: Arc<BE>,
-        frontier_backend: Arc<dyn fc_api::Backend<B> + Send + Sync>,
-        storage_range: StorageRangeParam,
-        overrides: Arc<OverrideHandle<B>>,
-    ) -> RpcResult<Response> {
-        let reference_id =
-            match futures::executor::block_on(frontier_backend_client::load_hash::<B, C>(
-                client.as_ref(),
-                frontier_backend.as_ref(),
-                storage_range.block_hash,
-            )) {
-                Ok(Some(hash)) => BlockId::Hash(hash),
-                Ok(_) => return Err(internal_err("Block hash not found".to_string())),
-                Err(e) => return Err(e),
-            };
-
-        // Get ApiRef. This handle allows us to keep changes between txs in an internal buffer.
-        let api = client.runtime_api();
-        // Get Blockchain backend
-        let blockchain = backend.blockchain();
-        // Get the header we want to work with.
-        let Ok(hash) = client.expect_block_hash_from_id(&reference_id) else {
-            return Err(internal_err("Block header not found"))
-        };
-        let header = match client.header(hash) {
-            Ok(Some(h)) => h,
-            _ => return Err(internal_err("Block header not found")),
-        };
-
-        // Get the parent block hash.
-        let parent_block_hash = *header.parent_hash();
-
-        let schema = fc_storage::onchain_storage_schema::<B, C>(client.as_ref(), hash);
-
-        // Using storage overrides, we align with `:ethereum_schema`, which will result in proper
-        // SCALE decoding in case of migration.
-        let statuses = match overrides.schemas.get(&schema) {
-            Some(schema) => schema
-                .current_transaction_statuses(hash)
-                .unwrap_or_default(),
-            _ => {
-                return Err(internal_err(format!(
-                    "No storage override at {:?}",
-                    reference_id
-                )))
-            }
-        };
-
-        // // Known ethereum transaction hashes.
-        // let eth_tx_hashes: Vec<_> = statuses.iter().map(|t| t.transaction_hash).collect();
-
-        // Get block extrinsics.
-        let exts = blockchain
-            .body(hash)
-            .map_err(|e| internal_err(format!("Failed to read blockchain db: {:?}", e)))?
-            .unwrap_or_default();
-
-        api.initialize_block(parent_block_hash, &header)
-            .map_err(|e| internal_err(format!("Runtime api access error: {:?}", e)))?;
-        if storage_range.tx_index as usize >= statuses.len() {
-            panic!("tx index is too large");
-        }
-        let to = exts.len() - (statuses.len() - storage_range.tx_index as usize);
-        let mut i = 0;
-        for ext in exts {
-            if i >= to {
-                break;
-            }
-            i = i + 1;
-            let _ = api.apply_extrinsic(parent_block_hash, ext);
-        }
-
-        let version = api.version(parent_block_hash).expect("has version");
-        let res = api.storage_range_at(
-            parent_block_hash,
-            storage_range.address,
-            storage_range.start_key,
-            storage_range.limit,
-        );
-        match res {
-            Ok((storages, next_key)) => {
-                let mut result = StorageRangeResult {
-                    storage: Default::default(),
-                    next_key: next_key,
-                };
-
-                for (key, value) in storages {
-                    let key_hash = H256::from_slice(Keccak256::digest(key.as_bytes()).as_slice());
-                    result.storage.insert(key_hash, StorageEntry { key, value });
-                }
-
-                return Ok(Response::StorageRange(result));
-            }
-            Err(e) => {
-                return Err(internal_err(format!(
-                    "{} for version {}",
-                    e.to_string(),
-                    version.spec_version
-                )));
-            }
-        }
-    }
+    // fn handle_storage_range_request(
+    //     client: Arc<C>,
+    //     backend: Arc<BE>,
+    //     frontier_backend: Arc<dyn fc_api::Backend<B> + Send + Sync>,
+    //     storage_range: StorageRangeParam,
+    //     overrides: Arc<OverrideHandle<B>>,
+    // ) -> RpcResult<Response> {
+    //     let reference_id =
+    //         match futures::executor::block_on(frontier_backend_client::load_hash::<B, C>(
+    //             client.as_ref(),
+    //             frontier_backend.as_ref(),
+    //             storage_range.block_hash,
+    //         )) {
+    //             Ok(Some(hash)) => BlockId::Hash(hash),
+    //             Ok(_) => return Err(internal_err("Block hash not found".to_string())),
+    //             Err(e) => return Err(e),
+    //         };
+
+    //     // Get ApiRef. This handle allows us to keep changes between txs in an internal buffer.
+    //     let api = client.runtime_api();
+    //     // Get Blockchain backend
+    //     let blockchain = backend.blockchain();
+    //     // Get the header we want to work with.
+    //     let Ok(hash) = client.expect_block_hash_from_id(&reference_id) else {
+    //         return Err(internal_err("Block header not found"))
+    //     };
+    //     let header = match client.header(hash) {
+    //         Ok(Some(h)) => h,
+    //         _ => return Err(internal_err("Block header not found")),
+    //     };
+
+    //     // Get the parent block hash.
+    //     let parent_block_hash = *header.parent_hash();
+
+    //     let schema = fc_storage::onchain_storage_schema::<B, C>(client.as_ref(), hash);
+
+    //     // Using storage overrides, we align with `:ethereum_schema`, which will result in proper
+    //     // SCALE decoding in case of migration.
+    //     let statuses = match overrides.schemas.get(&schema) {
+    //         Some(schema) => schema
+    //             .current_transaction_statuses(hash)
+    //             .unwrap_or_default(),
+    //         _ => {
+    //             return Err(internal_err(format!(
+    //                 "No storage override at {:?}",
+    //                 reference_id
+    //             )))
+    //         }
+    //     };
+
+    //     // // Known ethereum transaction hashes.
+    //     // let eth_tx_hashes: Vec<_> = statuses.iter().map(|t| t.transaction_hash).collect();
+
+    //     // Get block extrinsics.
+    //     let exts = blockchain
+    //         .body(hash)
+    //         .map_err(|e| internal_err(format!("Failed to read blockchain db: {:?}", e)))?
+    //         .unwrap_or_default();
+
+    //     api.initialize_block(parent_block_hash, &header)
+    //         .map_err(|e| internal_err(format!("Runtime api access error: {:?}", e)))?;
+    //     if storage_range.tx_index as usize >= statuses.len() {
+    //         panic!("tx index is too large");
+    //     }
+    //     let to = exts.len() - (statuses.len() - storage_range.tx_index as usize);
+    //     let mut i = 0;
+    //     for ext in exts {
+    //         if i >= to {
+    //             break;
+    //         }
+    //         i = i + 1;
+    //         let _ = api.apply_extrinsic(parent_block_hash, ext);
+    //     }
+
+    //     let version = api.version(parent_block_hash).expect("has version");
+    //     let res = api.storage_range_at(
+    //         parent_block_hash,
+    //         storage_range.address,
+    //         storage_range.start_key,
+    //         storage_range.limit,
+    //     );
+    //     match res {
+    //         Ok((storages, next_key)) => {
+    //             let mut result = StorageRangeResult {
+    //                 storage: Default::default(),
+    //                 next_key: next_key,
+    //             };
+
+    //             for (key, value) in storages {
+    //                 let key_hash = H256::from_slice(Keccak256::digest(key.as_bytes()).as_slice());
+    //                 result.storage.insert(key_hash, StorageEntry { key, value });
+    //             }
+
+    //             return Ok(Response::StorageRange(result));
+    //         }
+    //         Err(e) => {
+    //             return Err(internal_err(format!(
+    //                 "{} for version {}",
+    //                 e.to_string(),
+    //                 version.spec_version
+    //             )));
+    //         }
+    //     }
+    // }
 }
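
For reference, a minimal, self-contained sketch of the pagination contract that the
disabled `storage_range_at` runtime API implements: scan from a start key, collect at
most `limit` entries, and hand back the next key as a resume cursor. The `BTreeMap`
storage, `u64` keys, and the `storage_range` helper below are illustrative stand-ins,
not the runtime's `pallet_evm::AccountStorages` or `H256` types:

    use std::collections::BTreeMap;

    /// Return up to `limit` (key, value) pairs starting at `start_key`, plus
    /// the key to resume from, the same shape the commented-out
    /// `storage_range_at` above returns.
    fn storage_range(
        storage: &BTreeMap<u64, u64>, // stand-in for AccountStorages<Runtime>
        start_key: u64,
        limit: usize,
    ) -> (Vec<(u64, u64)>, Option<u64>) {
        let mut res = Vec::new();
        for (&key, &value) in storage.range(start_key..) {
            if res.len() == limit {
                // A full page is already collected; `key` is the first
                // entry we did NOT return, i.e. the cursor for the next call.
                return (res, Some(key));
            }
            res.push((key, value));
        }
        (res, None) // fewer than `limit` entries remained, so no cursor
    }

    fn main() {
        let storage: BTreeMap<u64, u64> = (0u64..5).map(|i| (i, i * 10)).collect();
        let (page, next) = storage_range(&storage, 0, 2);
        assert_eq!(page, vec![(0, 0), (1, 10)]);
        assert_eq!(next, Some(2)); // resume the scan at key 2
    }

One behavioral caveat: `range(start_key..)` in this sketch is inclusive of the start
key, whereas the pallet's `iter_prefix_from` resumes strictly after the raw storage key
it is given, so callers of the runtime API pass the returned cursor back unchanged
rather than incrementing it.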