From 2ff6312f76b717df109e44d6e25f9b43cf872db0 Mon Sep 17 00:00:00 2001 From: fishseabowl Date: Mon, 25 Mar 2024 15:42:05 -0700 Subject: [PATCH 1/5] fetch non-state data --- crates/katana/primitives/src/event.rs | 5 + crates/katana/primitives/src/transaction.rs | 1 + .../provider/src/providers/fork/backend.rs | 179 +++++++++++++++++- 3 files changed, 184 insertions(+), 1 deletion(-) diff --git a/crates/katana/primitives/src/event.rs b/crates/katana/primitives/src/event.rs index ebe08293cd..24829a51ce 100644 --- a/crates/katana/primitives/src/event.rs +++ b/crates/katana/primitives/src/event.rs @@ -1,6 +1,11 @@ use core::fmt; use std::num::ParseIntError; +pub type ChunkSize = u64; +pub type EventContinuationToken = Option; +pub type EventFilter = starknet::core::types::EventFilter; +pub type EventsPage = starknet::core::types::EventsPage; + #[derive(PartialEq, Eq, Debug, Default)] pub struct ContinuationToken { pub block_n: u64, diff --git a/crates/katana/primitives/src/transaction.rs b/crates/katana/primitives/src/transaction.rs index 76b3c4671b..61de2cb735 100644 --- a/crates/katana/primitives/src/transaction.rs +++ b/crates/katana/primitives/src/transaction.rs @@ -18,6 +18,7 @@ use crate::{utils, FieldElement}; pub type TxHash = FieldElement; /// The sequential number for all the transactions.. pub type TxNumber = u64; +pub type Transaction = starknet::core::types::Transaction; #[derive(Debug, Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] diff --git a/crates/katana/storage/provider/src/providers/fork/backend.rs b/crates/katana/storage/provider/src/providers/fork/backend.rs index 41f92f63f2..3211ba4507 100644 --- a/crates/katana/storage/provider/src/providers/fork/backend.rs +++ b/crates/katana/storage/provider/src/providers/fork/backend.rs @@ -9,7 +9,7 @@ use futures::channel::mpsc::{channel, Receiver, SendError, Sender}; use futures::future::BoxFuture; use futures::stream::Stream; use futures::{Future, FutureExt}; -use katana_primitives::block::BlockHashOrNumber; +use katana_primitives::block::{BlockHashOrNumber, BlockIdOrTag}; use katana_primitives::contract::{ ClassHash, CompiledClassHash, CompiledContractClass, ContractAddress, FlattenedSierraClass, GenericContractInfo, Nonce, StorageKey, StorageValue, @@ -18,6 +18,10 @@ use katana_primitives::conversion::rpc::{ compiled_class_hash_from_flattened_sierra_class, flattened_sierra_to_compiled_class, legacy_rpc_to_inner_compiled_class, }; +use katana_primitives::event::{ + ChunkSize, EventContinuationToken, EventFilter, EventsPage, +}; +use katana_primitives::transaction::{Transaction, TxHash, TxNumber}; use katana_primitives::FieldElement; use parking_lot::Mutex; use starknet::core::types::{BlockId, ContractClass, StarknetError}; @@ -35,6 +39,14 @@ type GetNonceResult = Result; type GetStorageResult = Result; type GetClassHashAtResult = Result; type GetClassAtResult = Result; +type GetEventResult = Result; +type GetBlockWithTxHashesResult = + Result; +type GetBlockWithTxsResult = + Result; +type GetTransactionResult = Result; +type GetTransactionReceiptResult = + Result; #[derive(Debug, thiserror::Error)] pub enum ForkedBackendError { @@ -60,6 +72,12 @@ pub enum BackendRequest { GetNonce(ContractAddress, OneshotSender), GetClassHashAt(ContractAddress, OneshotSender), GetStorage(ContractAddress, StorageKey, OneshotSender), + GetEvents(EventFilter, EventContinuationToken, ChunkSize, OneshotSender), + GetBlockWithTxHash(BlockIdOrTag, OneshotSender), + GetBlockWithTxs(BlockIdOrTag, 
OneshotSender), + GetTransactionByBlockIdAndIndex(BlockIdOrTag, TxNumber, OneshotSender), + GetTransactionByHash(TxHash, OneshotSender), + GetTransactionReceipt(TxHash, OneshotSender), } type BackendRequestFuture = BoxFuture<'static, ()>; @@ -142,6 +160,84 @@ impl Backend { self.pending_requests.push(fut); } + + BackendRequest::GetEvents(filter, continuation_token, chunks_size, sender) => { + let fut = Box::pin(async move { + let res = provider + .get_events(filter, continuation_token, chunks_size) + .await + .map_err(ForkedBackendError::StarknetProvider); + + sender.send(res).expect("failed to send events result") + }); + + self.pending_requests.push(fut); + } + + BackendRequest::GetBlockWithTxHash(block_id, sender) => { + let fut = Box::pin(async move { + let res = provider + .get_block_with_tx_hashes(block_id) + .await + .map_err(ForkedBackendError::StarknetProvider); + + sender.send(res).expect("failed to send block result") + }); + + self.pending_requests.push(fut); + } + + BackendRequest::GetBlockWithTxs(block_id, sender) => { + let fut = Box::pin(async move { + let res = provider + .get_block_with_txs(block_id) + .await + .map_err(ForkedBackendError::StarknetProvider); + + sender.send(res).expect("failed to send block result") + }); + + self.pending_requests.push(fut); + } + + BackendRequest::GetTransactionByBlockIdAndIndex(block_id, index, sender) => { + let fut = Box::pin(async move { + let res = provider + .get_transaction_by_block_id_and_index(block_id, index) + .await + .map_err(ForkedBackendError::StarknetProvider); + + sender.send(res).expect("failed to send transaction result") + }); + + self.pending_requests.push(fut); + } + + BackendRequest::GetTransactionByHash(transaction_hash, sender) => { + let fut = Box::pin(async move { + let res = provider + .get_transaction_by_hash(transaction_hash) + .await + .map_err(ForkedBackendError::StarknetProvider); + + sender.send(res).expect("failed to send transaction result") + }); + + self.pending_requests.push(fut); + } + + BackendRequest::GetTransactionReceipt(transaction_hash, sender) => { + let fut = Box::pin(async move { + let res = provider + .get_transaction_receipt(transaction_hash) + .await + .map_err(ForkedBackendError::StarknetProvider); + + sender.send(res).expect("failed to send transaction result") + }); + + self.pending_requests.push(fut); + } } } } @@ -313,6 +409,87 @@ impl ForkedBackend { } } } + + pub fn do_get_events( + &self, + filter: EventFilter, + continuation_token: Option, + chunks_size: ChunkSize, + ) -> Result { + trace!(target: "forked_backend", "requesting evetns at filter{filter:#?}, continuation_token {continuation_token:#?}, and chunks_size {chunks_size:#?} "); + let (sender, rx) = oneshot(); + self.0 + .lock() + .try_send(BackendRequest::GetEvents(filter, continuation_token, chunks_size, sender)) + .map_err(|e| e.into_send_error())?; + rx.recv()? + } + + pub fn do_get_block_with_tx_hashes( + &self, + block_id: BlockIdOrTag, + ) -> Result { + trace!(target: "forked_backend", "requesting block with tx_hashes at block {block_id:#?} "); + let (sender, rx) = oneshot(); + self.0 + .lock() + .try_send(BackendRequest::GetBlockWithTxHash(block_id, sender)) + .map_err(|e| e.into_send_error())?; + rx.recv()? 
+ } + + pub fn do_get_block_with_txs( + &self, + block_id: BlockIdOrTag, + ) -> Result { + trace!(target: "forked_backend", "requesting block with txs at block {block_id:#?} "); + let (sender, rx) = oneshot(); + self.0 + .lock() + .try_send(BackendRequest::GetBlockWithTxs(block_id, sender)) + .map_err(|e| e.into_send_error())?; + rx.recv()? + } + + pub fn do_get_transaction_by_block_id_and_index( + &self, + block_id: BlockIdOrTag, + index: TxNumber, + ) -> Result { + trace!(target: "forked_backend", "requesting transaction at block {block_id:#?}, index {index:#?}"); + let (sender, rx) = oneshot(); + self.0 + .lock() + .try_send(BackendRequest::GetTransactionByBlockIdAndIndex(block_id, index, sender)) + .map_err(|e| e.into_send_error())?; + rx.recv()? + } + + pub fn do_get_transaction_by_hash( + &self, + transaction_hash: TxHash + ) -> Result { + trace!(target: "forked_backend", "requesting transaction at trasanction hash {transaction_hash:#?} "); + let (sender, rx) = oneshot(); + self.0 + .lock() + .try_send(BackendRequest::GetTransactionByHash(transaction_hash, sender)) + .map_err(|e| e.into_send_error())?; + rx.recv()? + } + + pub fn do_get_transaction_receipt( + &self, + transaction_hash: TxHash + ) -> Result { + trace!(target: "forked_backend", "requesting transaction receipt at trasanction hash {transaction_hash:#?} "); + let (sender, rx) = oneshot(); + self.0 + .lock() + .try_send(BackendRequest::GetTransactionReceipt(transaction_hash, sender)) + .map_err(|e| e.into_send_error())?; + rx.recv()? + } } /// A shared cache that stores data fetched from the forked network. From ce984080da2dc31f3baee80c0ded84b9b430734e Mon Sep 17 00:00:00 2001 From: fishseabowl Date: Thu, 4 Apr 2024 19:07:19 -0700 Subject: [PATCH 2/5] Add test case --- .../provider/src/providers/fork/backend.rs | 96 ++++++++++++++----- 1 file changed, 71 insertions(+), 25 deletions(-) diff --git a/crates/katana/storage/provider/src/providers/fork/backend.rs b/crates/katana/storage/provider/src/providers/fork/backend.rs index 3211ba4507..34c49df906 100644 --- a/crates/katana/storage/provider/src/providers/fork/backend.rs +++ b/crates/katana/storage/provider/src/providers/fork/backend.rs @@ -18,9 +18,7 @@ use katana_primitives::conversion::rpc::{ compiled_class_hash_from_flattened_sierra_class, flattened_sierra_to_compiled_class, legacy_rpc_to_inner_compiled_class, }; -use katana_primitives::event::{ - ChunkSize, EventContinuationToken, EventFilter, EventsPage, -}; +use katana_primitives::event::{ChunkSize, EventContinuationToken, EventFilter, EventsPage}; use katana_primitives::transaction::{Transaction, TxHash, TxNumber}; use katana_primitives::FieldElement; use parking_lot::Mutex; @@ -48,6 +46,8 @@ type GetTransactionResult = Result; type GetTransactionReceiptResult = Result; +const FORKED_BACKEND: &str = "forked_backend"; + #[derive(Debug, thiserror::Error)] pub enum ForkedBackendError { #[error("Failed to send request to the forked backend: {0}")] @@ -314,7 +314,7 @@ impl ForkedBackend { .block_on(backend); })?; - trace!(target: "forked_backend", "fork backend thread spawned"); + trace!(target: FORKED_BACKEND, "fork backend thread spawned"); Ok(handler) } @@ -344,7 +344,7 @@ impl ForkedBackend { &self, contract_address: ContractAddress, ) -> Result { - trace!(target: "forked_backend", "requesting nonce for contract address {contract_address}"); + trace!(target: FORKED_BACKEND, "requesting nonce for contract address {contract_address}"); let (sender, rx) = oneshot(); self.0 .lock() @@ -358,7 +358,7 @@ impl ForkedBackend 
{ contract_address: ContractAddress, key: StorageKey, ) -> Result { - trace!(target: "forked_backend", "requesting storage for address {contract_address} at key {key:#x}" ); + trace!(target: FORKED_BACKEND, "requesting storage for address {contract_address} at key {key:#x}" ); let (sender, rx) = oneshot(); self.0 .lock() @@ -371,7 +371,7 @@ impl ForkedBackend { &self, contract_address: ContractAddress, ) -> Result { - trace!(target: "forked_backend", "requesting class hash at address {contract_address}"); + trace!(target: FORKED_BACKEND, "requesting class hash at address {contract_address}"); let (sender, rx) = oneshot(); self.0 .lock() @@ -384,7 +384,7 @@ impl ForkedBackend { &self, class_hash: ClassHash, ) -> Result { - trace!(target: "forked_backend", "requesting class at hash {class_hash:#x}"); + trace!(target: FORKED_BACKEND, "requesting class at hash {class_hash:#x}"); let (sender, rx) = oneshot(); self.0 .lock() @@ -397,7 +397,7 @@ impl ForkedBackend { &self, class_hash: ClassHash, ) -> Result { - trace!(target: "forked_backend", "requesting compiled class hash at class {class_hash:#x}"); + trace!(target: FORKED_BACKEND, "requesting compiled class hash at class {class_hash:#x}"); let class = self.do_get_class_at(class_hash)?; // if its a legacy class, then we just return back the class hash // else if sierra class, then we have to compile it and compute the compiled class hash. @@ -416,7 +416,7 @@ impl ForkedBackend { continuation_token: Option, chunks_size: ChunkSize, ) -> Result { - trace!(target: "forked_backend", "requesting evetns at filter{filter:#?}, continuation_token {continuation_token:#?}, and chunks_size {chunks_size:#?} "); + trace!(target: FORKED_BACKEND, "requesting evetns at filter{filter:#?}, continuation_token {continuation_token:#?}, and chunks_size {chunks_size:#?} "); let (sender, rx) = oneshot(); self.0 .lock() @@ -429,7 +429,7 @@ impl ForkedBackend { &self, block_id: BlockIdOrTag, ) -> Result { - trace!(target: "forked_backend", "requesting block with tx_hashes at block {block_id:#?} "); + trace!(target: FORKED_BACKEND, "requesting block with tx_hashes at block {block_id:#?} "); let (sender, rx) = oneshot(); self.0 .lock() @@ -442,7 +442,7 @@ impl ForkedBackend { &self, block_id: BlockIdOrTag, ) -> Result { - trace!(target: "forked_backend", "requesting block with txs at block {block_id:#?} "); + trace!(target: FORKED_BACKEND, "requesting block with txs at block {block_id:#?} "); let (sender, rx) = oneshot(); self.0 .lock() @@ -456,7 +456,7 @@ impl ForkedBackend { block_id: BlockIdOrTag, index: TxNumber, ) -> Result { - trace!(target: "forked_backend", "requesting transaction at block {block_id:#?}, index {index:#?}"); + trace!(target: FORKED_BACKEND, "requesting transaction at block {block_id:#?}, index {index:#?}"); let (sender, rx) = oneshot(); self.0 .lock() @@ -467,9 +467,9 @@ impl ForkedBackend { pub fn do_get_transaction_by_hash( &self, - transaction_hash: TxHash + transaction_hash: TxHash, ) -> Result { - trace!(target: "forked_backend", "requesting transaction at trasanction hash {transaction_hash:#?} "); + trace!(target: FORKED_BACKEND, "requesting transaction at trasanction hash {transaction_hash:#?} "); let (sender, rx) = oneshot(); self.0 .lock() @@ -480,9 +480,9 @@ impl ForkedBackend { pub fn do_get_transaction_receipt( &self, - transaction_hash: TxHash + transaction_hash: TxHash, ) -> Result { - trace!(target: "forked_backend", "requesting transaction receipt at trasanction hash {transaction_hash:#?} "); + trace!(target: FORKED_BACKEND, 
"requesting transaction receipt at trasanction hash {transaction_hash:#?} "); let (sender, rx) = oneshot(); self.0 .lock() @@ -520,7 +520,7 @@ impl StateProvider for SharedStateProvider { } if let Some(nonce) = handle_contract_or_class_not_found_err(self.0.do_get_nonce(address)).map_err(|e| { - error!(target: "forked_backend", "error while fetching nonce of contract {address}: {e}"); + error!(target: FORKED_BACKEND, "error while fetching nonce of contract {address}: {e}"); e })? { self.0.contract_state.write().entry(address).or_default().nonce = nonce; @@ -542,7 +542,7 @@ impl StateProvider for SharedStateProvider { } let value = handle_contract_or_class_not_found_err(self.0.do_get_storage(address, storage_key)).map_err(|e| { - error!(target: "forked_backend", "error while fetching storage value of contract {address} at key {storage_key:#x}: {e}"); + error!(target: FORKED_BACKEND, "error while fetching storage value of contract {address} at key {storage_key:#x}: {e}"); e })?; @@ -565,7 +565,7 @@ impl StateProvider for SharedStateProvider { } if let Some(hash) = handle_contract_or_class_not_found_err(self.0.do_get_class_hash_at(address)).map_err(|e| { - error!(target: "forked_backend", "error while fetching class hash of contract {address}: {e}"); + error!(target: FORKED_BACKEND, "error while fetching class hash of contract {address}: {e}"); e })? { self.0.contract_state.write().entry(address).or_default().class_hash = hash; @@ -584,7 +584,7 @@ impl ContractClassProvider for SharedStateProvider { let Some(class) = handle_contract_or_class_not_found_err(self.0.do_get_class_at(hash)) .map_err(|e| { - error!(target: "forked_backend", "error while fetching sierra class {hash:#x}: {e}"); + error!(target: FORKED_BACKEND, "error while fetching sierra class {hash:#x}: {e}"); e })? else { @@ -615,7 +615,7 @@ impl ContractClassProvider for SharedStateProvider { if let Some(hash) = handle_contract_or_class_not_found_err(self.0.do_get_compiled_class_hash(hash)) .map_err(|e| { - error!(target: "forked_backend", "error while fetching compiled class hash for class hash {hash:#x}: {e}"); + error!(target: FORKED_BACKEND, "error while fetching compiled class hash for class hash {hash:#x}: {e}"); e })? { @@ -633,7 +633,7 @@ impl ContractClassProvider for SharedStateProvider { let Some(class) = handle_contract_or_class_not_found_err(self.0.do_get_class_at(hash)) .map_err(|e| { - error!(target: "forked_backend", "error while fetching class {hash:#x}: {e}"); + error!(target: FORKED_BACKEND, "error while fetching class {hash:#x}: {e}"); e })? 
else { @@ -643,7 +643,7 @@ impl ContractClassProvider for SharedStateProvider { let (class_hash, compiled_class_hash, casm, sierra) = match class { ContractClass::Legacy(class) => { let (_, compiled_class) = legacy_rpc_to_inner_compiled_class(&class).map_err(|e| { - error!(target: "forked_backend", "error while parsing legacy class {hash:#x}: {e}"); + error!(target: FORKED_BACKEND, "error while parsing legacy class {hash:#x}: {e}"); ProviderError::ParsingError(e.to_string()) })?; @@ -652,7 +652,7 @@ impl ContractClassProvider for SharedStateProvider { ContractClass::Sierra(sierra_class) => { let (_, compiled_class_hash, compiled_class) = flattened_sierra_to_compiled_class(&sierra_class).map_err(|e|{ - error!(target: "forked_backend", "error while parsing sierra class {hash:#x}: {e}"); + error!(target: FORKED_BACKEND, "error while parsing sierra class {hash:#x}: {e}"); ProviderError::ParsingError(e.to_string()) })?; @@ -817,4 +817,50 @@ mod tests { assert_eq!(class_hash, class_hash_in_cache, "value must be stored in cache"); assert_eq!(storage_value, storage_value_in_cache, "value must be stored in cache"); } + + #[test] + fn fetch_non_state_data_from_fork() { + let (backend, _) = create_forked_backend(LOCAL_RPC_URL.into(), 1); + + assert!(backend + .do_get_events( + EventFilter { + from_block: Some(BlockId::Number(0)), + to_block: Some(BlockId::Number(5)), + address: None, + keys: None, + }, + Some("0,100,0".into()), + 100, + ) + .is_ok()); + + assert!(backend + .do_get_block_with_tx_hashes(starknet::core::types::BlockId::Number(0)) + .is_ok()); + + assert!(backend.do_get_block_with_txs(starknet::core::types::BlockId::Number(0)).is_ok()); + + assert!(backend + .do_get_transaction_by_block_id_and_index(starknet::core::types::BlockId::Number(0), 1) + .is_ok()); + + assert!(backend + .do_get_transaction_by_hash( + FieldElement::from_hex_be( + "0x41a78e741e5af2fec34b695679bc6891742439f7afb8484ecd7766661ad02bf", + ) + .unwrap() + ) + .is_err()); + + assert!(backend + .do_get_transaction_receipt( + FieldElement::from_hex_be( + "0x41a78e741e5af2fec34b695679bc6891742439f7afb8484ecd7766661ad02bf", + ) + .unwrap() + ) + .is_err()); + } } From 01d857a0734ef49fe5445ee450ffd0ce4ed2b854 Mon Sep 17 00:00:00 2001 From: fishseabowl Date: Sun, 7 Apr 2024 12:23:23 -0600 Subject: [PATCH 3/5] Update EventFilter test parameters --- .../provider/src/providers/fork/backend.rs | 89 ++++++++++--------- 1 file changed, 47 insertions(+), 42 deletions(-) diff --git a/crates/katana/storage/provider/src/providers/fork/backend.rs b/crates/katana/storage/provider/src/providers/fork/backend.rs index 1dc55e07a0..a1c2e473f5 100644 --- a/crates/katana/storage/provider/src/providers/fork/backend.rs +++ b/crates/katana/storage/provider/src/providers/fork/backend.rs @@ -9,7 +9,6 @@ use futures::channel::mpsc::{channel, Receiver, SendError, Sender}; use futures::future::BoxFuture; use futures::stream::Stream; use futures::{Future, FutureExt}; - use katana_primitives::block::{BlockHashOrNumber, BlockIdOrTag}; use katana_primitives::class::{ClassHash, CompiledClass, CompiledClassHash, FlattenedSierraClass}; use katana_primitives::contract::{ @@ -47,8 +46,6 @@ type GetTransactionResult = Result; type GetTransactionReceiptResult = Result; -const FORKED_BACKEND: &str = "forked_backend"; - pub(crate) const LOG_TARGET: &str = "forked_backend"; #[derive(Debug, thiserror::Error)] @@ -433,7 +430,7 @@ impl ForkedBackend { continuation_token: Option, chunks_size: ChunkSize, ) -> Result { - trace!(target: FORKED_BACKEND, "requesting evetns at 
filter{filter:#?}, continuation_token {continuation_token:#?}, and chunks_size {chunks_size:#?} "); + trace!(target: LOG_TARGET, "requesting evetns at filter{filter:#?}, continuation_token {continuation_token:#?}, and chunks_size {chunks_size:#?} "); let (sender, rx) = oneshot(); self.0 .lock() @@ -446,7 +443,7 @@ impl ForkedBackend { &self, block_id: BlockIdOrTag, ) -> Result { - trace!(target: FORKED_BACKEND, "requesting block with tx_hashes at block {block_id:#?} "); + trace!(target: LOG_TARGET, "requesting block with tx_hashes at block {block_id:#?} "); let (sender, rx) = oneshot(); self.0 .lock() @@ -459,7 +456,7 @@ impl ForkedBackend { &self, block_id: BlockIdOrTag, ) -> Result { - trace!(target: FORKED_BACKEND, "requesting block with txs at block {block_id:#?} "); + trace!(target: LOG_TARGET, "requesting block with txs at block {block_id:#?} "); let (sender, rx) = oneshot(); self.0 .lock() @@ -473,7 +470,7 @@ impl ForkedBackend { block_id: BlockIdOrTag, index: TxNumber, ) -> Result { - trace!(target: FORKED_BACKEND, "requesting transaction at block {block_id:#?}, index {index:#?}"); + trace!(target: LOG_TARGET, "requesting transaction at block {block_id:#?}, index {index:#?}"); let (sender, rx) = oneshot(); self.0 .lock() @@ -486,7 +483,7 @@ impl ForkedBackend { &self, transaction_hash: TxHash, ) -> Result { - trace!(target: FORKED_BACKEND, "requesting transaction at trasanction hash {transaction_hash:#?} "); + trace!(target: LOG_TARGET, "requesting transaction at trasanction hash {transaction_hash:#?} "); let (sender, rx) = oneshot(); self.0 .lock() @@ -499,7 +496,7 @@ impl ForkedBackend { &self, transaction_hash: TxHash, ) -> Result { - trace!(target: FORKED_BACKEND, "requesting transaction receipt at trasanction hash {transaction_hash:#?} "); + trace!(target: LOG_TARGET, "requesting transaction receipt at trasanction hash {transaction_hash:#?} "); let (sender, rx) = oneshot(); self.0 .lock() @@ -886,45 +883,53 @@ mod tests { fn fetch_non_state_data_from_fork() { let (backend, _) = create_forked_backend(LOCAL_RPC_URL.into(), 1); - assert!(backend - .do_get_events( - EventFilter { - from_block: Some(BlockId::Number(0)), - to_block: Some(BlockId::Number(5)), - address: None, - keys: None, - }, - Some("0,100,0".into()), - 100, - ) - .is_ok()); - - assert!(backend - .do_get_block_with_tx_hashes(starknet::core::types::BlockId::Number(0)) - .is_ok()); + assert!( + backend + .do_get_events( + EventFilter { + from_block: Some(BlockId::Number(0)), + to_block: Some(BlockId::Number(0)), + address: None, + keys: None, + }, + None, + 0, + ) + .is_ok()); - assert!(backend.do_get_block_with_txs(starknet::core::types::BlockId::Number(0)).is_ok()); + assert!( + backend.do_get_block_with_tx_hashes(starknet::core::types::BlockId::Number(0)).is_ok() + ); - assert!(backend - .do_get_transaction_by_block_id_and_index(starknet::core::types::BlockId::Number(0), 1) - .is_ok()); + assert!(backend.do_get_block_with_txs(starknet::core::types::BlockId::Number(0)).is_ok()); - assert!(backend - .do_get_transaction_by_hash( - FieldElement::from_hex_be( - "0x41a78e741e5af2fec34b695679bc6891742439f7afb8484ecd7766661ad02bf", + assert!( + backend + .do_get_transaction_by_block_id_and_index( + starknet::core::types::BlockId::Number(0), + 1 ) + .is_ok()); + + assert!( + backend + .do_get_transaction_by_hash( + FieldElement::from_hex_be( + "0x41a78e741e5af2fec34b695679bc6891742439f7afb8484ecd7766661ad02bf", + ) .unwrap() - ) - .is_err()); + ) + .is_err() + ); - assert!(backend - .do_get_transaction_receipt( - 
FieldElement::from_hex_be( - "0x41a78e741e5af2fec34b695679bc6891742439f7afb8484ecd7766661ad02bf", + assert!( + backend + .do_get_transaction_receipt( + FieldElement::from_hex_be( + "0x41a78e741e5af2fec34b695679bc6891742439f7afb8484ecd7766661ad02bf", + ) + .unwrap() ) - .unwrap() - ) - .is_err()); + .is_err()); } } From a916fac0090e1322fbd9bff668f818a12c3c6d80 Mon Sep 17 00:00:00 2001 From: fishseabowl Date: Sun, 7 Apr 2024 13:13:26 -0600 Subject: [PATCH 4/5] Refactor event filter parameters --- .../provider/src/providers/fork/backend.rs | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/crates/katana/storage/provider/src/providers/fork/backend.rs b/crates/katana/storage/provider/src/providers/fork/backend.rs index a1c2e473f5..6916ff3cc3 100644 --- a/crates/katana/storage/provider/src/providers/fork/backend.rs +++ b/crates/katana/storage/provider/src/providers/fork/backend.rs @@ -759,7 +759,7 @@ fn handle_contract_or_class_not_found_err( #[cfg(test)] mod tests { - use katana_primitives::block::BlockNumber; + use katana_primitives::block::{BlockNumber, BlockTag}; use katana_primitives::contract::GenericContractInfo; use starknet::macros::felt; use url::Url; @@ -887,15 +887,16 @@ mod tests { backend .do_get_events( EventFilter { - from_block: Some(BlockId::Number(0)), - to_block: Some(BlockId::Number(0)), + from_block: Some(starknet::core::types::BlockId::Tag(BlockTag::Latest)), + to_block: Some(starknet::core::types::BlockId::Tag(BlockTag::Latest)), address: None, keys: None, }, None, - 0, + 10, ) - .is_ok()); + .is_ok() + ); assert!( backend.do_get_block_with_tx_hashes(starknet::core::types::BlockId::Number(0)).is_ok() @@ -909,7 +910,8 @@ mod tests { starknet::core::types::BlockId::Number(0), 1 ) - .is_ok()); + .is_ok() + ); assert!( backend @@ -930,6 +932,7 @@ mod tests { ) .unwrap() ) - .is_err()); + .is_err() + ); } } From 0423f89027a268cd214e3328e5ee17616cd33732 Mon Sep 17 00:00:00 2001 From: fishseabowl Date: Thu, 20 Jun 2024 10:09:46 -0700 Subject: [PATCH 5/5] Update non-state functions --- Cargo.lock | 1 - crates/katana/rpc/rpc-types/Cargo.toml | 1 - crates/katana/storage/provider/Cargo.toml | 1 - .../provider/src/providers/fork/backend.rs | 167 ++++++++++-------- 4 files changed, 90 insertions(+), 80 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 530db77742..2bf83813a2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8035,7 +8035,6 @@ dependencies = [ "katana-core", "katana-executor", "katana-primitives", - "katana-provider", "rstest 0.18.2", "serde", "serde_json", diff --git a/crates/katana/rpc/rpc-types/Cargo.toml b/crates/katana/rpc/rpc-types/Cargo.toml index bc351aa0fc..82aba9df66 100644 --- a/crates/katana/rpc/rpc-types/Cargo.toml +++ b/crates/katana/rpc/rpc-types/Cargo.toml @@ -10,7 +10,6 @@ version.workspace = true katana-core.workspace = true katana-executor.workspace = true katana-primitives.workspace = true -katana-provider.workspace = true anyhow.workspace = true derive_more.workspace = true diff --git a/crates/katana/storage/provider/Cargo.toml b/crates/katana/storage/provider/Cargo.toml index b9c0379152..4f1f5f4e41 100644 --- a/crates/katana/storage/provider/Cargo.toml +++ b/crates/katana/storage/provider/Cargo.toml @@ -9,7 +9,6 @@ version.workspace = true [dependencies] katana-db = { workspace = true, features = [ "test-utils" ] } katana-primitives = { workspace = true, features = [ "rpc" ] } - anyhow.workspace = true auto_impl.workspace = true parking_lot.workspace = true diff --git 
a/crates/katana/storage/provider/src/providers/fork/backend.rs b/crates/katana/storage/provider/src/providers/fork/backend.rs index d2b4eb97fd..a9fa8fffbb 100644 --- a/crates/katana/storage/provider/src/providers/fork/backend.rs +++ b/crates/katana/storage/provider/src/providers/fork/backend.rs @@ -22,7 +22,7 @@ use katana_primitives::event::{ChunkSize, EventContinuationToken, EventFilter, E use katana_primitives::transaction::{Transaction, TxHash, TxNumber}; use katana_primitives::FieldElement; use parking_lot::Mutex; -use starknet::core::types::{BlockId, ContractClass as RpcContractClass, StarknetError}; +use starknet::core::types::{BlockId, ContractClass as RpcContractClass, MaybePendingBlockWithTxHashes, MaybePendingBlockWithTxs, MaybePendingTransactionReceipt, StarknetError}; use starknet::providers::{Provider, ProviderError as StarknetProviderError}; use tracing::{error, info, trace}; @@ -32,20 +32,7 @@ use crate::traits::contract::ContractClassProvider; use crate::traits::state::StateProvider; use crate::ProviderResult; - const LOG_TARGET: &str = "forking::backend"; -type GetNonceResult = Result; -type GetStorageResult = Result; -type GetClassHashAtResult = Result; -type GetClassAtResult = Result; -type GetEventResult = Result; -type GetBlockWithTxHashesResult = - Result; -type GetBlockWithTxsResult = - Result; -type GetTransactionResult = Result; -type GetTransactionReceiptResult = - Result; type BackendResult = Result; @@ -53,6 +40,11 @@ type GetNonceResult = BackendResult; type GetStorageResult = BackendResult; type GetClassHashAtResult = BackendResult; type GetClassAtResult = BackendResult; +type GetEventResult = BackendResult; +type GetBlockWithTxsResult = BackendResult; +type GetBlockWithTxHashesResult = BackendResult; +type GetTransactionResult = BackendResult; +type GetTransactionReceiptResult = BackendResult; #[derive(Debug, thiserror::Error)] pub enum BackendError { @@ -82,12 +74,12 @@ enum BackendRequest { Class(Request), ClassHash(Request), Storage(Request<(ContractAddress, StorageKey), StorageValue>), - GetEvents(EventFilter, EventContinuationToken, ChunkSize, OneshotSender), - GetBlockWithTxHash(BlockIdOrTag, OneshotSender), - GetBlockWithTxs(BlockIdOrTag, OneshotSender), - GetTransactionByBlockIdAndIndex(BlockIdOrTag, TxNumber, OneshotSender), - GetTransactionByHash(TxHash, OneshotSender), - GetTransactionReceipt(TxHash, OneshotSender), + Events(Request<(EventFilter, EventContinuationToken, ChunkSize), EventsPage>), + BlockWithTxHash(Request), + BlockWithTxs(Request), + TransactionByBlockIdAndIndex(Request<(BlockIdOrTag, TxNumber), Transaction>), + TransactionByHash(Request), + TransactionReceipt(Request), // Test-only request kind for requesting the backend stats #[cfg(test)] Stats(OneshotSender), @@ -129,6 +121,43 @@ impl BackendRequest { (BackendRequest::Stats(sender), receiver) } + /// Create a new request for fetching events. + fn events(filter: EventFilter, continuation_token: EventContinuationToken, chunk_size: ChunkSize) -> (BackendRequest, OneshotReceiver) { + let (sender, receiver) = oneshot(); + (BackendRequest::Events(Request { payload: (filter, continuation_token, chunk_size), sender }), receiver) + } + + /// Create a new request for fetching blocks with transaction hash. + fn block_with_tx_hashes(block_id: BlockIdOrTag) -> (BackendRequest, OneshotReceiver) { + let (sender, receiver) = oneshot(); + (BackendRequest::BlockWithTxHash(Request { payload: block_id, sender }), receiver) + } + + /// Create a new request for fetching blocks. 
+ fn block_with_txs(block_id: BlockIdOrTag) -> (BackendRequest, OneshotReceiver) { + let (sender, receiver) = oneshot(); + (BackendRequest::BlockWithTxs(Request { payload: block_id, sender }), receiver) + } + + /// Create a new request for fetching transaction with block id and index. + fn transaction_by_block_id_and_index(block_id: BlockIdOrTag, tx_number: TxNumber) -> (BackendRequest, OneshotReceiver>) { + let (sender, receiver) = oneshot(); + let payload = (block_id, tx_number); + (BackendRequest::TransactionByBlockIdAndIndex(Request { payload, sender }), receiver) + } + + /// Create a new request for fetching transaction with transaction hash + fn transaction_by_hash(tx_hash: TxHash) -> (BackendRequest, OneshotReceiver>) { + let (sender, receiver) = oneshot(); + (BackendRequest::TransactionByHash(Request { payload: tx_hash, sender }), receiver) + } + + /// Create a new request for fetching transaction receipt with transaction hash + fn transaction_receipt(tx_hash: TxHash) -> (BackendRequest, OneshotReceiver>) { + let (sender, receiver) = oneshot(); + (BackendRequest::TransactionReceipt(Request { payload: tx_hash, sender }), receiver) + } + } type BackendRequestFuture = BoxFuture<'static, ()>; @@ -261,26 +290,27 @@ where BackendRequest::Stats(sender) => { let total_ongoing_request = self.pending_requests.len(); sender.send(total_ongoing_request).expect("failed to send backend stats"); + } - BackendRequest::GetEvents(filter, continuation_token, chunks_size, sender) => { + BackendRequest::Events(Request { payload: (filter, continuation_token, chunks_size), sender }) => { let fut = Box::pin(async move { let res = provider .get_events(filter, continuation_token, chunks_size) .await - .map_err(ForkedBackendError::StarknetProvider); - - sender.send(res).expect("failed to send events result") + .map_err(BackendError::StarknetProvider); + + sender.send(res).expect("failed to send events result"); }); - + self.pending_requests.push(fut); } - BackendRequest::GetBlockWithTxHash(block_id, sender) => { + BackendRequest::BlockWithTxHash(Request{payload: block_id, sender}) => { let fut = Box::pin(async move { let res = provider .get_block_with_tx_hashes(block_id) .await - .map_err(ForkedBackendError::StarknetProvider); + .map_err(BackendError::StarknetProvider); sender.send(res).expect("failed to send block result") }); @@ -288,12 +318,12 @@ where self.pending_requests.push(fut); } - BackendRequest::GetBlockWithTxs(block_id, sender) => { + BackendRequest::BlockWithTxs(Request{payload: block_id, sender}) => { let fut = Box::pin(async move { let res = provider .get_block_with_txs(block_id) .await - .map_err(ForkedBackendError::StarknetProvider); + .map_err(BackendError::StarknetProvider); sender.send(res).expect("failed to send block result") }); @@ -301,12 +331,12 @@ where self.pending_requests.push(fut); } - BackendRequest::GetTransactionByBlockIdAndIndex(block_id, index, sender) => { + BackendRequest::TransactionByBlockIdAndIndex(Request{payload: (block_id, index), sender}) => { let fut = Box::pin(async move { let res = provider .get_transaction_by_block_id_and_index(block_id, index) .await - .map_err(ForkedBackendError::StarknetProvider); + .map_err(BackendError::StarknetProvider); sender.send(res).expect("failed to send transaction result") }); @@ -314,12 +344,12 @@ where self.pending_requests.push(fut); } - BackendRequest::GetTransactionByHash(transaction_hash, sender) => { + BackendRequest::TransactionByHash(Request { payload: transaction_hash, sender }) => { let fut = Box::pin(async move { let 
res = provider .get_transaction_by_hash(transaction_hash) .await - .map_err(ForkedBackendError::StarknetProvider); + .map_err(BackendError::StarknetProvider); sender.send(res).expect("failed to send transaction result") }); @@ -327,12 +357,12 @@ where self.pending_requests.push(fut); } - BackendRequest::GetTransactionReceipt(transaction_hash, sender) => { + BackendRequest::TransactionReceipt(Request { payload: transaction_hash, sender }) => { let fut = Box::pin(async move { let res = provider .get_transaction_receipt(transaction_hash) .await - .map_err(ForkedBackendError::StarknetProvider); + .map_err(BackendError::StarknetProvider); sender.send(res).expect("failed to send transaction result") }); @@ -466,45 +496,37 @@ impl BackendHandle { let (req, rx) = BackendRequest::stats(); self.request(req)?; Ok(rx.recv()?) + } pub fn do_get_events( &self, filter: EventFilter, continuation_token: Option, chunks_size: ChunkSize, - ) -> Result { - trace!(target: LOG_TARGET, "requesting evetns at filter{filter:#?}, continuation_token {continuation_token:#?}, and chunks_size {chunks_size:#?} "); - let (sender, rx) = oneshot(); - self.0 - .lock() - .try_send(BackendRequest::GetEvents(filter, continuation_token, chunks_size, sender)) - .map_err(|e| e.into_send_error())?; + ) -> Result { + trace!(target: LOG_TARGET, events = %format!("{filter:#?}, {continuation_token:#?}, {chunks_size:#?}"), "Requesting events."); + let (req, rx) = BackendRequest::events(filter, continuation_token, chunks_size); + self.request(req)?; rx.recv()? } pub fn do_get_block_with_tx_hashes( &self, block_id: BlockIdOrTag, - ) -> Result { - trace!(target: LOG_TARGET, "requesting block with tx_hashes at block {block_id:#?} "); - let (sender, rx) = oneshot(); - self.0 - .lock() - .try_send(BackendRequest::GetBlockWithTxHash(block_id, sender)) - .map_err(|e| e.into_send_error())?; + ) -> Result { + trace!(target: LOG_TARGET, block_id = %format!("{block_id:#?}"), "Requesting blocks with tx hash."); + let (req, rx) = BackendRequest::block_with_tx_hashes(block_id); + self.request(req)?; rx.recv()? } pub fn do_get_block_with_txs( &self, block_id: BlockIdOrTag, - ) -> Result { - trace!(target: LOG_TARGET, "requesting block with txs at block {block_id:#?} "); - let (sender, rx) = oneshot(); - self.0 - .lock() - .try_send(BackendRequest::GetBlockWithTxs(block_id, sender)) - .map_err(|e| e.into_send_error())?; + ) -> Result { + trace!(target: LOG_TARGET, block_id = %format!("{block_id:#?}"), "Requesting blocks with txs"); + let (req, rx) = BackendRequest::block_with_txs(block_id); + self.request(req)?; rx.recv()? } @@ -512,39 +534,30 @@ impl BackendHandle { &self, block_id: BlockIdOrTag, index: TxNumber, - ) -> Result { - trace!(target: LOG_TARGET, "requesting transaction at block {block_id:#?}, index {index:#?}"); - let (sender, rx) = oneshot(); - self.0 - .lock() - .try_send(BackendRequest::GetTransactionByBlockIdAndIndex(block_id, index, sender)) - .map_err(|e| e.into_send_error())?; + ) -> Result { + trace!(target: LOG_TARGET, block_id = %format!("{block_id:#?}"), index = %format!("{index:#?}"), "Requesting transaction with block_id and index"); + let (req, rx) = BackendRequest::transaction_by_block_id_and_index(block_id, index); + self.request(req)?; rx.recv()? 
} pub fn do_get_transaction_by_hash( &self, transaction_hash: TxHash, - ) -> Result { - trace!(target: LOG_TARGET, "requesting transaction at trasanction hash {transaction_hash:#?} "); - let (sender, rx) = oneshot(); - self.0 - .lock() - .try_send(BackendRequest::GetTransactionByHash(transaction_hash, sender)) - .map_err(|e| e.into_send_error())?; + ) -> Result { + trace!(target: LOG_TARGET, transaction_hash = %format!("{transaction_hash:#?}"), "Requesting transaction with transaction hash"); + let (req, rx) = BackendRequest::transaction_by_hash(transaction_hash); + self.request(req)?; rx.recv()? } pub fn do_get_transaction_receipt( &self, transaction_hash: TxHash, - ) -> Result { - trace!(target: LOG_TARGET, "requesting transaction receipt at trasanction hash {transaction_hash:#?} "); - let (sender, rx) = oneshot(); - self.0 - .lock() - .try_send(BackendRequest::GetTransactionReceipt(transaction_hash, sender)) - .map_err(|e| e.into_send_error())?; + ) -> Result { + trace!(target: LOG_TARGET, transaction_hash = %format!("{transaction_hash:#?}"), "Requesting transaction receipt with transaction hash"); + let (req, rx) = BackendRequest::transaction_receipt(transaction_hash); + self.request(req)?; rx.recv()? }