Skip to content

Commit

Permalink
refactor(katana): replace cursor-based api with by block basis for si…
Browse files Browse the repository at this point in the history
…mplicity (#1986)

# Description

currently, the `saya_getTransactionsExecutions` API is based on a cursor to determine how the traces are being fetched. 

https://github.com/dojoengine/dojo/blob/855da3112c87faea87646db5a406ac77b4daf149/crates/saya/provider/src/rpc/mod.rs#L160-L175

One issue it currently has is that the API params do not put any constraint on whether the returned traces are guaranteed to come only from a specific block; however, the implementation seems to indicate otherwise.

Anyway, this PR basically wants to remove the complexity of the pagination logic (which we don't even need!) and replace it with a basic `get Xs from block Y` API instead.


so we can simplify the endpoint itself to just return the traces per block instead of based on a cursor. the cursor is only adding more complexity to the api itself.

## Related issue

<!--
Please link related issues: Fixes #<issue_number>
More info: https://docs.github.com/en/free-pro-team@latest/github/managing-your-work-on-github/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword
-->

## Tests

<!--
Please refer to the CONTRIBUTING.md file to know more about the testing process. Ensure you've tested at least the package you're modifying if running all the tests consumes too much memory on your system.
-->

- [ ] Yes
- [ ] No, because they aren't needed
- [ ] No, because I need help

## Added to documentation?

<!--
If the changes are small, code comments are enough, otherwise, the documentation is needed. It
may be a README.md file added to your module/package, a DojoBook PR or both.
-->

- [ ] README.md
- [x] [Dojo Book](https://github.com/dojoengine/book)
- [ ] No documentation needed

## Checklist

- [x] I've formatted my code (`scripts/prettier.sh`, `scripts/rust_fmt.sh`, `scripts/cairo_fmt.sh`)
- [x] I've linted my code (`scripts/clippy.sh`, `scripts/docs.sh`)
- [x] I've commented my code
- [ ] I've requested a review after addressing the comments
  • Loading branch information
kariy authored May 23, 2024
1 parent 9aa4301 commit ad94ba6
Show file tree
Hide file tree
Showing 12 changed files with 315 additions and 43 deletions.
1 change: 1 addition & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

9 changes: 9 additions & 0 deletions crates/katana/rpc/rpc-api/src/saya.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
use jsonrpsee::core::RpcResult;
use jsonrpsee::proc_macros::rpc;
use katana_primitives::block::BlockIdOrTag;
use katana_rpc_types::trace::TxExecutionInfo;
use katana_rpc_types::transaction::{TransactionsExecutionsPage, TransactionsPageCursor};

#[cfg_attr(not(feature = "client"), rpc(server, namespace = "saya"))]
Expand All @@ -17,4 +19,11 @@ pub trait SayaApi {
&self,
cursor: TransactionsPageCursor,
) -> RpcResult<TransactionsExecutionsPage>;

    /// Retrieves the transaction execution information of all transactions in a
    /// given block.
    ///
    /// Returns one entry per transaction, pairing the transaction hash with its
    /// execution trace.
    #[method(name = "getTransactionExecutionsByBlock")]
    async fn transaction_executions_by_block(
        &self,
        block_id: BlockIdOrTag,
    ) -> RpcResult<Vec<TxExecutionInfo>>;
}
13 changes: 12 additions & 1 deletion crates/katana/rpc/rpc-types/src/trace.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
use katana_primitives::trace::CallInfo;
use katana_primitives::trace::{CallInfo, TxExecInfo};
use katana_primitives::transaction::TxHash;
use serde::{Deserialize, Serialize};
use starknet::core::types::{
CallType, EntryPointType, ExecutionResources, OrderedEvent, OrderedMessage,
};
Expand Down Expand Up @@ -72,3 +74,12 @@ impl From<CallInfo> for FunctionInvocation {
})
}
}

/// The type returned by the `saya_getTransactionExecutionsByBlock` RPC method.
///
/// Pairs a transaction hash with the execution trace produced when the
/// transaction was executed, so callers can correlate each trace with its
/// transaction.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TxExecutionInfo {
    /// The transaction hash.
    pub hash: TxHash,
    /// The transaction execution trace.
    pub trace: TxExecInfo,
}
1 change: 1 addition & 0 deletions crates/katana/rpc/rpc/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@ assert_matches.workspace = true
cairo-lang-starknet-classes.workspace = true
cairo-lang-starknet.workspace = true
dojo-test-utils.workspace = true
dojo-world.workspace = true
jsonrpsee = { workspace = true, features = [ "client" ] }
katana-rpc-api = { workspace = true, features = [ "client" ] }
url.workspace = true
79 changes: 76 additions & 3 deletions crates/katana/rpc/rpc/src/saya.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,13 @@ use std::sync::Arc;
use jsonrpsee::core::{async_trait, RpcResult};
use katana_core::sequencer::KatanaSequencer;
use katana_executor::ExecutorFactory;
use katana_primitives::block::BlockHashOrNumber;
use katana_provider::traits::transaction::TransactionTraceProvider;
use katana_primitives::block::{BlockHashOrNumber, BlockIdOrTag, BlockTag};
use katana_provider::error::ProviderError;
use katana_provider::traits::block::{BlockIdReader, BlockProvider};
use katana_provider::traits::transaction::{TransactionTraceProvider, TransactionsProviderExt};
use katana_rpc_api::saya::SayaApiServer;
use katana_rpc_types::error::saya::SayaApiError;
use katana_rpc_types::trace::TxExecutionInfo;
use katana_rpc_types::transaction::{TransactionsExecutionsPage, TransactionsPageCursor};
use katana_tasks::TokioTaskSpawner;

Expand Down Expand Up @@ -46,7 +49,7 @@ impl<EF: ExecutorFactory> SayaApiServer for SayaApi<EF> {
let mut next_cursor = cursor;

let transactions_executions = provider
.transactions_executions_by_block(BlockHashOrNumber::Num(cursor.block_number))
.transaction_executions_by_block(BlockHashOrNumber::Num(cursor.block_number))
.map_err(SayaApiError::from)?
.ok_or(SayaApiError::BlockNotFound)?;

Expand All @@ -73,4 +76,74 @@ impl<EF: ExecutorFactory> SayaApiServer for SayaApi<EF> {
})
.await
}

async fn transaction_executions_by_block(
&self,
block_id: BlockIdOrTag,
) -> RpcResult<Vec<TxExecutionInfo>> {
self.on_io_blocking_task(move |this| {
let provider = this.sequencer.backend.blockchain.provider();

match block_id {
BlockIdOrTag::Tag(BlockTag::Pending) => {
// if there is no pending block (eg on instant mining), return an empty list
let Some(pending) = this.sequencer.pending_executor() else {
return Ok(Vec::new());
};

// get the read lock on the pending block
let lock = pending.read();

// extract the traces from the pending block
let mut traces = Vec::new();
for (tx, res) in lock.transactions() {
if let Some(trace) = res.trace().cloned() {
traces.push(TxExecutionInfo { hash: tx.hash, trace });
}
}

Ok(traces)
}

id => {
let number = provider
.convert_block_id(id)
.map_err(SayaApiError::from)?
.ok_or(SayaApiError::BlockNotFound)?;

// get the transaction traces and their corresponding hashes

let traces = provider
.transaction_executions_by_block(number.into())
.map_err(SayaApiError::from)?
.expect("qed; must be Some if block exists");

// get the block body indices for the requested block to determine its tx range
// in the db for the tx hashes

let block_indices = provider
.block_body_indices(number.into())
.map_err(SayaApiError::from)?
.ok_or(ProviderError::MissingBlockBodyIndices(number))
.expect("qed; must be Some if block exists");

// TODO: maybe we should add a `_by_block` method for the tx hashes as well?
let hashes = provider
.transaction_hashes_in_range(block_indices.clone().into())
.map_err(SayaApiError::from)?;

// build the rpc response

let traces = hashes
.into_iter()
.zip(traces)
.map(|(hash, trace)| TxExecutionInfo { hash, trace })
.collect::<Vec<_>>();

Ok(traces)
}
}
})
.await
}
}
147 changes: 143 additions & 4 deletions crates/katana/rpc/rpc/tests/saya.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,19 +3,22 @@ use std::sync::Arc;
use std::time::Duration;

use dojo_test_utils::sequencer::{get_default_test_starknet_config, TestSequencer};
use dojo_world::utils::TransactionWaiter;
use jsonrpsee::http_client::HttpClientBuilder;
use katana_core::sequencer::SequencerConfig;
use katana_primitives::block::{BlockIdOrTag, BlockTag};
use katana_rpc_api::dev::DevApiClient;
use katana_rpc_api::saya::SayaApiClient;
use katana_rpc_api::starknet::StarknetApiClient;
use katana_rpc_types::transaction::{
TransactionsExecutionsPage, TransactionsPageCursor, CHUNK_SIZE_DEFAULT,
};
use starknet::accounts::Account;
use starknet::accounts::{Account, ConnectedAccount};
use starknet::core::types::{FieldElement, TransactionStatus};
use starknet::macros::felt;
use tokio::time::sleep;

pub const ENOUGH_GAS: &str = "0x100000000000000000";
const ENOUGH_GAS: FieldElement = felt!("0x100000000000000000");

mod common;

Expand Down Expand Up @@ -134,15 +137,14 @@ async fn executions_chunks_logic_ok() {

let declare_res = account.declare(contract.clone(), compiled_class_hash).send().await.unwrap();

let max_fee = FieldElement::from_hex_be(ENOUGH_GAS).unwrap();
let mut nonce = FieldElement::ONE;
let mut last_tx_hash = FieldElement::ZERO;

// Prepare 29 transactions to test chunks (30 at total with the previous declare).
for i in 0..29 {
let deploy_call =
common::build_deploy_cairo1_contract_call(declare_res.class_hash, (i + 2_u32).into());
let deploy_txn = account.execute(vec![deploy_call]).nonce(nonce).max_fee(max_fee);
let deploy_txn = account.execute(vec![deploy_call]).nonce(nonce).max_fee(ENOUGH_GAS);
let tx_hash = deploy_txn.send().await.unwrap().transaction_hash;
nonce += FieldElement::ONE;

Expand Down Expand Up @@ -202,3 +204,140 @@ async fn executions_chunks_logic_ok() {

sequencer.stop().expect("failed to stop sequencer");
}

#[tokio::test(flavor = "multi_thread")]
async fn fetch_traces_from_block() {
    // Auto-mining is disabled so blocks are only produced on demand via
    // `generate_block`, letting us control exactly which txs land in a block.
    let sequencer = TestSequencer::start(
        SequencerConfig { no_mining: true, ..Default::default() },
        get_default_test_starknet_config(),
    )
    .await;

    let client = HttpClientBuilder::default().build(sequencer.url()).unwrap();
    let account = sequencer.account();

    // Declare the test contract and wait until the declare tx has been processed.
    let path: PathBuf = PathBuf::from("tests/test_data/cairo1_contract.json");
    let (contract, compiled_class_hash) =
        common::prepare_contract_declaration_params(&path).unwrap();
    let contract = Arc::new(contract);

    let declare_res = account.declare(contract.clone(), compiled_class_hash).send().await.unwrap();
    // wait for the tx to be mined
    TransactionWaiter::new(declare_res.transaction_hash, account.provider())
        .with_interval(200)
        .await
        .expect("tx failed");

    // Keep every tx hash so the returned traces can be checked against them later.
    let mut expected_hashes = vec![declare_res.transaction_hash];

    // Send 29 deploy txs (30 total including the declare), waiting for each one
    // before sending the next.
    for salt in 0..29u32 {
        let call =
            common::build_deploy_cairo1_contract_call(declare_res.class_hash, (salt + 2).into());

        let deploy_res = account
            .execute(vec![call])
            .max_fee(ENOUGH_GAS)
            .send()
            .await
            .expect("failed to send tx");

        // wait for the tx to be mined
        TransactionWaiter::new(deploy_res.transaction_hash, account.provider())
            .with_interval(200)
            .await
            .expect("tx failed");

        expected_hashes.push(deploy_res.transaction_hash);
    }

    // Close the pending block so every tx above ends up in the latest block.
    let _: () = client.generate_block().await.unwrap();

    // Fetch the traces of the latest block through the new by-block API.
    let traces = client
        .transaction_executions_by_block(BlockIdOrTag::Tag(BlockTag::Latest))
        .await
        .expect("failed to get traces from latest block");

    assert_eq!(
        expected_hashes.len(),
        traces.len(),
        "traces count in the latest block must equal to the total txs"
    );

    // The returned traces must correspond, in order, to the txs sent above.
    for (expected, actual) in expected_hashes.iter().zip(traces) {
        assert_eq!(expected, &actual.hash);
    }

    sequencer.stop().expect("failed to stop sequencer");
}

#[tokio::test(flavor = "multi_thread")]
async fn fetch_traces_from_pending_block() {
    // `no_mining` keeps every tx in the pending block — no block is ever closed,
    // so the traces must be served from the pending executor.
    let sequencer = TestSequencer::start(
        SequencerConfig { no_mining: true, ..Default::default() },
        get_default_test_starknet_config(),
    )
    .await;

    let client = HttpClientBuilder::default().build(sequencer.url()).unwrap();

    let account = sequencer.account();

    let path: PathBuf = PathBuf::from("tests/test_data/cairo1_contract.json");
    let (contract, compiled_class_hash) =
        common::prepare_contract_declaration_params(&path).unwrap();
    let contract = Arc::new(contract);

    let res = account.declare(contract.clone(), compiled_class_hash).send().await.unwrap();
    // wait for the tx to be processed (presumably the waiter resolves on a pending
    // receipt since auto-mining is off — TODO confirm against TransactionWaiter)
    TransactionWaiter::new(res.transaction_hash, account.provider())
        .with_interval(200)
        .await
        .expect("tx failed");

    // Store the tx hashes to check the retrieved traces later.
    let mut tx_hashes = vec![res.transaction_hash];

    for i in 0..29 {
        let call = common::build_deploy_cairo1_contract_call(res.class_hash, (i + 2_u32).into());

        // Each tx is waited on below before the next one is sent, so the account's
        // auto-managed nonce stays in sync. (An earlier comment claimed the nonce
        // was set manually here — it is not.)
        let res = account
            .execute(vec![call])
            .max_fee(ENOUGH_GAS)
            .send()
            .await
            .expect("failed to send tx");

        // wait for the tx to be processed
        TransactionWaiter::new(res.transaction_hash, account.provider())
            .with_interval(200)
            .await
            .expect("tx failed");

        tx_hashes.push(res.transaction_hash);
    }

    // Get the traces from the pending block.
    let traces = client
        .transaction_executions_by_block(BlockIdOrTag::Tag(BlockTag::Pending))
        .await
        .expect("failed to get traces from pending block");

    assert_eq!(
        tx_hashes.len(),
        traces.len(),
        "traces count in the pending block must equal to the total txs"
    );

    for (expected, actual) in tx_hashes.iter().zip(traces) {
        // Assert that the traces are from the txs in the requested block.
        assert_eq!(expected, &actual.hash);
    }

    sequencer.stop().expect("failed to stop sequencer");
}
11 changes: 9 additions & 2 deletions crates/katana/storage/provider/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -195,11 +195,18 @@ where
TransactionTraceProvider::transaction_execution(&self.provider, hash)
}

fn transactions_executions_by_block(
fn transaction_executions_by_block(
&self,
block_id: BlockHashOrNumber,
) -> ProviderResult<Option<Vec<TxExecInfo>>> {
TransactionTraceProvider::transactions_executions_by_block(&self.provider, block_id)
TransactionTraceProvider::transaction_executions_by_block(&self.provider, block_id)
}

    /// Returns the execution traces of the transactions whose numbers fall in
    /// the given range. Delegates to the inner provider's implementation.
    fn transaction_executions_in_range(
        &self,
        range: Range<TxNumber>,
    ) -> ProviderResult<Vec<TxExecInfo>> {
        TransactionTraceProvider::transaction_executions_in_range(&self.provider, range)
    }
}

Expand Down
Loading

0 comments on commit ad94ba6

Please sign in to comment.