diff --git a/crates/iota-cluster-test/src/cluster.rs b/crates/iota-cluster-test/src/cluster.rs
index be7dfdca080..4d6a6244225 100644
--- a/crates/iota-cluster-test/src/cluster.rs
+++ b/crates/iota-cluster-test/src/cluster.rs
@@ -259,6 +259,7 @@ impl Cluster for LocalNewCluster {
             Some(pg_address.clone()),
             fullnode_url.clone(),
             ReaderWriterConfig::writer_mode(None),
+            None,
         )
         .await;
 
@@ -267,6 +268,7 @@ impl Cluster for LocalNewCluster {
                 Some(pg_address),
                 fullnode_url.clone(),
                 ReaderWriterConfig::reader_mode(indexer_address.to_string()),
+                None,
             )
             .await;
         }
diff --git a/crates/iota-graphql-rpc/src/test_infra/cluster.rs b/crates/iota-graphql-rpc/src/test_infra/cluster.rs
index f23e774a75f..30e604fd905 100644
--- a/crates/iota-graphql-rpc/src/test_infra/cluster.rs
+++ b/crates/iota-graphql-rpc/src/test_infra/cluster.rs
@@ -65,6 +65,7 @@ pub async fn start_cluster(
         Some(db_url),
         val_fn.rpc_url().to_string(),
         ReaderWriterConfig::writer_mode(None),
+        None,
     )
     .await;
 
diff --git a/crates/iota-indexer/src/test_utils.rs b/crates/iota-indexer/src/test_utils.rs
index 58b452ddc29..359bbf1de6a 100644
--- a/crates/iota-indexer/src/test_utils.rs
+++ b/crates/iota-indexer/src/test_utils.rs
@@ -42,8 +42,9 @@ pub async fn start_test_indexer(
     db_url: Option<String>,
     rpc_url: String,
     reader_writer_config: ReaderWriterConfig,
+    new_database: Option<String>,
 ) -> (PgIndexerStore, JoinHandle<Result<(), IndexerError>>) {
-    start_test_indexer_impl(db_url, rpc_url, reader_writer_config, None).await
+    start_test_indexer_impl(db_url, rpc_url, reader_writer_config, new_database).await
 }
 
 pub async fn start_test_indexer_impl(
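`start_test_indexer` now takes a trailing `new_database` argument, so a caller can ask for a dedicated database instead of reusing the default one; passing `None` keeps the previous behaviour, which is why the existing call sites above only gain a `None`. A minimal sketch of a writer-mode caller requesting its own database (the surrounding `DEFAULT_DB_URL` and `fullnode_rpc_url` bindings are assumed to be in scope, as in the test helpers below):

    // Hypothetical caller: writer-mode indexer against an isolated database.
    let (pg_store, pg_handle) = start_test_indexer(
        Some(DEFAULT_DB_URL.to_owned()),
        fullnode_rpc_url.clone(),
        ReaderWriterConfig::writer_mode(None),
        Some("my_isolated_test_db".to_string()), // new_database
    )
    .await;
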
diff --git a/crates/iota-indexer/tests/common/mod.rs b/crates/iota-indexer/tests/common/mod.rs
index c3ab57b8634..feac1c9abd0 100644
--- a/crates/iota-indexer/tests/common/mod.rs
+++ b/crates/iota-indexer/tests/common/mod.rs
@@ -2,7 +2,7 @@
 // SPDX-License-Identifier: Apache-2.0
 
 use std::{
-    net::SocketAddr,
+    net::{SocketAddr, TcpListener},
     sync::{Arc, OnceLock},
     time::Duration,
 };
@@ -46,7 +46,10 @@ impl ApiTestSetup {
             let runtime = tokio::runtime::Runtime::new().unwrap();
 
             let (cluster, store, client) =
-                runtime.block_on(start_test_cluster_with_read_write_indexer(None));
+                runtime.block_on(start_test_cluster_with_read_write_indexer(
+                    None,
+                    Some("shared_test_indexer_db".to_string()),
+                ));
 
             Self {
                 runtime,
@@ -58,10 +61,47 @@
     }
 }
 
+pub struct SimulacrumApiTestEnvDefinition {
+    pub unique_env_name: String,
+    pub env_initializer: Box<dyn Fn() -> Simulacrum>,
+}
+
+pub struct InitializedSimulacrumEnv {
+    pub runtime: Runtime,
+    pub sim: Arc<Simulacrum>,
+    pub store: PgIndexerStore,
+    /// Indexer RPC Client
+    pub client: HttpClient,
+}
+
+impl SimulacrumApiTestEnvDefinition {
+    pub fn get_or_init_env<'a>(
+        &self,
+        initialized_env_container: &'a OnceLock<InitializedSimulacrumEnv>,
+    ) -> &'a InitializedSimulacrumEnv {
+        initialized_env_container.get_or_init(|| {
+            let runtime = tokio::runtime::Runtime::new().unwrap();
+            let sim = Arc::new((self.env_initializer)());
+            let db_name = format!("simulacrum_env_db_{}", self.unique_env_name);
+            let (_, store, _, client) = runtime.block_on(
+                start_simulacrum_rest_api_with_read_write_indexer(sim.clone(), Some(db_name)),
+            );
+
+            InitializedSimulacrumEnv {
+                runtime,
+                sim,
+                store,
+                client,
+            }
+        })
+    }
+}
+
 /// Start a [`TestCluster`][`test_cluster::TestCluster`] with a `Read` &
 /// `Write` indexer
 pub async fn start_test_cluster_with_read_write_indexer(
     stop_cluster_after_checkpoint_seq: Option<u64>,
+    database_name: Option<String>,
 ) -> (TestCluster, PgIndexerStore, HttpClient) {
     let mut builder = TestClusterBuilder::new();
 
@@ -79,17 +119,16 @@ pub async fn start_test_cluster_with_read_write_indexer(
         Some(DEFAULT_DB_URL.to_owned()),
         cluster.rpc_url().to_string(),
         ReaderWriterConfig::writer_mode(None),
+        database_name.clone(),
     )
     .await;
 
     // start indexer in read mode
-    start_indexer_reader(cluster.rpc_url().to_owned());
+    let indexer_port = start_indexer_reader(cluster.rpc_url().to_owned(), database_name);
 
     // create an RPC client by using the indexer url
     let rpc_client = HttpClientBuilder::default()
-        .build(format!(
-            "http://{DEFAULT_INDEXER_IP}:{DEFAULT_INDEXER_PORT}"
-        ))
+        .build(format!("http://{DEFAULT_INDEXER_IP}:{indexer_port}"))
        .unwrap();
 
     (cluster, pg_store, rpc_client)
@@ -117,24 +156,44 @@ pub async fn indexer_wait_for_checkpoint(
         .expect("Timeout waiting for indexer to catchup to checkpoint");
 }
 
+fn get_available_port() -> u16 {
+    // Let the OS assign some available port, read it, and then make it available
+    // again.
+    // This results in a race condition: some other app can grab the port in the
+    // short window between this call and the place where we actually use it.
+    let tl = TcpListener::bind(("127.0.0.1", 0)).unwrap();
+    tl.local_addr().unwrap().port()
+}
+
+fn replace_db_name(db_url: &str, new_db_name: &str) -> String {
+    let pos = db_url.rfind('/').expect("Unable to find / in db_url");
+    format!("{}/{}", &db_url[..pos], new_db_name)
+}
+
 /// Start an Indexer instance in `Read` mode
-fn start_indexer_reader(fullnode_rpc_url: impl Into<String>) {
+fn start_indexer_reader(fullnode_rpc_url: impl Into<String>, database_name: Option<String>) -> u16 {
+    let db_url = match database_name {
+        Some(database_name) => replace_db_name(&DEFAULT_DB_URL.to_owned(), &database_name),
+        None => DEFAULT_DB_URL.to_owned(),
+    };
+
+    let port = get_available_port();
     let config = IndexerConfig {
-        db_url: Some(DEFAULT_DB_URL.to_owned()),
+        db_url: Some(db_url.clone()),
         rpc_client_url: fullnode_rpc_url.into(),
         reset_db: true,
         rpc_server_worker: true,
         rpc_server_url: DEFAULT_INDEXER_IP.to_owned(),
-        rpc_server_port: DEFAULT_INDEXER_PORT,
+        rpc_server_port: port,
        ..Default::default()
     };
 
     let registry = prometheus::Registry::default();
     init_metrics(&registry);
 
-    tokio::spawn(async move {
-        Indexer::start_reader(&config, &registry, DEFAULT_DB_URL.to_owned()).await
-    });
+    tokio::spawn(async move { Indexer::start_reader(&config, &registry, db_url).await });
+
+    port
 }
 
 /// Check if provided error message does match with
@@ -153,23 +212,23 @@ pub fn rpc_call_error_msg_matches(
     })
 }
 
-pub fn get_default_fullnode_rpc_api_addr() -> SocketAddr {
-    format!("127.0.0.1:{}", DEFAULT_SERVER_PORT)
-        .parse()
-        .unwrap()
+pub fn get_available_fullnode_rpc_api_addr() -> SocketAddr {
+    let port = get_available_port();
+    format!("127.0.0.1:{}", port).parse().unwrap()
 }
 
 /// Set up a test indexer fetching from a REST endpoint served by the given
 /// Simulacrum.
 pub async fn start_simulacrum_rest_api_with_write_indexer(
     sim: Arc<Simulacrum>,
+    server_url: Option<SocketAddr>,
+    database_name: Option<String>,
 ) -> (
     JoinHandle<()>,
     PgIndexerStore,
     JoinHandle<Result<(), IndexerError>>,
 ) {
-    let server_url = get_default_fullnode_rpc_api_addr();
-
+    let server_url = server_url.unwrap_or_else(get_available_fullnode_rpc_api_addr);
     let server_handle = tokio::spawn(async move {
         let chain_id = (*sim
             .get_checkpoint_by_sequence_number(0)
@@ -187,6 +246,7 @@ pub async fn start_simulacrum_rest_api_with_write_indexer(
         Some(DEFAULT_DB_URL.to_owned()),
         format!("http://{}", server_url),
         ReaderWriterConfig::writer_mode(None),
+        database_name,
     )
     .await;
     (server_handle, pg_store, pg_handle)
@@ -194,24 +254,24 @@
 pub async fn start_simulacrum_rest_api_with_read_write_indexer(
     sim: Arc<Simulacrum>,
+    database_name: Option<String>,
 ) -> (
     JoinHandle<()>,
     PgIndexerStore,
     JoinHandle<Result<(), IndexerError>>,
     HttpClient,
 ) {
-    let server_url = get_default_fullnode_rpc_api_addr();
+    let server_url = get_available_fullnode_rpc_api_addr();
     let (server_handle, pg_store, pg_handle) =
-        start_simulacrum_rest_api_with_write_indexer(sim).await;
+        start_simulacrum_rest_api_with_write_indexer(sim, Some(server_url), database_name.clone())
+            .await;
 
     // start indexer in read mode
-    start_indexer_reader(format!("http://{}", server_url));
+    let indexer_port = start_indexer_reader(format!("http://{}", server_url), database_name);
 
     // create an RPC client by using the indexer url
     let rpc_client = HttpClientBuilder::default()
-        .build(format!(
-            "http://{DEFAULT_INDEXER_IP}:{DEFAULT_INDEXER_PORT}"
-        ))
+        .build(format!("http://{DEFAULT_INDEXER_IP}:{indexer_port}"))
         .unwrap();
 
     (server_handle, pg_store, pg_handle, rpc_client)
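`SimulacrumApiTestEnvDefinition` is what lets all tests in one rpc-test module share a single Simulacrum, indexer database, and read-mode RPC server instead of starting one per test. A module defines its environment once behind a `OnceLock` and every `#[test]` pulls the initialized handles from it; the sketch below mirrors the pattern the extended_api tests adopt further down (the module name and the seeding helpers `execute_simulacrum_transactions`/`add_checkpoints` are illustrative and assumed to be in scope):

    // Illustrative module-level shared environment built on the helpers above.
    static MY_MODULE_ENV: OnceLock<InitializedSimulacrumEnv> = OnceLock::new();

    fn get_or_init_my_module_env() -> &'static InitializedSimulacrumEnv {
        let definition = SimulacrumApiTestEnvDefinition {
            unique_env_name: "my_module".to_string(),
            env_initializer: Box::new(|| {
                let mut sim = Simulacrum::new();
                // Seed whatever chain state every test in this module expects.
                execute_simulacrum_transactions(&mut sim, 5);
                add_checkpoints(&mut sim, 300);
                sim
            }),
        };
        definition.get_or_init_env(&MY_MODULE_ENV)
    }

Each definition gets its own database (`simulacrum_env_db_<unique_env_name>`) and an OS-assigned RPC port, which is what removes the need for `#[serial]` and the fixed `DEFAULT_INDEXER_PORT`.
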
diff --git a/crates/iota-indexer/tests/ingestion_tests.rs b/crates/iota-indexer/tests/ingestion_tests.rs
index 4488e27db5e..6f5897a8ebd 100644
--- a/crates/iota-indexer/tests/ingestion_tests.rs
+++ b/crates/iota-indexer/tests/ingestion_tests.rs
@@ -46,7 +46,12 @@ mod ingestion_tests {
         // Create a checkpoint which should include the transaction we executed.
         let checkpoint = sim.create_checkpoint();
 
-        let (_, pg_store, _) = start_simulacrum_rest_api_with_write_indexer(Arc::new(sim)).await;
+        let (_, pg_store, _) = start_simulacrum_rest_api_with_write_indexer(
+            Arc::new(sim),
+            None,
+            Some("indexer_ingestion_tests_db".to_string()),
+        )
+        .await;
 
         // Wait for the indexer to catch up to the checkpoint.
         indexer_wait_for_checkpoint(&pg_store, 1).await;
diff --git a/crates/iota-indexer/tests/rpc-tests/extended_api.rs b/crates/iota-indexer/tests/rpc-tests/extended_api.rs
index 7016b705d7d..417ad624d8f 100644
--- a/crates/iota-indexer/tests/rpc-tests/extended_api.rs
+++ b/crates/iota-indexer/tests/rpc-tests/extended_api.rs
@@ -1,7 +1,7 @@
 // Copyright (c) 2024 IOTA Stiftung
 // SPDX-License-Identifier: Apache-2.0
 
-use std::{str::FromStr, sync::Arc};
+use std::{str::FromStr, sync::OnceLock};
 
 use iota_json::{call_args, type_args};
 use iota_json_rpc_api::{
@@ -18,414 +18,401 @@ use iota_types::{
     storage::ReadStore,
     IOTA_FRAMEWORK_ADDRESS,
 };
-use serial_test::serial;
 use simulacrum::Simulacrum;
 use test_cluster::TestCluster;
 
 use crate::common::{
-    indexer_wait_for_checkpoint, start_simulacrum_rest_api_with_read_write_indexer,
-    start_test_cluster_with_read_write_indexer,
+    indexer_wait_for_checkpoint, ApiTestSetup, InitializedSimulacrumEnv,
+    SimulacrumApiTestEnvDefinition,
 };
 
-#[tokio::test]
-#[serial]
-async fn get_epochs() {
-    let mut sim = Simulacrum::new();
+static EXTENDED_API_SHARED_SIMULACRUM_INITIALIZED_ENV: OnceLock<InitializedSimulacrumEnv> =
+    OnceLock::new();
 
-    execute_simulacrum_transactions(&mut sim, 15);
-    add_checkpoints(&mut sim, 300);
-    sim.advance_epoch(false);
+fn get_or_init_shared_extended_api_simulacrum_env() -> &'static InitializedSimulacrumEnv {
+    let extended_api_env = SimulacrumApiTestEnvDefinition {
+        unique_env_name: "extended_api".to_string(),
+        env_initializer: Box::new(|| {
+            let mut sim = Simulacrum::new();
 
-    execute_simulacrum_transactions(&mut sim, 10);
-    add_checkpoints(&mut sim, 300);
-    sim.advance_epoch(false);
+            execute_simulacrum_transactions(&mut sim, 15);
+            add_checkpoints(&mut sim, 300);
+            sim.advance_epoch(false);
 
-    execute_simulacrum_transactions(&mut sim, 5);
-    add_checkpoints(&mut sim, 300);
+            execute_simulacrum_transactions(&mut sim, 10);
+            add_checkpoints(&mut sim, 300);
+            sim.advance_epoch(false);
 
-    let last_checkpoint = sim.get_latest_checkpoint().unwrap();
+            execute_simulacrum_transactions(&mut sim, 5);
+            add_checkpoints(&mut sim, 300);
 
-    let (_, pg_store, _, indexer_client) =
-        start_simulacrum_rest_api_with_read_write_indexer(Arc::new(sim)).await;
-    indexer_wait_for_checkpoint(&pg_store, last_checkpoint.sequence_number).await;
-
-    let epochs = indexer_client.get_epochs(None, None, None).await.unwrap();
-
-    assert_eq!(epochs.data.len(), 3);
-    assert!(!epochs.has_next_page);
-
-    let end_of_epoch_info = epochs.data[0].end_of_epoch_info.as_ref().unwrap();
-    assert_eq!(epochs.data[0].epoch, 0);
-    assert_eq!(epochs.data[0].first_checkpoint_id, 0);
-    assert_eq!(epochs.data[0].epoch_total_transactions, 17);
-    assert_eq!(end_of_epoch_info.last_checkpoint_id, 301);
-
-    let end_of_epoch_info = epochs.data[1].end_of_epoch_info.as_ref().unwrap();
-    assert_eq!(epochs.data[1].epoch, 1);
-    assert_eq!(epochs.data[1].first_checkpoint_id, 302);
-    assert_eq!(epochs.data[1].epoch_total_transactions, 11);
-    assert_eq!(end_of_epoch_info.last_checkpoint_id, 602);
-
-    assert_eq!(epochs.data[2].epoch, 2);
-    assert_eq!(epochs.data[2].first_checkpoint_id, 603);
-    assert_eq!(epochs.data[2].epoch_total_transactions, 0);
-    assert!(epochs.data[2].end_of_epoch_info.is_none());
+            sim
+        }),
+    };
+    extended_api_env.get_or_init_env(&EXTENDED_API_SHARED_SIMULACRUM_INITIALIZED_ENV)
 }
 
-#[tokio::test]
-#[serial]
-async fn get_epochs_descending() {
-    let mut sim = Simulacrum::new();
-
-    execute_simulacrum_transactions(&mut sim, 15);
-    add_checkpoints(&mut sim, 300);
-    sim.advance_epoch(false);
-
-    execute_simulacrum_transactions(&mut sim, 10);
-    add_checkpoints(&mut sim, 300);
-    sim.advance_epoch(false);
-
-    execute_simulacrum_transactions(&mut sim, 5);
-    add_checkpoints(&mut sim, 300);
-
-    let last_checkpoint = sim.get_latest_checkpoint().unwrap();
-
-    let (_, pg_store, _, indexer_client) =
-        start_simulacrum_rest_api_with_read_write_indexer(Arc::new(sim)).await;
-    indexer_wait_for_checkpoint(&pg_store, last_checkpoint.sequence_number).await;
-
-    let epochs = indexer_client
-        .get_epochs(None, None, Some(true))
-        .await
-        .unwrap();
-
-    let actual_epochs_order = epochs
-        .data
-        .iter()
-        .map(|epoch| epoch.epoch)
-        .collect::<Vec<_>>();
-
-    assert_eq!(epochs.data.len(), 3);
-    assert!(!epochs.has_next_page);
-    assert_eq!(actual_epochs_order, [2, 1, 0])
+#[test]
+fn get_epochs() {
+    let InitializedSimulacrumEnv {
+        runtime,
+        sim,
+        store,
+        client,
+    } = get_or_init_shared_extended_api_simulacrum_env();
+
+    runtime.block_on(async move {
+        let last_checkpoint = sim.get_latest_checkpoint().unwrap();
+        indexer_wait_for_checkpoint(&store, last_checkpoint.sequence_number).await;
+
+        let epochs = client.get_epochs(None, None, None).await.unwrap();
+
+        assert_eq!(epochs.data.len(), 3);
+        assert!(!epochs.has_next_page);
+
+        let end_of_epoch_info = epochs.data[0].end_of_epoch_info.as_ref().unwrap();
+        assert_eq!(epochs.data[0].epoch, 0);
+        assert_eq!(epochs.data[0].first_checkpoint_id, 0);
+        assert_eq!(epochs.data[0].epoch_total_transactions, 17);
+        assert_eq!(end_of_epoch_info.last_checkpoint_id, 301);
+
+        let end_of_epoch_info = epochs.data[1].end_of_epoch_info.as_ref().unwrap();
+        assert_eq!(epochs.data[1].epoch, 1);
+        assert_eq!(epochs.data[1].first_checkpoint_id, 302);
+        assert_eq!(epochs.data[1].epoch_total_transactions, 11);
+        assert_eq!(end_of_epoch_info.last_checkpoint_id, 602);
+
+        assert_eq!(epochs.data[2].epoch, 2);
+        assert_eq!(epochs.data[2].first_checkpoint_id, 603);
+        assert_eq!(epochs.data[2].epoch_total_transactions, 0);
+        assert!(epochs.data[2].end_of_epoch_info.is_none());
+    });
 }
 
-#[tokio::test]
-#[serial]
-async fn get_epochs_paging() {
-    let mut sim = Simulacrum::new();
-
-    execute_simulacrum_transactions(&mut sim, 15);
-    add_checkpoints(&mut sim, 300);
-    sim.advance_epoch(false);
-
-    execute_simulacrum_transactions(&mut sim, 10);
-    add_checkpoints(&mut sim, 300);
-    sim.advance_epoch(false);
-
-    execute_simulacrum_transactions(&mut sim, 5);
-    add_checkpoints(&mut sim, 300);
-
-    let last_checkpoint = sim.get_latest_checkpoint().unwrap();
-
-    let (_, pg_store, _, indexer_client) =
-        start_simulacrum_rest_api_with_read_write_indexer(Arc::new(sim)).await;
-    indexer_wait_for_checkpoint(&pg_store, last_checkpoint.sequence_number).await;
-
-    let epochs = indexer_client
-        .get_epochs(None, Some(2), None)
-        .await
-        .unwrap();
-    let actual_epochs_order = epochs
-        .data
-        .iter()
-        .map(|epoch| epoch.epoch)
-        .collect::<Vec<_>>();
-
-    assert_eq!(epochs.data.len(), 2);
-    assert!(epochs.has_next_page);
-    assert_eq!(epochs.next_cursor, Some(1.into()));
-    assert_eq!(actual_epochs_order, [0, 1]);
-
-    let epochs = indexer_client
-        .get_epochs(Some(1.into()), Some(2), None)
-        .await
-        .unwrap();
-    let actual_epochs_order = epochs
-        .data
-        .iter()
-        .map(|epoch| epoch.epoch)
-        .collect::<Vec<_>>();
-
-    assert_eq!(epochs.data.len(), 1);
-    assert!(!epochs.has_next_page);
-    assert_eq!(epochs.next_cursor, Some(2.into()));
-    assert_eq!(actual_epochs_order, [2]);
+#[test]
+fn get_epochs_descending() {
+    let InitializedSimulacrumEnv {
+        runtime,
+        sim,
+        store,
+        client,
+    } = get_or_init_shared_extended_api_simulacrum_env();
+
+    runtime.block_on(async move {
+        let last_checkpoint = sim.get_latest_checkpoint().unwrap();
+        indexer_wait_for_checkpoint(&store, last_checkpoint.sequence_number).await;
+
+        let epochs = client.get_epochs(None, None, Some(true)).await.unwrap();
+
+        let actual_epochs_order = epochs
+            .data
+            .iter()
+            .map(|epoch| epoch.epoch)
+            .collect::<Vec<_>>();
+
+        assert_eq!(epochs.data.len(), 3);
+        assert!(!epochs.has_next_page);
+        assert_eq!(actual_epochs_order, [2, 1, 0])
+    });
 }
 
-#[tokio::test]
-#[serial]
-async fn get_epoch_metrics() {
-    let mut sim = Simulacrum::new();
-
-    execute_simulacrum_transactions(&mut sim, 15);
-    add_checkpoints(&mut sim, 300);
-    sim.advance_epoch(false);
-
-    execute_simulacrum_transactions(&mut sim, 10);
-    add_checkpoints(&mut sim, 300);
-    sim.advance_epoch(false);
-
-    execute_simulacrum_transactions(&mut sim, 5);
-    add_checkpoints(&mut sim, 300);
-
-    let last_checkpoint = sim.get_latest_checkpoint().unwrap();
-
-    let (_, pg_store, _, indexer_client) =
-        start_simulacrum_rest_api_with_read_write_indexer(Arc::new(sim)).await;
-    indexer_wait_for_checkpoint(&pg_store, last_checkpoint.sequence_number).await;
-
-    let epoch_metrics = indexer_client
-        .get_epoch_metrics(None, None, None)
-        .await
-        .unwrap();
-
-    assert_eq!(epoch_metrics.data.len(), 3);
-    assert!(!epoch_metrics.has_next_page);
-
-    let end_of_epoch_info = epoch_metrics.data[0].end_of_epoch_info.as_ref().unwrap();
-    assert_eq!(epoch_metrics.data[0].epoch, 0);
-    assert_eq!(epoch_metrics.data[0].first_checkpoint_id, 0);
-    assert_eq!(epoch_metrics.data[0].epoch_total_transactions, 17);
-    assert_eq!(end_of_epoch_info.last_checkpoint_id, 301);
-
-    let end_of_epoch_info = epoch_metrics.data[1].end_of_epoch_info.as_ref().unwrap();
-    assert_eq!(epoch_metrics.data[1].epoch, 1);
-    assert_eq!(epoch_metrics.data[1].first_checkpoint_id, 302);
-    assert_eq!(epoch_metrics.data[1].epoch_total_transactions, 11);
-    assert_eq!(end_of_epoch_info.last_checkpoint_id, 602);
-
-    assert_eq!(epoch_metrics.data[2].epoch, 2);
-    assert_eq!(epoch_metrics.data[2].first_checkpoint_id, 603);
-    assert_eq!(epoch_metrics.data[2].epoch_total_transactions, 0);
-    assert!(epoch_metrics.data[2].end_of_epoch_info.is_none());
+#[test]
+fn get_epochs_paging() {
+    let InitializedSimulacrumEnv {
+        runtime,
+        sim,
+        store,
+        client,
+    } = get_or_init_shared_extended_api_simulacrum_env();
+
+    runtime.block_on(async move {
+        let last_checkpoint = sim.get_latest_checkpoint().unwrap();
+        indexer_wait_for_checkpoint(&store, last_checkpoint.sequence_number).await;
+
+        let epochs = client.get_epochs(None, Some(2), None).await.unwrap();
+        let actual_epochs_order = epochs
+            .data
+            .iter()
+            .map(|epoch| epoch.epoch)
+            .collect::<Vec<_>>();
+
+        assert_eq!(epochs.data.len(), 2);
+        assert!(epochs.has_next_page);
+        assert_eq!(epochs.next_cursor, Some(1.into()));
+        assert_eq!(actual_epochs_order, [0, 1]);
+
+        let epochs = client
+            .get_epochs(Some(1.into()), Some(2), None)
+            .await
+            .unwrap();
+        let actual_epochs_order = epochs
+            .data
+            .iter()
+            .map(|epoch| epoch.epoch)
+            .collect::<Vec<_>>();
+
+        assert_eq!(epochs.data.len(), 1);
+        assert!(!epochs.has_next_page);
+        assert_eq!(epochs.next_cursor, Some(2.into()));
+        assert_eq!(actual_epochs_order, [2]);
+    });
 }
 
-#[tokio::test]
-#[serial]
-async fn get_epoch_metrics_descending() {
-    let mut sim = Simulacrum::new();
-
-    execute_simulacrum_transactions(&mut sim, 15);
-    add_checkpoints(&mut sim, 300);
-    sim.advance_epoch(false);
-
-    execute_simulacrum_transactions(&mut sim, 10);
-    add_checkpoints(&mut sim, 300);
-    sim.advance_epoch(false);
-
-    execute_simulacrum_transactions(&mut sim, 5);
-    add_checkpoints(&mut sim, 300);
-
-    let last_checkpoint = sim.get_latest_checkpoint().unwrap();
-
-    let (_, pg_store, _, indexer_client) =
-        start_simulacrum_rest_api_with_read_write_indexer(Arc::new(sim)).await;
-    indexer_wait_for_checkpoint(&pg_store, last_checkpoint.sequence_number).await;
-
-    let epochs = indexer_client
-        .get_epoch_metrics(None, None, Some(true))
-        .await
-        .unwrap();
-
-    let actual_epochs_order = epochs
-        .data
-        .iter()
-        .map(|epoch| epoch.epoch)
-        .collect::<Vec<_>>();
-
-    assert_eq!(epochs.data.len(), 3);
-    assert!(!epochs.has_next_page);
-    assert_eq!(actual_epochs_order, [2, 1, 0])
+#[test]
+fn get_epoch_metrics() {
+    let InitializedSimulacrumEnv {
+        runtime,
+        sim,
+        store,
+        client,
+    } = get_or_init_shared_extended_api_simulacrum_env();
+
+    runtime.block_on(async move {
+        let last_checkpoint = sim.get_latest_checkpoint().unwrap();
+        indexer_wait_for_checkpoint(&store, last_checkpoint.sequence_number).await;
+
+        let epoch_metrics = client.get_epoch_metrics(None, None, None).await.unwrap();
+
+        assert_eq!(epoch_metrics.data.len(), 3);
+        assert!(!epoch_metrics.has_next_page);
+
+        let end_of_epoch_info = epoch_metrics.data[0].end_of_epoch_info.as_ref().unwrap();
+        assert_eq!(epoch_metrics.data[0].epoch, 0);
+        assert_eq!(epoch_metrics.data[0].first_checkpoint_id, 0);
+        assert_eq!(epoch_metrics.data[0].epoch_total_transactions, 17);
+        assert_eq!(end_of_epoch_info.last_checkpoint_id, 301);
+
+        let end_of_epoch_info = epoch_metrics.data[1].end_of_epoch_info.as_ref().unwrap();
+        assert_eq!(epoch_metrics.data[1].epoch, 1);
+        assert_eq!(epoch_metrics.data[1].first_checkpoint_id, 302);
+        assert_eq!(epoch_metrics.data[1].epoch_total_transactions, 11);
+        assert_eq!(end_of_epoch_info.last_checkpoint_id, 602);
+
+        assert_eq!(epoch_metrics.data[2].epoch, 2);
+        assert_eq!(epoch_metrics.data[2].first_checkpoint_id, 603);
+        assert_eq!(epoch_metrics.data[2].epoch_total_transactions, 0);
+        assert!(epoch_metrics.data[2].end_of_epoch_info.is_none());
+    });
 }
 
-#[tokio::test]
-#[serial]
-async fn get_epoch_metrics_paging() {
-    let mut sim = Simulacrum::new();
-
-    execute_simulacrum_transactions(&mut sim, 15);
-    add_checkpoints(&mut sim, 300);
-    sim.advance_epoch(false);
-
-    execute_simulacrum_transactions(&mut sim, 10);
-    add_checkpoints(&mut sim, 300);
-    sim.advance_epoch(false);
-
-    execute_simulacrum_transactions(&mut sim, 5);
-    add_checkpoints(&mut sim, 300);
-
-    let last_checkpoint = sim.get_latest_checkpoint().unwrap();
-
-    let (_, pg_store, _, indexer_client) =
-        start_simulacrum_rest_api_with_read_write_indexer(Arc::new(sim)).await;
-    indexer_wait_for_checkpoint(&pg_store, last_checkpoint.sequence_number).await;
-
-    let epochs = indexer_client
-        .get_epoch_metrics(None, Some(2), None)
-        .await
-        .unwrap();
-    let actual_epochs_order = epochs
-        .data
-        .iter()
-        .map(|epoch| epoch.epoch)
-        .collect::<Vec<_>>();
-
-    assert_eq!(epochs.data.len(), 2);
-    assert!(epochs.has_next_page);
-    assert_eq!(epochs.next_cursor, Some(1.into()));
-    assert_eq!(actual_epochs_order, [0, 1]);
-
-    let epochs = indexer_client
-        .get_epoch_metrics(Some(1.into()), Some(2), None)
-        .await
-        .unwrap();
-    let actual_epochs_order = epochs
-        .data
-        .iter()
-        .map(|epoch| epoch.epoch)
-        .collect::<Vec<_>>();
-
-    assert_eq!(epochs.data.len(), 1);
-    assert!(!epochs.has_next_page);
-    assert_eq!(epochs.next_cursor, Some(2.into()));
-    assert_eq!(actual_epochs_order, [2]);
+#[test]
+fn get_epoch_metrics_descending() {
+    let InitializedSimulacrumEnv {
+        runtime,
+        sim,
+        store,
+        client,
+    } = get_or_init_shared_extended_api_simulacrum_env();
+
+    runtime.block_on(async move {
+        let last_checkpoint = sim.get_latest_checkpoint().unwrap();
+        indexer_wait_for_checkpoint(&store, last_checkpoint.sequence_number).await;
+
+        let epochs = client
+            .get_epoch_metrics(None, None, Some(true))
+            .await
+            .unwrap();
+
+        let actual_epochs_order = epochs
+            .data
+            .iter()
+            .map(|epoch| epoch.epoch)
+            .collect::<Vec<_>>();
+
+        assert_eq!(epochs.data.len(), 3);
+        assert!(!epochs.has_next_page);
+        assert_eq!(actual_epochs_order, [2, 1, 0]);
+    });
 }
 
-#[tokio::test]
-#[serial]
-async fn get_current_epoch() {
-    let mut sim = Simulacrum::new();
-
-    execute_simulacrum_transactions(&mut sim, 15);
-    add_checkpoints(&mut sim, 300);
-    sim.advance_epoch(false);
-
-    execute_simulacrum_transactions(&mut sim, 10);
-    add_checkpoints(&mut sim, 300);
-    sim.advance_epoch(false);
-
-    execute_simulacrum_transactions(&mut sim, 5);
-    add_checkpoints(&mut sim, 300);
-
-    let last_checkpoint = sim.get_latest_checkpoint().unwrap();
-
-    let (_, pg_store, _, indexer_client) =
-        start_simulacrum_rest_api_with_read_write_indexer(Arc::new(sim)).await;
-    indexer_wait_for_checkpoint(&pg_store, last_checkpoint.sequence_number).await;
-
-    let current_epoch = indexer_client.get_current_epoch().await.unwrap();
+#[test]
+fn get_epoch_metrics_paging() {
+    let InitializedSimulacrumEnv {
+        runtime,
+        sim,
+        store,
+        client,
+    } = get_or_init_shared_extended_api_simulacrum_env();
+
+    runtime.block_on(async move {
+        let last_checkpoint = sim.get_latest_checkpoint().unwrap();
+        indexer_wait_for_checkpoint(&store, last_checkpoint.sequence_number).await;
+
+        let epochs = client.get_epoch_metrics(None, Some(2), None).await.unwrap();
+        let actual_epochs_order = epochs
+            .data
+            .iter()
+            .map(|epoch| epoch.epoch)
+            .collect::<Vec<_>>();
+
+        assert_eq!(epochs.data.len(), 2);
+        assert!(epochs.has_next_page);
+        assert_eq!(epochs.next_cursor, Some(1.into()));
+        assert_eq!(actual_epochs_order, [0, 1]);
+
+        let epochs = client
+            .get_epoch_metrics(Some(1.into()), Some(2), None)
+            .await
+            .unwrap();
+        let actual_epochs_order = epochs
+            .data
+            .iter()
+            .map(|epoch| epoch.epoch)
+            .collect::<Vec<_>>();
+
+        assert_eq!(epochs.data.len(), 1);
+        assert!(!epochs.has_next_page);
+        assert_eq!(epochs.next_cursor, Some(2.into()));
+        assert_eq!(actual_epochs_order, [2]);
+    });
+}
 
-    assert_eq!(current_epoch.epoch, 2);
-    assert_eq!(current_epoch.first_checkpoint_id, 603);
-    assert_eq!(current_epoch.epoch_total_transactions, 0);
-    assert!(current_epoch.end_of_epoch_info.is_none());
+#[test]
+fn get_current_epoch() {
+    let InitializedSimulacrumEnv {
+        runtime,
+        sim,
+        store,
+        client,
+    } = get_or_init_shared_extended_api_simulacrum_env();
+
+    runtime.block_on(async move {
+        let last_checkpoint = sim.get_latest_checkpoint().unwrap();
+        indexer_wait_for_checkpoint(&store, last_checkpoint.sequence_number).await;
+
+        let current_epoch = client.get_current_epoch().await.unwrap();
+
+        assert_eq!(current_epoch.epoch, 2);
+        assert_eq!(current_epoch.first_checkpoint_id, 603);
+        assert_eq!(current_epoch.epoch_total_transactions, 0);
+        assert!(current_epoch.end_of_epoch_info.is_none());
+    });
 }
 
 #[ignore = "https://github.com/iotaledger/iota/issues/2197#issuecomment-2371642744"]
-#[tokio::test]
-#[serial]
-async fn get_network_metrics() {
-    let (_, pg_store, indexer_client) = start_test_cluster_with_read_write_indexer(None).await;
-    indexer_wait_for_checkpoint(&pg_store, 10).await;
-
-    let network_metrics = indexer_client.get_network_metrics().await.unwrap();
-
-    println!("{:#?}", network_metrics);
+#[test]
+fn get_network_metrics() {
+    let ApiTestSetup {
+        runtime,
+        store,
+        client,
+        ..
+    } = ApiTestSetup::get_or_init();
+
+    runtime.block_on(async move {
+        indexer_wait_for_checkpoint(&store, 10).await;
+
+        let network_metrics = client.get_network_metrics().await.unwrap();
+
+        println!("{:#?}", network_metrics);
+    });
 }
 
 #[ignore = "https://github.com/iotaledger/iota/issues/2197#issuecomment-2371642744"]
-#[tokio::test]
-#[serial]
-async fn get_move_call_metrics() {
-    let (cluster, pg_store, indexer_client) =
-        start_test_cluster_with_read_write_indexer(None).await;
-
-    execute_move_fn(&cluster).await.unwrap();
-
-    let latest_checkpoint_sn = cluster
-        .rpc_client()
-        .get_latest_checkpoint_sequence_number()
-        .await
-        .unwrap();
-    indexer_wait_for_checkpoint(&pg_store, latest_checkpoint_sn.into_inner()).await;
-
-    let move_call_metrics = indexer_client.get_move_call_metrics().await.unwrap();
-
-    // TODO: Why is the move call not included in the stats?
-    assert_eq!(move_call_metrics.rank_3_days.len(), 0);
-    assert_eq!(move_call_metrics.rank_7_days.len(), 0);
-    assert_eq!(move_call_metrics.rank_30_days.len(), 0);
+#[test]
+fn get_move_call_metrics() {
+    let ApiTestSetup {
+        runtime,
+        store,
+        client,
+        cluster,
+        ..
+    } = ApiTestSetup::get_or_init();
+
+    runtime.block_on(async move {
+        execute_move_fn(&cluster).await.unwrap();
+
+        let latest_checkpoint_sn = cluster
+            .rpc_client()
+            .get_latest_checkpoint_sequence_number()
+            .await
+            .unwrap();
+        indexer_wait_for_checkpoint(&store, latest_checkpoint_sn.into_inner()).await;
+
+        let move_call_metrics = client.get_move_call_metrics().await.unwrap();
+
+        // TODO: Why is the move call not included in the stats?
+        assert_eq!(move_call_metrics.rank_3_days.len(), 0);
+        assert_eq!(move_call_metrics.rank_7_days.len(), 0);
+        assert_eq!(move_call_metrics.rank_30_days.len(), 0);
+    });
 }
 
 #[ignore = "https://github.com/iotaledger/iota/issues/2197#issuecomment-2371642744"]
-#[tokio::test]
-#[serial]
-async fn get_latest_address_metrics() {
-    let (_, pg_store, indexer_client) = start_test_cluster_with_read_write_indexer(None).await;
-    indexer_wait_for_checkpoint(&pg_store, 10).await;
-
-    let address_metrics = indexer_client.get_latest_address_metrics().await.unwrap();
-
-    println!("{:#?}", address_metrics);
+#[test]
+fn get_latest_address_metrics() {
+    let ApiTestSetup {
+        runtime,
+        store,
+        client,
+        ..
+    } = ApiTestSetup::get_or_init();
+
+    runtime.block_on(async move {
+        indexer_wait_for_checkpoint(&store, 10).await;
+
+        let address_metrics = client.get_latest_address_metrics().await.unwrap();
+
+        println!("{:#?}", address_metrics);
+    });
 }
 
 #[ignore = "https://github.com/iotaledger/iota/issues/2197#issuecomment-2371642744"]
-#[tokio::test]
-#[serial]
-async fn get_checkpoint_address_metrics() {
-    let (_, pg_store, indexer_client) = start_test_cluster_with_read_write_indexer(None).await;
-    indexer_wait_for_checkpoint(&pg_store, 10).await;
-
-    let address_metrics = indexer_client
-        .get_checkpoint_address_metrics(0)
-        .await
-        .unwrap();
-
-    println!("{:#?}", address_metrics);
+#[test]
+fn get_checkpoint_address_metrics() {
+    let ApiTestSetup {
+        runtime,
+        store,
+        client,
+        ..
+    } = ApiTestSetup::get_or_init();
+
+    runtime.block_on(async move {
+        indexer_wait_for_checkpoint(&store, 10).await;
+
+        let address_metrics = client.get_checkpoint_address_metrics(0).await.unwrap();
+
+        println!("{:#?}", address_metrics);
+    });
 }
 
 #[ignore = "https://github.com/iotaledger/iota/issues/2197#issuecomment-2371642744"]
-#[tokio::test]
-#[serial]
-async fn get_all_epoch_address_metrics() {
-    let (_, pg_store, indexer_client) = start_test_cluster_with_read_write_indexer(None).await;
-    indexer_wait_for_checkpoint(&pg_store, 10).await;
-
-    let address_metrics = indexer_client
-        .get_all_epoch_address_metrics(None)
-        .await
-        .unwrap();
-
-    println!("{:#?}", address_metrics);
+#[test]
+fn get_all_epoch_address_metrics() {
+    let ApiTestSetup {
+        runtime,
+        store,
+        client,
+        ..
+    } = ApiTestSetup::get_or_init();
+
+    runtime.block_on(async move {
+        indexer_wait_for_checkpoint(&store, 10).await;
+
+        let address_metrics = client.get_all_epoch_address_metrics(None).await.unwrap();
+
+        println!("{:#?}", address_metrics);
+    });
 }
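The expected transaction total in `get_total_transactions` below changes from 6 to 33 because the test now runs against the shared extended_api environment seeded above instead of a fresh Simulacrum with 5 transactions. A rough accounting, assuming `network_total_transactions` counts the genesis transaction and one epoch-change transaction per `advance_epoch(false)` call:

    // Back-of-the-envelope check for the shared environment's expected total.
    let executed = 15 + 10 + 5; // transactions executed in the three seeding phases
    let epoch_changes = 2;      // two advance_epoch(false) calls
    let genesis = 1;            // genesis transaction counted in the running total
    assert_eq!(executed + epoch_changes + genesis, 33);
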
 
-#[tokio::test]
-#[serial]
-async fn get_total_transactions() {
-    let mut sim = Simulacrum::new();
-    execute_simulacrum_transactions(&mut sim, 5);
-
-    let latest_checkpoint = sim.create_checkpoint();
-    let total_transactions_count = latest_checkpoint.network_total_transactions;
-
-    let (_, pg_store, _, indexer_client) =
-        start_simulacrum_rest_api_with_read_write_indexer(Arc::new(sim)).await;
-    indexer_wait_for_checkpoint(&pg_store, latest_checkpoint.sequence_number).await;
-
-    let transactions_cnt = indexer_client.get_total_transactions().await.unwrap();
-    assert_eq!(transactions_cnt.into_inner(), total_transactions_count);
-    assert_eq!(transactions_cnt.into_inner(), 6);
+#[test]
+fn get_total_transactions() {
+    let InitializedSimulacrumEnv {
+        runtime,
+        sim,
+        store,
+        client,
+    } = get_or_init_shared_extended_api_simulacrum_env();
+
+    runtime.block_on(async move {
+        let latest_checkpoint = sim.get_latest_checkpoint().unwrap();
+        let total_transactions_count = latest_checkpoint.network_total_transactions;
+        indexer_wait_for_checkpoint(&store, latest_checkpoint.sequence_number).await;
+
+        let transactions_cnt = client.get_total_transactions().await.unwrap();
+        assert_eq!(transactions_cnt.into_inner(), total_transactions_count);
+        assert_eq!(transactions_cnt.into_inner(), 33);
+    });
 }
 
 async fn execute_move_fn(cluster: &TestCluster) -> Result<(), anyhow::Error> {
diff --git a/crates/iota-indexer/tests/rpc-tests/main.rs b/crates/iota-indexer/tests/rpc-tests/main.rs
index 10826b1da04..c4bb2b3842b 100644
--- a/crates/iota-indexer/tests/rpc-tests/main.rs
+++ b/crates/iota-indexer/tests/rpc-tests/main.rs
@@ -5,7 +5,7 @@
 #[path = "../common/mod.rs"]
 mod common;
 
-#[cfg(feature = "pg_integration")]
+#[cfg(feature = "shared_test_runtime")]
 mod extended_api;
 
 #[cfg(feature = "shared_test_runtime")]