diff --git a/crates/iota-graphql-rpc/src/test_infra/cluster.rs b/crates/iota-graphql-rpc/src/test_infra/cluster.rs index ca02d6c00ab..36aff4343d1 100644 --- a/crates/iota-graphql-rpc/src/test_infra/cluster.rs +++ b/crates/iota-graphql-rpc/src/test_infra/cluster.rs @@ -69,11 +69,11 @@ pub async fn start_cluster( Some(db_url), val_fn.rpc_url().to_string(), ReaderWriterConfig::writer_mode(None), - None, // reset_database true, Some(data_ingestion_path), cancellation_token.clone(), + None, ) .await; @@ -134,11 +134,11 @@ pub async fn serve_executor( Some(db_url), format!("http://{}", executor_server_url), ReaderWriterConfig::writer_mode(snapshot_config.clone()), - Some(&graphql_connection_config.db_name()), // reset_database true, Some(data_ingestion_path), cancellation_token.clone(), + Some(&graphql_connection_config.db_name()), ) .await; diff --git a/crates/iota-indexer/src/test_utils.rs b/crates/iota-indexer/src/test_utils.rs index 8cdb5686643..4058acc5634 100644 --- a/crates/iota-indexer/src/test_utils.rs +++ b/crates/iota-indexer/src/test_utils.rs @@ -51,11 +51,11 @@ pub async fn start_test_indexer( db_url, rpc_url, reader_writer_config, - new_database, // reset_database false, Some(data_ingestion_path), CancellationToken::new(), + new_database, ) .await } @@ -67,18 +67,23 @@ pub async fn start_test_indexer_impl( db_url: Option, rpc_url: String, reader_writer_config: ReaderWriterConfig, - new_database: Option<&str>, - reset_database: bool, + mut reset_database: bool, data_ingestion_path: Option, cancel: CancellationToken, + new_database: Option<&str>, ) -> (PgIndexerStore, JoinHandle>) { - let db_url = db_url.unwrap_or_else(|| { + let mut db_url = db_url.unwrap_or_else(|| { let pg_host = env::var("POSTGRES_HOST").unwrap_or_else(|_| "localhost".into()); let pg_port = env::var("POSTGRES_PORT").unwrap_or_else(|_| "32770".into()); let pw = env::var("POSTGRES_PASSWORD").unwrap_or_else(|_| "postgrespw".into()); format!("postgres://postgres:{pw}@{pg_host}:{pg_port}") }); 
+ if let Some(new_database) = new_database { + db_url = replace_db_name(&db_url, new_database).0; + reset_database = true; + }; + let mut config = IndexerConfig { db_url: Some(db_url.clone().into()), rpc_client_url: rpc_url, @@ -140,7 +145,6 @@ pub async fn start_test_indexer_impl( pub fn create_pg_store( db_url: Secret, reset_database: bool, - new_database: Option<&str> ) -> PgIndexerStore { // Reduce the connection pool size to 10 for testing // to prevent maxing out diff --git a/crates/iota-indexer/tests/common/mod.rs b/crates/iota-indexer/tests/common/mod.rs index d744b14a9a0..66faf203388 100644 --- a/crates/iota-indexer/tests/common/mod.rs +++ b/crates/iota-indexer/tests/common/mod.rs @@ -70,7 +70,7 @@ impl ApiTestSetup { pub struct SimulacrumTestSetup { pub runtime: Runtime, pub sim: Arc, - pub store: PgIndexerStore, + pub store: PgIndexerStore, /// Indexer RPC Client pub client: HttpClient, } @@ -78,16 +78,23 @@ pub struct SimulacrumTestSetup { impl SimulacrumTestSetup { pub fn get_or_init<'a>( unique_env_name: &str, - env_initializer: impl Fn() -> Simulacrum, + env_initializer: impl Fn(PathBuf) -> Simulacrum, initialized_env_container: &'a OnceLock, ) -> &'a SimulacrumTestSetup { initialized_env_container.get_or_init(|| { let runtime = tokio::runtime::Runtime::new().unwrap(); - let sim = Arc::new(env_initializer()); + let data_ingestion_path = tempdir().unwrap().into_path(); + + let sim = env_initializer(data_ingestion_path.clone()); + let sim = Arc::new(sim); + let db_name = format!("simulacrum_env_db_{}", unique_env_name); - let (_, store, _, client) = runtime.block_on( - start_simulacrum_rest_api_with_read_write_indexer(sim.clone(), Some(&db_name)), - ); + let (_, store, _, client) = + runtime.block_on(start_simulacrum_rest_api_with_read_write_indexer( + sim.clone(), + data_ingestion_path, + Some(&db_name), + )); SimulacrumTestSetup { runtime, @@ -193,11 +200,15 @@ pub async fn indexer_wait_for_object( } /// Start an Indexer instance in `Read` mode -fn 
start_indexer_reader(fullnode_rpc_url: impl Into<String>, data_ingestion_path: PathBuf, database_name: Option<&str>) -> u16 { +fn start_indexer_reader( + fullnode_rpc_url: impl Into<String>, + data_ingestion_path: PathBuf, + database_name: Option<&str>, +) -> u16 { let db_url = get_indexer_db_url(database_name); let port = get_available_port(DEFAULT_INDEXER_IP); let config = IndexerConfig { - db_url: Some(db_url.clone()), + db_url: Some(db_url.clone().into()), rpc_client_url: fullnode_rpc_url.into(), reset_db: true, rpc_server_worker: true, @@ -210,9 +221,9 @@ fn start_indexer_reader(fullnode_rpc_url: impl Into<String>, data_ingestion_path let registry = prometheus::Registry::default(); init_metrics(&registry); - tokio::spawn(async move { - Indexer::start_reader::(&config, &registry, db_url).await - }); + tokio::spawn( + async move { Indexer::start_reader::(&config, &registry, db_url).await }, + ); port } @@ -275,15 +286,18 @@ pub async fn start_simulacrum_rest_api_with_read_write_indexer( let simulacrum_server_url = new_local_tcp_socket_for_testing(); let (server_handle, pg_store, pg_handle) = start_simulacrum_rest_api_with_write_indexer( sim, - data_ingestion_path.clone() + data_ingestion_path.clone(), Some(simulacrum_server_url), database_name, ) .await; // start indexer in read mode - let indexer_port = - start_indexer_reader(format!("http://{}", simulacrum_server_url), data_ingestion_path, database_name); + let indexer_port = start_indexer_reader( + format!("http://{}", simulacrum_server_url), + data_ingestion_path, + database_name, + ); // create an RPC client by using the indexer url let rpc_client = HttpClientBuilder::default() diff --git a/crates/iota-indexer/tests/ingestion_tests.rs b/crates/iota-indexer/tests/ingestion_tests.rs index 34fa6336049..30f115c10f0 100644 --- a/crates/iota-indexer/tests/ingestion_tests.rs +++ b/crates/iota-indexer/tests/ingestion_tests.rs @@ -54,9 +54,9 @@ mod ingestion_tests { let (_, pg_store, _) = start_simulacrum_rest_api_with_write_indexer( Arc::new(sim),
- data_ingestion_path + data_ingestion_path, None, - Some("indexer_ingestion_tests_db".to_string()), + Some("indexer_ingestion_tests_db"), ) .await; @@ -102,8 +102,13 @@ mod ingestion_tests { // Create a checkpoint which should include the transaction we executed. let _ = sim.create_checkpoint(); - let (_, pg_store, _) = - start_simulacrum_rest_api_with_write_indexer(Arc::new(sim), data_ingestion_path).await; + let (_, pg_store, _) = start_simulacrum_rest_api_with_write_indexer( + Arc::new(sim), + data_ingestion_path, + None, + Some("indexer_ingestion_tests_db"), + ) + .await; indexer_wait_for_checkpoint(&pg_store, 1).await; diff --git a/crates/iota-indexer/tests/rpc-tests/extended_api.rs b/crates/iota-indexer/tests/rpc-tests/extended_api.rs index 1ba66aa75bf..f77c156b3d4 100644 --- a/crates/iota-indexer/tests/rpc-tests/extended_api.rs +++ b/crates/iota-indexer/tests/rpc-tests/extended_api.rs @@ -28,20 +28,19 @@ static EXTENDED_API_SHARED_SIMULACRUM_INITIALIZED_ENV: OnceLock &'static SimulacrumTestSetup { - let data_ingestion_path = tempdir().unwrap().into_path(); SimulacrumTestSetup::get_or_init( "extended_api", - || { + |data_ingestion_path| { let mut sim = Simulacrum::new(); - sim.set_data_ingestion_path(data_ingestion_path.clone()); + sim.set_data_ingestion_path(data_ingestion_path); execute_simulacrum_transactions(&mut sim, 15); add_checkpoints(&mut sim, 300); - sim.advance_epoch(); + sim.advance_epoch(); execute_simulacrum_transactions(&mut sim, 10); add_checkpoints(&mut sim, 300); - sim.advance_epoch(); + sim.advance_epoch(); execute_simulacrum_transactions(&mut sim, 5); add_checkpoints(&mut sim, 300);