diff --git a/config/datanode.example.toml b/config/datanode.example.toml
index 2b4b7b4b1b9a..2963327426bc 100644
--- a/config/datanode.example.toml
+++ b/config/datanode.example.toml
@@ -43,6 +43,7 @@ sync_write = false
 [storage]
 # The working home directory.
 data_home = "/tmp/greptimedb/"
+# Storage type.
 type = "File"
 # TTL for all tables. Disabled by default.
 # global_ttl = "7d"
@@ -53,6 +54,10 @@ type = "File"
 # The local file cache capacity in bytes.
 # cache_capacity = "256MB"
 
+# Additional storage providers, selectable per table via the `storage` table option:
+# [[storage.providers]]
+# type = "S3"
+
 # Mito engine options
 [[region_engine]]
 [region_engine.mito]
diff --git a/src/client/src/error.rs b/src/client/src/error.rs
index 6b25e5c58ac3..ae573d037de3 100644
--- a/src/client/src/error.rs
+++ b/src/client/src/error.rs
@@ -131,3 +131,15 @@ impl From<Status> for Error {
         Self::Server { code, msg }
     }
 }
+
+impl Error {
+    pub fn should_retry(&self) -> bool {
+        !matches!(
+            self,
+            Self::RegionServer {
+                code: Code::InvalidArgument,
+                ..
+            }
+        )
+    }
+}
diff --git a/src/client/src/region.rs b/src/client/src/region.rs
index 8a5895d35fc7..95bef40b2ac2 100644
--- a/src/client/src/region.rs
+++ b/src/client/src/region.rs
@@ -29,7 +29,6 @@ use prost::Message;
 use snafu::{location, Location, OptionExt, ResultExt};
 use tokio_stream::StreamExt;
 
-use crate::error::Error::RegionServer;
 use crate::error::{
     self, ConvertFlightDataSnafu, IllegalDatabaseResponseSnafu, IllegalFlightMessagesSnafu,
     MissingFieldSnafu, Result, ServerSnafu,
@@ -45,7 +44,7 @@ pub struct RegionRequester {
 impl Datanode for RegionRequester {
     async fn handle(&self, request: RegionRequest) -> MetaResult<AffectedRows> {
         self.handle_inner(request).await.map_err(|err| {
-            if matches!(err, RegionServer { .. }) {
+            if err.should_retry() {
                 meta_error::Error::RetryLater {
                     source: BoxedError::new(err),
                 }
diff --git a/src/cmd/src/datanode.rs b/src/cmd/src/datanode.rs
index c007ef8a70e9..e1d3163d0164 100644
--- a/src/cmd/src/datanode.rs
+++ b/src/cmd/src/datanode.rs
@@ -214,7 +214,7 @@ mod tests {
     use std::time::Duration;
 
     use common_test_util::temp_dir::create_named_temp_file;
-    use datanode::config::{FileConfig, ObjectStoreConfig};
+    use datanode::config::{FileConfig, GcsConfig, ObjectStoreConfig, S3Config};
     use servers::heartbeat_options::HeartbeatOptions;
     use servers::Mode;
 
@@ -251,8 +251,17 @@ mod tests {
             sync_write = false
 
             [storage]
-            type = "File"
             data_home = "/tmp/greptimedb/"
+            type = "File"
+
+            [[storage.providers]]
+            type = "Gcs"
+            bucket = "foo"
+            endpoint = "bar"
+
+            [[storage.providers]]
+            type = "S3"
+            bucket = "foo"
 
             [logging]
             level = "debug"
@@ -305,6 +314,15 @@ mod tests {
             &options.storage.store,
             ObjectStoreConfig::File(FileConfig { .. })
         ));
+        assert_eq!(options.storage.providers.len(), 2);
+        assert!(matches!(
+            options.storage.providers[0],
+            ObjectStoreConfig::Gcs(GcsConfig { .. })
+        ));
+        assert!(matches!(
+            options.storage.providers[1],
+            ObjectStoreConfig::S3(S3Config { .. })
+        ));
 
         assert_eq!("debug", options.logging.level.unwrap());
         assert_eq!("/tmp/greptimedb/test/logs".to_string(), options.logging.dir);
diff --git a/src/cmd/src/options.rs b/src/cmd/src/options.rs
index bc340d588a3e..5e730de7d2c5 100644
--- a/src/cmd/src/options.rs
+++ b/src/cmd/src/options.rs
@@ -238,7 +238,7 @@ mod tests {
             .unwrap();
 
         // Check the configs from environment variables.
-        match opts.storage.store {
+        match &opts.storage.store {
             ObjectStoreConfig::S3(s3_config) => {
                 assert_eq!(s3_config.bucket, "mybucket".to_string());
             }
diff --git a/src/cmd/src/standalone.rs b/src/cmd/src/standalone.rs
index 30cb33234866..2ebd29f7987d 100644
--- a/src/cmd/src/standalone.rs
+++ b/src/cmd/src/standalone.rs
@@ -426,6 +426,7 @@ mod tests {
     use auth::{Identity, Password, UserProviderRef};
     use common_base::readable_size::ReadableSize;
     use common_test_util::temp_dir::create_named_temp_file;
+    use datanode::config::{FileConfig, GcsConfig};
     use servers::Mode;
 
     use super::*;
@@ -473,8 +474,16 @@ mod tests {
             purge_interval = "10m"
             read_batch_size = 128
             sync_write = false
+
             [storage]
+            data_home = "/tmp/greptimedb/"
+            type = "File"
+
+            [[storage.providers]]
+            type = "Gcs"
+            bucket = "foo"
+            endpoint = "bar"
+
+            [[storage.providers]]
             type = "S3"
             access_key_id = "access_key_id"
             secret_access_key = "secret_access_key"
@@ -524,7 +533,16 @@ mod tests {
 
         assert_eq!("/tmp/greptimedb/test/wal", dn_opts.wal.dir.unwrap());
 
-        match &dn_opts.storage.store {
+        assert!(matches!(
+            &dn_opts.storage.store,
+            datanode::config::ObjectStoreConfig::File(FileConfig { .. })
+        ));
+        assert_eq!(dn_opts.storage.providers.len(), 2);
+        assert!(matches!(
+            dn_opts.storage.providers[0],
+            datanode::config::ObjectStoreConfig::Gcs(GcsConfig { .. })
+        ));
+        match &dn_opts.storage.providers[1] {
             datanode::config::ObjectStoreConfig::S3(s3_config) => {
                 assert_eq!(
                     "Secret([REDACTED alloc::string::String])".to_string(),
diff --git a/src/datanode/src/config.rs b/src/datanode/src/config.rs
index 3c2e9ff88774..f26b0828c873 100644
--- a/src/datanode/src/config.rs
+++ b/src/datanode/src/config.rs
@@ -48,6 +48,18 @@ pub enum ObjectStoreConfig {
     Gcs(GcsConfig),
 }
 
+impl ObjectStoreConfig {
+    pub fn name(&self) -> &'static str {
+        match self {
+            Self::File(_) => "File",
+            Self::S3(_) => "S3",
+            Self::Oss(_) => "Oss",
+            Self::Azblob(_) => "Azblob",
+            Self::Gcs(_) => "Gcs",
+        }
+    }
+}
+
 /// Storage engine config
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(default)]
@@ -63,6 +75,7 @@ pub struct StorageConfig {
     pub data_home: String,
     #[serde(flatten)]
     pub store: ObjectStoreConfig,
+    pub providers: Vec<ObjectStoreConfig>,
 }
 
 impl Default for StorageConfig {
@@ -71,6 +84,7 @@ impl Default for StorageConfig {
             global_ttl: None,
             data_home: DEFAULT_DATA_HOME.to_string(),
             store: ObjectStoreConfig::default(),
+            providers: vec![],
         }
     }
 }
@@ -295,7 +309,7 @@ mod tests {
             secret_access_key = "secret_access_key"
         "#;
         let opts: DatanodeOptions = toml::from_str(toml_str).unwrap();
-        match opts.storage.store {
+        match &opts.storage.store {
             ObjectStoreConfig::S3(cfg) => {
                 assert_eq!(
                     "Secret([REDACTED alloc::string::String])".to_string(),
diff --git a/src/datanode/src/datanode.rs b/src/datanode/src/datanode.rs
index f8a658f55aed..8910f444e328 100644
--- a/src/datanode/src/datanode.rs
+++ b/src/datanode/src/datanode.rs
@@ -354,7 +354,6 @@ impl DatanodeBuilder {
                 ));
             }
         }
-        info!("going to open {} regions", regions.len());
 
         let semaphore = Arc::new(tokio::sync::Semaphore::new(OPEN_REGION_PARALLELISM));
         let mut tasks = vec![];
@@ -417,21 +416,15 @@ impl DatanodeBuilder {
         );
 
         let table_provider_factory = Arc::new(DummyTableProviderFactory);
-
         let mut region_server = RegionServer::with_table_provider(
             query_engine,
             runtime,
             event_listener,
             table_provider_factory,
         );
 
-        let object_store = store::new_object_store(opts).await?;
-        let object_store_manager = ObjectStoreManager::new(
-            "default", // TODO: use a name which is set in the configuration when #919 is done.
-            object_store,
-        );
-        let engines =
-            Self::build_store_engines(opts, log_store, Arc::new(object_store_manager)).await?;
+        let object_store_manager = Self::build_object_store_manager(opts).await?;
+        let engines = Self::build_store_engines(opts, log_store, object_store_manager).await?;
         for engine in engines {
             region_server.register_engine(engine);
         }
@@ -496,6 +489,21 @@ impl DatanodeBuilder {
         }
         Ok(engines)
     }
+
+    /// Builds [ObjectStoreManager]
+    async fn build_object_store_manager(opts: &DatanodeOptions) -> Result<ObjectStoreManagerRef> {
+        let object_store =
+            store::new_object_store(opts.storage.store.clone(), &opts.storage.data_home).await?;
+        let default_name = opts.storage.store.name();
+        let mut object_store_manager = ObjectStoreManager::new(default_name, object_store);
+        for store in &opts.storage.providers {
+            object_store_manager.add(
+                store.name(),
+                store::new_object_store(store.clone(), &opts.storage.data_home).await?,
+            );
+        }
+        Ok(Arc::new(object_store_manager))
+    }
 }
 
 #[cfg(test)]
diff --git a/src/datanode/src/store.rs b/src/datanode/src/store.rs
index ff870315f0b7..6b6eac2a09ae 100644
--- a/src/datanode/src/store.rs
+++ b/src/datanode/src/store.rs
@@ -32,12 +32,15 @@ use object_store::util::normalize_dir;
 use object_store::{util, HttpClient, ObjectStore, ObjectStoreBuilder};
 use snafu::prelude::*;
 
-use crate::config::{DatanodeOptions, ObjectStoreConfig, DEFAULT_OBJECT_STORE_CACHE_SIZE};
+use crate::config::{ObjectStoreConfig, DEFAULT_OBJECT_STORE_CACHE_SIZE};
 use crate::error::{self, Result};
 
-pub(crate) async fn new_object_store(opts: &DatanodeOptions) -> Result<ObjectStore> {
-    let data_home = normalize_dir(&opts.storage.data_home);
-    let object_store = match &opts.storage.store {
+pub(crate) async fn new_object_store(
+    store: ObjectStoreConfig,
+    data_home: &str,
+) -> Result<ObjectStore> {
+    let data_home = normalize_dir(data_home);
+    let object_store = match &store {
         ObjectStoreConfig::File(file_config) => {
             fs::new_fs_object_store(&data_home, file_config).await
         }
@@ -50,9 +53,8 @@ pub(crate) async fn new_object_store(opts: &DatanodeOptions) -> Result<ObjectStore> {
diff --git a/src/sql/src/parsers/create_parser.rs b/src/sql/src/parsers/create_parser.rs
--- a/src/sql/src/parsers/create_parser.rs
+++ b/src/sql/src/parsers/create_parser.rs
@@ ... @@ impl<'a> ParserContext<'a> {
                 }
             );
         }
+        // Sorts options so that `test_display_create_table` can always pass.
+        let options = options.into_iter().sorted().collect();
         let create_table = CreateTable {
             if_not_exists,
             name: table_name,
diff --git a/src/sql/src/statements/create.rs b/src/sql/src/statements/create.rs
index 58957879016d..e7fabd5a2d92 100644
--- a/src/sql/src/statements/create.rs
+++ b/src/sql/src/statements/create.rs
@@ -241,7 +241,7 @@ mod tests {
 PARTITION r2 VALUES LESS THAN (MAXVALUE),
 )
 engine=mito
-with(regions=1, ttl='7d');
+with(regions=1, ttl='7d', storage='File');
 ";
         let result = ParserContext::create_with_dialect(sql, &GreptimeDbDialect {}).unwrap();
         assert_eq!(1, result.len());
@@ -267,6 +267,7 @@ PARTITION BY RANGE COLUMNS (ts) (
 ENGINE=mito
 WITH(
   regions = 1,
+  storage = 'File',
   ttl = '7d'
 )"#,
             &new_sql
diff --git a/src/table/src/requests.rs b/src/table/src/requests.rs
index 9d78f21534fb..c3ffceec63e8 100644
--- a/src/table/src/requests.rs
+++ b/src/table/src/requests.rs
@@ -84,6 +84,7 @@ pub struct TableOptions {
 pub const WRITE_BUFFER_SIZE_KEY: &str = "write_buffer_size";
 pub const TTL_KEY: &str = "ttl";
 pub const REGIONS_KEY: &str = "regions";
+pub const STORAGE_KEY: &str = "storage";
 
 impl TryFrom<&HashMap<String, String>> for TableOptions {
     type Error = error::Error;
@@ -340,6 +341,7 @@ pub fn valid_table_option(key: &str) -> bool {
             | WRITE_BUFFER_SIZE_KEY
             | TTL_KEY
             | REGIONS_KEY
+            | STORAGE_KEY
     ) | is_supported_in_s3(key)
 }
@@ -365,6 +367,7 @@ mod tests {
         assert!(valid_table_option(TTL_KEY));
         assert!(valid_table_option(REGIONS_KEY));
         assert!(valid_table_option(WRITE_BUFFER_SIZE_KEY));
+        assert!(valid_table_option(STORAGE_KEY));
         assert!(!valid_table_option("foo"));
     }
diff --git a/tests-integration/src/cluster.rs b/tests-integration/src/cluster.rs
index f114e3015cac..cf0d1fd02e27 100644
--- a/tests-integration/src/cluster.rs
+++ b/tests-integration/src/cluster.rs
@@ -68,6 +68,7 @@ pub struct GreptimeDbClusterBuilder {
     cluster_name: String,
     kv_backend: KvBackendRef,
     store_config: Option<ObjectStoreConfig>,
+    store_providers: Option<Vec<StorageType>>,
     datanodes: Option<u32>,
 }
 
@@ -92,6 +93,7 @@ impl GreptimeDbClusterBuilder {
             cluster_name: cluster_name.to_string(),
             kv_backend,
             store_config: None,
+            store_providers: None,
             datanodes: None,
         }
     }
@@ -101,6 +103,11 @@ impl GreptimeDbClusterBuilder {
         self
     }
 
+    pub fn with_store_providers(mut self, store_providers: Vec<StorageType>) -> Self {
+        self.store_providers = Some(store_providers);
+        self
+    }
+
     pub fn with_datanodes(mut self, datanodes: u32) -> Self {
         self.datanodes = Some(datanodes);
         self
@@ -176,14 +183,15 @@ impl GreptimeDbClusterBuilder {
 
                 dir_guards.push(FileDirGuard::new(home_tmp_dir));
 
-                create_datanode_opts(store_config.clone(), home_dir)
+                create_datanode_opts(store_config.clone(), vec![], home_dir)
             } else {
                 let (opts, guard) = create_tmp_dir_and_datanode_opts(
                     StorageType::File,
+                    self.store_providers.clone().unwrap_or_default(),
                     &format!("{}-dn-{}", self.cluster_name, datanode_id),
                 );
 
-                storage_guards.push(guard.storage_guard);
+                storage_guards.push(guard.storage_guards);
                 dir_guards.push(guard.home_guard);
 
                 opts
@@ -195,7 +203,11 @@ impl GreptimeDbClusterBuilder {
             instances.insert(datanode_id, datanode);
         }
 
-        (instances, storage_guards, dir_guards)
+        (
+            instances,
+            storage_guards.into_iter().flatten().collect(),
+            dir_guards,
+        )
     }
 
     async fn wait_datanodes_alive(
diff --git a/tests-integration/src/standalone.rs b/tests-integration/src/standalone.rs
index 19610333e499..f6b1c35c9e11 100644
--- a/tests-integration/src/standalone.rs
+++ b/tests-integration/src/standalone.rs
@@ -40,7 +40,8 @@ pub struct GreptimeDbStandalone {
 
 pub struct GreptimeDbStandaloneBuilder {
     instance_name: String,
-    store_type: Option<StorageType>,
+    store_providers: Option<Vec<StorageType>>,
+    default_store: Option<StorageType>,
     plugin: Option<Plugins>,
 }
 
@@ -48,14 +49,23 @@ impl GreptimeDbStandaloneBuilder {
     pub fn new(instance_name: &str) -> Self {
         Self {
             instance_name: instance_name.to_string(),
-            store_type: None,
+            store_providers: None,
             plugin: None,
+            default_store: None,
         }
     }
 
-    pub fn with_store_type(self, store_type: StorageType) -> Self {
+    pub fn with_default_store_type(self, store_type: StorageType) -> Self {
         Self {
-            store_type: Some(store_type),
+            default_store: Some(store_type),
+            ..self
+        }
+    }
+
+    #[cfg(test)]
+    pub fn with_store_providers(self, store_providers: Vec<StorageType>) -> Self {
+        Self {
+            store_providers: Some(store_providers),
             ..self
         }
     }
@@ -69,9 +79,11 @@ impl GreptimeDbStandaloneBuilder {
     }
 
     pub async fn build(self) -> GreptimeDbStandalone {
-        let store_type = self.store_type.unwrap_or(StorageType::File);
+        let default_store_type = self.default_store.unwrap_or(StorageType::File);
+        let store_types = self.store_providers.unwrap_or_default();
 
-        let (opts, guard) = create_tmp_dir_and_datanode_opts(store_type, &self.instance_name);
+        let (opts, guard) =
+            create_tmp_dir_and_datanode_opts(default_store_type, store_types, &self.instance_name);
 
         let procedure_config = ProcedureConfig::default();
         let kv_backend_config = KvBackendConfig::default();
diff --git a/tests-integration/src/test_util.rs b/tests-integration/src/test_util.rs
index ef47b3c2b974..ad69c40ada33 100644
--- a/tests-integration/src/test_util.rs
+++ b/tests-integration/src/test_util.rs
@@ -77,6 +77,26 @@ impl Display for StorageType {
 }
 
 impl StorageType {
+    pub fn build_storage_types_based_on_env() -> Vec<StorageType> {
+        let mut storage_types = Vec::with_capacity(4);
+        storage_types.push(StorageType::File);
+        if let Ok(bucket) = env::var("GT_S3_BUCKET") {
+            if !bucket.is_empty() {
+                storage_types.push(StorageType::S3);
+            }
+        }
+        if env::var("GT_OSS_BUCKET").is_ok() {
+            storage_types.push(StorageType::Oss);
+        }
+        if env::var("GT_AZBLOB_CONTAINER").is_ok() {
+            storage_types.push(StorageType::Azblob);
+        }
+        if env::var("GT_GCS_BUCKET").is_ok() {
+            storage_types.push(StorageType::Gcs);
+        }
+        storage_types
+    }
+
     pub fn test_on(&self) -> bool {
         let _ = dotenv::dotenv();
@@ -244,7 +264,7 @@ pub enum TempDirGuard {
 
 pub struct TestGuard {
     pub home_guard: FileDirGuard,
-    pub storage_guard: StorageGuard,
+    pub storage_guards: Vec<StorageGuard>,
 }
 
 pub struct FileDirGuard {
@@ -261,42 +281,62 @@ pub struct StorageGuard(pub TempDirGuard);
 
 impl TestGuard {
     pub async fn remove_all(&mut self) {
-        if let TempDirGuard::S3(guard)
-        | TempDirGuard::Oss(guard)
-        | TempDirGuard::Azblob(guard)
-        | TempDirGuard::Gcs(guard) = &mut self.storage_guard.0
-        {
-            guard.remove_all().await.unwrap()
+        for storage_guard in self.storage_guards.iter_mut() {
+            if let TempDirGuard::S3(guard)
+            | TempDirGuard::Oss(guard)
+            | TempDirGuard::Azblob(guard)
+            | TempDirGuard::Gcs(guard) = &mut storage_guard.0
+            {
+                guard.remove_all().await.unwrap()
+            }
         }
     }
 }
 
 pub fn create_tmp_dir_and_datanode_opts(
-    store_type: StorageType,
+    default_store_type: StorageType,
+    store_provider_types: Vec<StorageType>,
     name: &str,
 ) -> (DatanodeOptions, TestGuard) {
     let home_tmp_dir = create_temp_dir(&format!("gt_data_{name}"));
     let home_dir = home_tmp_dir.path().to_str().unwrap().to_string();
 
-    let (store, data_tmp_dir) = get_test_store_config(&store_type);
-    let opts = create_datanode_opts(store, home_dir);
+    // Excludes the default object store.
+    let mut store_providers = Vec::with_capacity(store_provider_types.len());
+    // Includes the default object store.
+    let mut storage_guards = Vec::with_capacity(store_provider_types.len() + 1);
+
+    let (default_store, data_tmp_dir) = get_test_store_config(&default_store_type);
+    storage_guards.push(StorageGuard(data_tmp_dir));
+
+    for store_type in store_provider_types {
+        let (store, data_tmp_dir) = get_test_store_config(&store_type);
+        store_providers.push(store);
+        storage_guards.push(StorageGuard(data_tmp_dir))
+    }
+    let opts = create_datanode_opts(default_store, store_providers, home_dir);
 
     (
         opts,
         TestGuard {
             home_guard: FileDirGuard::new(home_tmp_dir),
-            storage_guard: StorageGuard(data_tmp_dir),
+            storage_guards,
         },
     )
 }
 
-pub(crate) fn create_datanode_opts(store: ObjectStoreConfig, home_dir: String) -> DatanodeOptions {
+pub(crate) fn create_datanode_opts(
+    default_store: ObjectStoreConfig,
+    providers: Vec<ObjectStoreConfig>,
+    home_dir: String,
+) -> DatanodeOptions {
     DatanodeOptions {
         node_id: Some(0),
         require_lease_before_startup: true,
         storage: StorageConfig {
             data_home: home_dir,
-            store,
+            providers,
+            store: default_store,
             ..Default::default()
         },
         mode: Mode::Standalone,
@@ -325,7 +365,7 @@ async fn setup_standalone_instance(
     store_type: StorageType,
 ) -> GreptimeDbStandalone {
     GreptimeDbStandaloneBuilder::new(test_name)
-        .with_store_type(store_type)
+        .with_default_store_type(store_type)
         .build()
         .await
 }
diff --git a/tests-integration/src/tests/instance_test.rs b/tests-integration/src/tests/instance_test.rs
index 5e0ecf62af44..6bf5fc0d4784 100644
--- a/tests-integration/src/tests/instance_test.rs
+++ b/tests-integration/src/tests/instance_test.rs
@@ -31,8 +31,9 @@ use session::context::{QueryContext, QueryContextRef};
 
 use crate::test_util::check_output_stream;
 use crate::tests::test_util::{
-    both_instances_cases, check_unordered_output_stream, distributed, find_testing_resource,
-    prepare_path, standalone, standalone_instance_case, MockInstance,
+    both_instances_cases, both_instances_cases_with_custom_storages, check_unordered_output_stream,
+    distributed, distributed_with_multiple_object_stores, find_testing_resource, prepare_path,
+    standalone, standalone_instance_case, standalone_with_multiple_object_stores, MockInstance,
 };
 
 #[apply(both_instances_cases)]
@@ -1840,3 +1841,120 @@ async fn execute_sql_with(
         .await
         .unwrap()
 }
+
+#[apply(both_instances_cases_with_custom_storages)]
+async fn test_custom_storage(instance: Arc<dyn MockInstance>) {
+    let frontend = instance.frontend();
+    let custom_storages = [
+        ("S3", "GT_S3_BUCKET"),
+        ("Oss", "GT_OSS_BUCKET"),
+        ("Azblob", "GT_AZBLOB_CONTAINER"),
+        ("Gcs", "GT_GCS_BUCKET"),
+    ];
+    for (storage_name, custom_storage_env) in custom_storages {
+        if let Ok(env_value) = env::var(custom_storage_env) {
+            if env_value.is_empty() {
+                continue;
+            }
+            let sql = if instance.is_distributed_mode() {
+                format!(
+                    r#"create table test_table(
+                        a int null primary key,
+                        ts timestamp time index,
+                    )
+                    PARTITION BY RANGE COLUMNS (a) (
+                        PARTITION r0 VALUES LESS THAN (1),
+                        PARTITION r1 VALUES LESS THAN (10),
+                        PARTITION r2 VALUES LESS THAN (100),
+                        PARTITION r3 VALUES LESS THAN (MAXVALUE),
+                    )
+                    with(storage='{storage_name}')
+                    "#
+                )
+            } else {
+                format!(
+                    r#"create table test_table(a int primary key, ts timestamp time index)with(storage='{storage_name}');"#
+                )
+            };
+
+            let output = execute_sql(&instance.frontend(), &sql).await;
+            assert!(matches!(output, Output::AffectedRows(0)));
+
+            let output = execute_sql(
+                &frontend,
+                r#"insert into test_table(a, ts) values
+                        (1, 1655276557000),
+                        (1000, 1655276558000)
+                        "#,
+            )
+            .await;
+            assert!(matches!(output, Output::AffectedRows(2)));
+
+            let output = execute_sql(&frontend, "select * from test_table").await;
+            let expected = "\
++------+---------------------+
+| a    | ts                  |
++------+---------------------+
+| 1    | 2022-06-15T07:02:37 |
+| 1000 | 2022-06-15T07:02:38 |
++------+---------------------+";
+
+            check_output_stream(output, expected).await;
+            let output = execute_sql(&frontend, "show create table test_table").await;
+            let Output::RecordBatches(record_batches) = output else {
+                unreachable!()
+            };
+
+            let record_batches = record_batches.iter().collect::<Vec<_>>();
+            let column = record_batches[0].column_by_name("Create Table").unwrap();
+            let actual = column.get(0);
+
+            let expect = if instance.is_distributed_mode() {
+                format!(
+                    r#"CREATE TABLE IF NOT EXISTS "test_table" (
+  "a" INT NULL,
+  "ts" TIMESTAMP(3) NOT NULL,
+  TIME INDEX ("ts"),
+  PRIMARY KEY ("a")
+)
+PARTITION BY RANGE COLUMNS ("a") (
+  PARTITION r0 VALUES LESS THAN (1),
+  PARTITION r1 VALUES LESS THAN (10),
+  PARTITION r2 VALUES LESS THAN (100),
+  PARTITION r3 VALUES LESS THAN (MAXVALUE)
+)
+ENGINE=mito
+WITH(
+  regions = 4,
+  storage = '{storage_name}'
+)"#
+                )
+            } else {
+                format!(
+                    r#"CREATE TABLE IF NOT EXISTS "test_table" (
+  "a" INT NULL,
+  "ts" TIMESTAMP(3) NOT NULL,
+  TIME INDEX ("ts"),
+  PRIMARY KEY ("a")
+)
+
+ENGINE=mito
+WITH(
+  regions = 1,
+  storage = '{storage_name}'
+)"#
+                )
+            };
+            assert_eq!(actual.to_string(), expect);
+            let output = execute_sql(&frontend, "truncate test_table").await;
+            assert!(matches!(output, Output::AffectedRows(0)));
+            let output = execute_sql(&frontend, "select * from test_table").await;
+            let expected = "\
+++
+++";
+
+            check_output_stream(output, expected).await;
+            let output = execute_sql(&frontend, "drop table test_table").await;
+            assert!(matches!(output, Output::AffectedRows(0)));
+        }
+    }
+}
diff --git a/tests-integration/src/tests/test_util.rs b/tests-integration/src/tests/test_util.rs
index 422eba4eaa19..edf21ba7601d 100644
--- a/tests-integration/src/tests/test_util.rs
+++ b/tests-integration/src/tests/test_util.rs
@@ -20,7 +20,9 @@ use common_test_util::find_workspace_path;
 use frontend::instance::Instance;
 use rstest_reuse::{self, template};
 
+use crate::cluster::GreptimeDbClusterBuilder;
 use crate::standalone::{GreptimeDbStandalone, GreptimeDbStandaloneBuilder};
+use crate::test_util::StorageType;
 use crate::tests::{create_distributed_instance, MockDistributedInstance};
 
 pub(crate) trait MockInstance {
@@ -61,6 +63,42 @@ pub(crate) async fn distributed() -> Arc<dyn MockInstance> {
     Arc::new(instance)
 }
 
+pub(crate) async fn standalone_with_multiple_object_stores() -> Arc<dyn MockInstance> {
+    let _ = dotenv::dotenv();
+    let test_name = uuid::Uuid::new_v4().to_string();
+    let storage_types = StorageType::build_storage_types_based_on_env();
+    let instance = GreptimeDbStandaloneBuilder::new(&test_name)
+        .with_store_providers(storage_types)
+        .build()
+        .await;
+    Arc::new(instance)
+}
+
+pub(crate) async fn distributed_with_multiple_object_stores() -> Arc<dyn MockInstance> {
+    let _ = dotenv::dotenv();
+    let test_name = uuid::Uuid::new_v4().to_string();
+    let providers = StorageType::build_storage_types_based_on_env();
+    let cluster = GreptimeDbClusterBuilder::new(&test_name)
+        .await
+        .with_store_providers(providers)
+        .build()
+        .await;
+    Arc::new(MockDistributedInstance(cluster))
+}
+
+#[template]
+#[rstest]
+#[case::test_with_standalone(standalone_with_multiple_object_stores())]
+#[case::test_with_distributed(distributed_with_multiple_object_stores())]
+#[awt]
+#[tokio::test(flavor = "multi_thread")]
+pub(crate) fn both_instances_cases_with_custom_storages(
+    #[future]
+    #[case]
+    instance: Arc<dyn MockInstance>,
+) {
+}
+
 #[template]
 #[rstest]
 #[case::test_with_standalone(standalone())]
diff --git a/tests-integration/tests/http.rs b/tests-integration/tests/http.rs
index 68b0f01d1412..07f1b903c3c4 100644
--- a/tests-integration/tests/http.rs
+++ b/tests-integration/tests/http.rs
@@ -712,6 +712,7 @@ sync_write = false
 
 [datanode.storage]
 type = "{}"
+providers = []
 
 [[datanode.region_engine]]
diff --git a/tests/cases/standalone/common/show/show_create.result b/tests/cases/standalone/common/show/show_create.result
index b871117b1ad9..0936ab2d7f2e 100644
--- a/tests/cases/standalone/common/show/show_create.result
+++ b/tests/cases/standalone/common/show/show_create.result
@@ -100,3 +100,24 @@ WITH(
 
 Error: 1004(InvalidArguments), Invalid table option key: foo
 
+CREATE TABLE not_supported_table_storage_option (
+  id INT UNSIGNED,
+  host STRING,
+  cpu DOUBLE,
+  disk FLOAT,
+  ts TIMESTAMP NOT NULL DEFAULT current_timestamp(),
+  TIME INDEX (ts),
+  PRIMARY KEY (id, host)
+)
+PARTITION BY RANGE COLUMNS (id) (
+  PARTITION r0 VALUES LESS THAN (5),
+  PARTITION r1 VALUES LESS THAN (9),
+  PARTITION r2 VALUES LESS THAN (MAXVALUE),
+)
+ENGINE=mito
+WITH(
+  storage = 'S3'
+);
+
+Error: 1004(InvalidArguments), Object store not found: s3
+
diff --git a/tests/cases/standalone/common/show/show_create.sql b/tests/cases/standalone/common/show/show_create.sql
index 56b56baadcbc..50fb9b5a85da 100644
--- a/tests/cases/standalone/common/show/show_create.sql
+++ b/tests/cases/standalone/common/show/show_create.sql
@@ -50,3 +50,21 @@ WITH(
   ttl = '7d',
   write_buffer_size = 1024
 );
+
+CREATE TABLE not_supported_table_storage_option (
+  id INT UNSIGNED,
+  host STRING,
+  cpu DOUBLE,
+  disk FLOAT,
+  ts TIMESTAMP NOT NULL DEFAULT current_timestamp(),
+  TIME INDEX (ts),
+  PRIMARY KEY (id, host)
+)
+PARTITION BY RANGE COLUMNS (id) (
+  PARTITION r0 VALUES LESS THAN (5),
+  PARTITION r1 VALUES LESS THAN (9),
+  PARTITION r2 VALUES LESS THAN (MAXVALUE),
+)
+ENGINE=mito
+WITH(
+  storage = 'S3'
+);
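
Taken together, the datanode changes above form a small provider registry: `build_object_store_manager` creates the default object store from the flattened `[storage]` options, registers each `[[storage.providers]]` entry under its `ObjectStoreConfig::name()`, and a table's `with(storage = '...')` option later selects one of those stores; an unconfigured name fails with the `Object store not found` error asserted by the new sqlness case. Below is a minimal, self-contained Rust sketch of that lookup pattern. It is not GreptimeDB's actual `ObjectStoreManager` API, and the lowercased-key handling is an assumption inferred from the lowercase `s3` in the expected error output.

// A toy registry mirroring the shape of build_object_store_manager above.
// Names and normalization behavior are assumptions, not the real types.
use std::collections::HashMap;

#[derive(Debug, Clone)]
struct ObjectStore(&'static str); // stand-in for a real object store handle

struct ObjectStoreManager {
    stores: HashMap<String, ObjectStore>,
}

impl ObjectStoreManager {
    // Registers the default store first, as build_object_store_manager does.
    fn new(default_name: &str, store: ObjectStore) -> Self {
        let mut stores = HashMap::new();
        stores.insert(default_name.to_lowercase(), store);
        Self { stores }
    }

    // Each [[storage.providers]] entry is added under ObjectStoreConfig::name().
    fn add(&mut self, name: &str, store: ObjectStore) {
        self.stores.insert(name.to_lowercase(), store);
    }

    // Lookup performed when a table carries `with(storage = '<name>')`.
    fn find(&self, name: &str) -> Result<&ObjectStore, String> {
        let key = name.to_lowercase();
        self.stores
            .get(&key)
            .ok_or_else(|| format!("Object store not found: {key}"))
    }
}

fn main() {
    let mut manager = ObjectStoreManager::new("File", ObjectStore("file"));
    manager.add("Gcs", ObjectStore("gcs"));

    // `storage = 'Gcs'` resolves because a Gcs provider was configured.
    println!("resolved: {:?}", manager.find("Gcs"));
    // No S3 provider was configured, so lookup fails exactly like the
    // new sqlness case: "Object store not found: s3".
    assert_eq!(manager.find("S3").unwrap_err(), "Object store not found: s3");
}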