diff --git a/Cargo.lock b/Cargo.lock
index 099d0ead5708..f2f6d2e680b0 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3543,7 +3543,7 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"
 [[package]]
 name = "greptime-proto"
 version = "0.1.0"
-source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=2b3ae45740a49ec6a0830d71fc09c3093aeb5fe7#2b3ae45740a49ec6a0830d71fc09c3093aeb5fe7"
+source = "git+https://github.com/GreptimeTeam/greptime-proto.git?rev=2aaee38de81047537dfa42af9df63bcfb866e06c#2aaee38de81047537dfa42af9df63bcfb866e06c"
 dependencies = [
  "prost 0.12.2",
  "serde",
diff --git a/Cargo.toml b/Cargo.toml
index 449e17298310..2cbecce5ce1b 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -87,7 +87,7 @@ etcd-client = "0.12"
 fst = "0.4.7"
 futures = "0.3"
 futures-util = "0.3"
-greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "2b3ae45740a49ec6a0830d71fc09c3093aeb5fe7" }
+greptime-proto = { git = "https://github.com/GreptimeTeam/greptime-proto.git", rev = "2aaee38de81047537dfa42af9df63bcfb866e06c" }
 humantime-serde = "1.1"
 itertools = "0.10"
 lazy_static = "1.4"
diff --git a/src/common/meta/src/ddl/drop_table.rs b/src/common/meta/src/ddl/drop_table.rs
index b32243eced47..13173b01f314 100644
--- a/src/common/meta/src/ddl/drop_table.rs
+++ b/src/common/meta/src/ddl/drop_table.rs
@@ -86,6 +86,10 @@ impl DropTableProcedure {
             ))
             .await?;
 
+        if !exist && self.data.task.drop_if_exists {
+            return Ok(Status::Done);
+        }
+
         ensure!(
             exist,
             error::TableNotFoundSnafu {
diff --git a/src/common/meta/src/rpc/ddl.rs b/src/common/meta/src/rpc/ddl.rs
index 0dfa5f507156..b9bbd762b14d 100644
--- a/src/common/meta/src/rpc/ddl.rs
+++ b/src/common/meta/src/rpc/ddl.rs
@@ -54,12 +54,14 @@ impl DdlTask {
         schema: String,
         table: String,
         table_id: TableId,
+        drop_if_exists: bool,
     ) -> Self {
         DdlTask::DropTable(DropTableTask {
             catalog,
             schema,
             table,
             table_id,
+            drop_if_exists,
         })
     }
 
@@ -118,6 +120,7 @@
                     schema_name: task.schema,
                     table_name: task.table,
                     table_id: Some(api::v1::TableId { id: task.table_id }),
+                    drop_if_exists: task.drop_if_exists,
                 }),
             }),
             DdlTask::AlterTable(task) => Task::AlterTableTask(PbAlterTableTask {
@@ -176,6 +179,8 @@ pub struct DropTableTask {
     pub schema: String,
     pub table: String,
     pub table_id: TableId,
+    #[serde(default)]
+    pub drop_if_exists: bool,
 }
 
 impl DropTableTask {
@@ -214,6 +219,7 @@ impl TryFrom<PbDropTableTask> for DropTableTask {
                     err_msg: "expected table_id",
                 })?
                 .id,
+            drop_if_exists: drop_table.drop_if_exists,
         })
     }
 }
diff --git a/src/frontend/src/instance/grpc.rs b/src/frontend/src/instance/grpc.rs
index 74947581dc2d..2ba67483d2dc 100644
--- a/src/frontend/src/instance/grpc.rs
+++ b/src/frontend/src/instance/grpc.rs
@@ -122,7 +122,9 @@ impl GrpcQueryHandler for Instance {
             DdlExpr::DropTable(expr) => {
                 let table_name =
                     TableName::new(&expr.catalog_name, &expr.schema_name, &expr.table_name);
-                self.statement_executor.drop_table(table_name).await?
+                self.statement_executor
+                    .drop_table(table_name, expr.drop_if_exists)
+                    .await?
             }
             DdlExpr::TruncateTable(expr) => {
                 let table_name =
diff --git a/src/meta-srv/src/procedure/tests.rs b/src/meta-srv/src/procedure/tests.rs
index 49d09a2ad3fc..5321481f66a3 100644
--- a/src/meta-srv/src/procedure/tests.rs
+++ b/src/meta-srv/src/procedure/tests.rs
@@ -235,6 +235,7 @@ async fn test_on_datanode_drop_regions() {
         schema: "my_schema".to_string(),
         table: "my_table".to_string(),
         table_id: 42,
+        drop_if_exists: false,
     };
 
     let (region_server, mut rx) = EchoRegionServer::new();
diff --git a/src/operator/src/statement.rs b/src/operator/src/statement.rs
index ca31e7a18ac0..b03ff128881f 100644
--- a/src/operator/src/statement.rs
+++ b/src/operator/src/statement.rs
@@ -151,7 +151,7 @@ impl StatementExecutor {
                         .map_err(BoxedError::new)
                         .context(error::ExternalSnafu)?;
                 let table_name = TableName::new(catalog, schema, table);
-                self.drop_table(table_name).await
+                self.drop_table(table_name, stmt.drop_if_exists()).await
             }
             Statement::TruncateTable(stmt) => {
                 let (catalog, schema, table) =
diff --git a/src/operator/src/statement/ddl.rs b/src/operator/src/statement/ddl.rs
index 36cb4d93ab1b..620e3de6445d 100644
--- a/src/operator/src/statement/ddl.rs
+++ b/src/operator/src/statement/ddl.rs
@@ -36,7 +36,7 @@ use lazy_static::lazy_static;
 use partition::partition::{PartitionBound, PartitionDef};
 use regex::Regex;
 use session::context::QueryContextRef;
-use snafu::{ensure, OptionExt, ResultExt};
+use snafu::{ensure, IntoError, OptionExt, ResultExt};
 use sql::ast::Value as SqlValue;
 use sql::statements::alter::AlterTable;
 use sql::statements::create::{CreateExternalTable, CreateTable, Partitions};
@@ -168,8 +168,8 @@ impl StatementExecutor {
     }
 
     #[tracing::instrument(skip_all)]
-    pub async fn drop_table(&self, table_name: TableName) -> Result<Output> {
-        let table = self
+    pub async fn drop_table(&self, table_name: TableName, drop_if_exists: bool) -> Result<Output> {
+        if let Some(table) = self
             .catalog_manager
             .table(
                 &table_name.catalog_name,
@@ -178,24 +178,32 @@
             )
             .await
             .context(CatalogSnafu)?
-            .with_context(|| TableNotFoundSnafu {
+        {
+            let table_id = table.table_info().table_id();
+            self.drop_table_procedure(&table_name, table_id, drop_if_exists)
+                .await?;
+
+            // Invalidates local cache ASAP.
+            self.cache_invalidator
+                .invalidate_table_id(&Context::default(), table_id)
+                .await
+                .context(error::InvalidateTableCacheSnafu)?;
+
+            self.cache_invalidator
+                .invalidate_table_name(&Context::default(), table_name.clone())
+                .await
+                .context(error::InvalidateTableCacheSnafu)?;
+
+            Ok(Output::AffectedRows(0))
+        } else if drop_if_exists {
+            // DROP TABLE IF EXISTS meets table not found - ignored
+            Ok(Output::AffectedRows(0))
+        } else {
+            Err(TableNotFoundSnafu {
                 table_name: table_name.to_string(),
-            })?;
-        let table_id = table.table_info().table_id();
-        self.drop_table_procedure(&table_name, table_id).await?;
-
-        // Invalidates local cache ASAP.
-        self.cache_invalidator
-            .invalidate_table_id(&Context::default(), table_id)
-            .await
-            .context(error::InvalidateTableCacheSnafu)?;
-
-        self.cache_invalidator
-            .invalidate_table_name(&Context::default(), table_name.clone())
-            .await
-            .context(error::InvalidateTableCacheSnafu)?;
-
-        Ok(Output::AffectedRows(0))
+            }
+            .into_error(snafu::NoneError))
+        }
     }
 
     #[tracing::instrument(skip_all)]
@@ -343,6 +351,7 @@
         &self,
         table_name: &TableName,
         table_id: TableId,
+        drop_if_exists: bool,
     ) -> Result<SubmitDdlTaskResponse> {
         let request = SubmitDdlTaskRequest {
             task: DdlTask::new_drop_table(
@@ -350,6 +359,7 @@
                 table_name.schema_name.to_string(),
                 table_name.table_name.to_string(),
                 table_id,
+                drop_if_exists,
             ),
         };
 
diff --git a/src/sql/src/parsers/drop_parser.rs b/src/sql/src/parsers/drop_parser.rs
index 92cddf728491..205fcdb457fb 100644
--- a/src/sql/src/parsers/drop_parser.rs
+++ b/src/sql/src/parsers/drop_parser.rs
@@ -29,6 +29,7 @@ impl<'a> ParserContext<'a> {
         }
         let _ = self.parser.next_token();
 
+        let if_exists = self.parser.parse_keywords(&[Keyword::IF, Keyword::EXISTS]);
         let raw_table_ident =
             self.parser
                 .parse_object_name()
@@ -45,7 +46,7 @@
             }
         );
 
-        Ok(Statement::DropTable(DropTable::new(table_ident)))
+        Ok(Statement::DropTable(DropTable::new(table_ident, if_exists)))
     }
 }
 
@@ -63,7 +64,15 @@ mod tests {
         let mut stmts = result.unwrap();
         assert_eq!(
             stmts.pop().unwrap(),
-            Statement::DropTable(DropTable::new(ObjectName(vec![Ident::new("foo")])))
+            Statement::DropTable(DropTable::new(ObjectName(vec![Ident::new("foo")]), false))
         );
+
+        let sql = "DROP TABLE IF EXISTS foo";
+        let result = ParserContext::create_with_dialect(sql, &GreptimeDbDialect {});
+        let mut stmts = result.unwrap();
+        assert_eq!(
+            stmts.pop().unwrap(),
+            Statement::DropTable(DropTable::new(ObjectName(vec![Ident::new("foo")]), true))
+        );
 
         let sql = "DROP TABLE my_schema.foo";
@@ -71,10 +80,10 @@
         let mut stmts = result.unwrap();
         assert_eq!(
             stmts.pop().unwrap(),
-            Statement::DropTable(DropTable::new(ObjectName(vec![
-                Ident::new("my_schema"),
-                Ident::new("foo")
-            ])))
+            Statement::DropTable(DropTable::new(
+                ObjectName(vec![Ident::new("my_schema"), Ident::new("foo")]),
+                false
+            ))
         );
 
         let sql = "DROP TABLE my_catalog.my_schema.foo";
@@ -82,11 +91,14 @@
         let mut stmts = result.unwrap();
         assert_eq!(
             stmts.pop().unwrap(),
-            Statement::DropTable(DropTable::new(ObjectName(vec![
-                Ident::new("my_catalog"),
-                Ident::new("my_schema"),
-                Ident::new("foo")
-            ])))
+            Statement::DropTable(DropTable::new(
+                ObjectName(vec![
+                    Ident::new("my_catalog"),
+                    Ident::new("my_schema"),
+                    Ident::new("foo")
+                ]),
+                false
+            ))
         )
     }
 }
diff --git a/src/sql/src/statements/drop.rs b/src/sql/src/statements/drop.rs
index b8e46ac3f2a7..d5cf364a4cfa 100644
--- a/src/sql/src/statements/drop.rs
+++ b/src/sql/src/statements/drop.rs
@@ -19,15 +19,24 @@ use sqlparser_derive::{Visit, VisitMut};
 #[derive(Debug, Clone, PartialEq, Eq, Visit, VisitMut)]
 pub struct DropTable {
     table_name: ObjectName,
+    /// drop table if exists
+    drop_if_exists: bool,
 }
 
 impl DropTable {
     /// Creates a statement for `DROP TABLE`
-    pub fn new(table_name: ObjectName) -> Self {
-        Self { table_name }
+    pub fn new(table_name: ObjectName, if_exists: bool) -> Self {
+        Self {
+            table_name,
+            drop_if_exists: if_exists,
+        }
     }
 
     pub fn table_name(&self) -> &ObjectName {
         &self.table_name
     }
+
+    pub fn drop_if_exists(&self) -> bool {
+        self.drop_if_exists
+    }
 }
diff --git a/tests/README.md b/tests/README.md
index ffdc8eaef576..58d6032523af 100644
--- a/tests/README.md
+++ b/tests/README.md
@@ -3,7 +3,9 @@
 ## Sqlness manual
 
 ### Case file
-Sqlness has two types of file
+
+Sqlness has two types of file:
+
 - `.sql`: test input, SQL only
 - `.result`: expected test output, SQL and its results
 
@@ -14,17 +16,21 @@
 check change logs to solve the problem.
 You only need to write test SQL in `.sql` file, and run the test.
 
 ### Case organization
+
 The root dir of input cases is `tests/cases`. It contains several subdirectories stand for different test
 modes. E.g., `standalone/` contains all the tests to run under `greptimedb standalone start` mode.
-Under the first level of sub-directory (e.g. the `cases/standalone`), you can organize your cases as you like.
+Under the first level of subdirectory (e.g. the `cases/standalone`), you can organize your cases as you like.
 Sqlness walks through every file recursively and runs them.
 
 ## Run the test
+
 Unlike other tests, this harness is in a binary target form. You can run it with:
+
 ```shell
 cargo sqlness
 ```
+
 It automatically finishes the following procedures: compile `GreptimeDB`, start it, grab tests
 and feed it to the server, then collect and compare the results. You only need to check if the
 `.result` files are changed. If not, congratulations, the test is passed 🥳!
diff --git a/tests/cases/standalone/common/drop/drop_table.result b/tests/cases/standalone/common/drop/drop_table.result
new file mode 100644
index 000000000000..a8fad0ee78ca
--- /dev/null
+++ b/tests/cases/standalone/common/drop/drop_table.result
@@ -0,0 +1,22 @@
+DROP TABLE IF EXISTS foo;
+
+Affected Rows: 0
+
+create table foo (
+    host string,
+    ts timestamp DEFAULT '2023-04-29 00:00:00+00:00',
+    cpu double default 0,
+    TIME INDEX (ts),
+    PRIMARY KEY(host)
+) engine=mito with(regions=1);
+
+Affected Rows: 0
+
+DROP TABLE IF EXISTS foo;
+
+Affected Rows: 0
+
+DROP TABLE IF EXISTS foo;
+
+Affected Rows: 0
+
diff --git a/tests/cases/standalone/common/drop/drop_table.sql b/tests/cases/standalone/common/drop/drop_table.sql
new file mode 100644
index 000000000000..d6ddf52101c1
--- /dev/null
+++ b/tests/cases/standalone/common/drop/drop_table.sql
@@ -0,0 +1,13 @@
+DROP TABLE IF EXISTS foo;
+
+create table foo (
+    host string,
+    ts timestamp DEFAULT '2023-04-29 00:00:00+00:00',
+    cpu double default 0,
+    TIME INDEX (ts),
+    PRIMARY KEY(host)
+) engine=mito with(regions=1);
+
+DROP TABLE IF EXISTS foo;
+
+DROP TABLE IF EXISTS foo;