diff --git a/src/binder/mod.rs b/src/binder/mod.rs
index 40e25a80..007b626f 100644
--- a/src/binder/mod.rs
+++ b/src/binder/mod.rs
@@ -8,6 +8,7 @@ mod delete;
 mod drop_table;
 mod truncate;
 mod distinct;
+mod show;
 
 use std::collections::BTreeMap;
 use sqlparser::ast::{Ident, ObjectName, ObjectType, SetExpr, Statement};
@@ -121,7 +122,10 @@ impl<S: Storage> Binder<S> {
             Statement::Truncate { table_name, .. } => {
                 self.bind_truncate(table_name).await?
             }
-            _ => unimplemented!(),
+            Statement::ShowTables { .. } => {
+                self.bind_show_tables()?
+            }
+            _ => return Err(BindError::UnsupportedStmt(stmt.to_string())),
         };
         Ok(plan)
     }
diff --git a/src/binder/show.rs b/src/binder/show.rs
new file mode 100644
index 00000000..04ca6657
--- /dev/null
+++ b/src/binder/show.rs
@@ -0,0 +1,19 @@
+use crate::binder::{Binder, BindError};
+use crate::planner::LogicalPlan;
+use crate::planner::operator::Operator;
+use crate::planner::operator::show::ShowTablesOperator;
+use crate::storage::Storage;
+
+impl<S: Storage> Binder<S> {
+    pub(crate) fn bind_show_tables(
+        &mut self,
+    ) -> Result<LogicalPlan, BindError> {
+        let plan = LogicalPlan {
+            operator: Operator::Show(
+                ShowTablesOperator {}
+            ),
+            childrens: vec![],
+        };
+        Ok(plan)
+    }
+}
\ No newline at end of file
diff --git a/src/catalog/column.rs b/src/catalog/column.rs
index 49071082..cf4f076b 100644
--- a/src/catalog/column.rs
+++ b/src/catalog/column.rs
@@ -27,6 +27,16 @@ impl ColumnCatalog {
         }
     }
 
+    pub(crate) fn new_dummy(column_name: String)-> ColumnCatalog {
+        ColumnCatalog {
+            id: 0,
+            name: column_name,
+            table_name: None,
+            nullable: false,
+            desc: ColumnDesc::new(LogicalType::Varchar(None), false),
+        }
+    }
+
     pub(crate) fn datatype(&self) -> &LogicalType {
         &self.desc.column_datatype
     }
diff --git a/src/db.rs b/src/db.rs
index eb5434fc..bc768d16 100644
--- a/src/db.rs
+++ b/src/db.rs
@@ -301,6 +301,10 @@ mod test {
         println!("drop t1:");
         let _ = kipsql.run("drop table t1").await?;
 
+        println!("show tables:");
+        let tuples_show_tables = kipsql.run("show tables").await?;
+        println!("{}", create_table(&tuples_show_tables));
+
         Ok(())
     }
 }
diff --git a/src/execution/executor/mod.rs b/src/execution/executor/mod.rs
index 8206253e..bdc816a9 100644
--- a/src/execution/executor/mod.rs
+++ b/src/execution/executor/mod.rs
@@ -1,6 +1,7 @@
 pub(crate) mod dql;
 pub(crate)mod ddl;
 pub(crate)mod dml;
+pub(crate) mod show;
 
 use futures::stream::BoxStream;
 use futures::TryStreamExt;
@@ -20,6 +21,7 @@ use crate::execution::executor::dql::projection::Projection;
 use crate::execution::executor::dql::seq_scan::SeqScan;
 use crate::execution::executor::dql::sort::Sort;
 use crate::execution::executor::dql::values::Values;
+use crate::execution::executor::show::show_table::ShowTables;
 use crate::execution::ExecutorError;
 use crate::planner::LogicalPlan;
 use crate::planner::operator::Operator;
@@ -103,6 +105,9 @@ pub fn build<S: Storage>(plan: LogicalPlan, storage: &S) -> BoxedExecutor {
         Operator::Truncate(op) => {
             Truncate::from(op).execute(storage)
         }
+        Operator::Show(op) => {
+            ShowTables::from(op).execute(storage)
+        }
     }
 }
 
diff --git a/src/execution/executor/show/mod.rs b/src/execution/executor/show/mod.rs
new file mode 100644
index 00000000..50ed8e8a
--- /dev/null
+++ b/src/execution/executor/show/mod.rs
@@ -0,0 +1,4 @@
+pub(crate) mod show_table;
+
+
+
diff --git a/src/execution/executor/show/show_table.rs b/src/execution/executor/show/show_table.rs
new file mode 100644
index 00000000..cf424a01
--- /dev/null
+++ b/src/execution/executor/show/show_table.rs
@@ -0,0 +1,52 @@
+use futures_async_stream::try_stream;
+use crate::execution::executor::{BoxedExecutor, Executor};
+use crate::execution::ExecutorError;
+use crate::planner::operator::show::ShowTablesOperator;
+use crate::storage::Storage;
+use crate::types::tuple::Tuple;
+use crate::catalog::ColumnCatalog;
+use crate::catalog::ColumnRef;
+use std::sync::Arc;
+use crate::types::value::{DataValue, ValueRef};
+
+pub struct ShowTables {
+    op: ShowTablesOperator,
+}
+
+impl From<ShowTablesOperator> for ShowTables {
+    fn from(op: ShowTablesOperator) -> Self {
+        ShowTables {
+            op
+        }
+    }
+}
+
+impl<S: Storage> Executor<S> for ShowTables {
+    fn execute(self, storage: &S) -> BoxedExecutor {
+        self._execute(storage.clone())
+    }
+}
+
+impl ShowTables {
+    #[try_stream(boxed, ok = Tuple, error = ExecutorError)]
+    pub async fn _execute<S: Storage>(self, storage: S) {
+        if let Some(tables) = storage.show_tables().await {
+            for (table,column_count) in tables {
+                let columns: Vec<ColumnRef> = vec![
+                    Arc::new(ColumnCatalog::new_dummy("TABLES".to_string())),
+                    Arc::new(ColumnCatalog::new_dummy("COLUMN_COUNT".to_string())),
+                ];
+                let values: Vec<ValueRef> = vec![
+                    Arc::new(DataValue::Utf8(Some(table))),
+                    Arc::new(DataValue::UInt32(Some(column_count as u32))),
+                ];
+
+                yield Tuple {
+                    id: None,
+                    columns,
+                    values,
+                };
+            }
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/planner/operator/mod.rs b/src/planner/operator/mod.rs
index b6226c0d..39cd1e18 100644
--- a/src/planner/operator/mod.rs
+++ b/src/planner/operator/mod.rs
@@ -12,6 +12,7 @@ pub mod update;
 pub mod delete;
 pub mod drop_table;
 pub mod truncate;
+pub mod show;
 
 use itertools::Itertools;
 use crate::catalog::ColumnRef;
@@ -21,6 +22,7 @@ use crate::planner::operator::delete::DeleteOperator;
 use crate::planner::operator::drop_table::DropTableOperator;
 use crate::planner::operator::insert::InsertOperator;
 use crate::planner::operator::join::JoinCondition;
+use crate::planner::operator::show::ShowTablesOperator;
 use crate::planner::operator::truncate::TruncateOperator;
 use crate::planner::operator::update::UpdateOperator;
 use crate::planner::operator::values::ValuesOperator;
@@ -50,6 +52,8 @@ pub enum Operator {
     CreateTable(CreateTableOperator),
     DropTable(DropTableOperator),
     Truncate(TruncateOperator),
+    // Show
+    Show(ShowTablesOperator),
 }
 
 impl Operator {
diff --git a/src/planner/operator/show.rs b/src/planner/operator/show.rs
new file mode 100644
index 00000000..5b65bb9a
--- /dev/null
+++ b/src/planner/operator/show.rs
@@ -0,0 +1,2 @@
+#[derive(Debug, PartialEq, Clone)]
+pub struct ShowTablesOperator {}
\ No newline at end of file
diff --git a/src/storage/kip.rs b/src/storage/kip.rs
index 8ab6da11..22490d39 100644
--- a/src/storage/kip.rs
+++ b/src/storage/kip.rs
@@ -49,6 +49,11 @@ impl Storage for KipStorage {
         {
             self.inner.set(key, value).await?;
         }
+
+        let (k, v)= TableCodec::encode_root_table(table_name.as_str(), table.columns.len())
+            .ok_or(StorageError::Serialization)?;
+        self.inner.set(k, v).await?;
+
         self.cache.put(table_name.to_string(), table);
 
         Ok(table_name)
@@ -72,6 +77,9 @@ impl Storage for KipStorage {
         for col_key in col_keys {
             tx.remove(&col_key)?
         }
+        let (k, _) = TableCodec::encode_root_table(name.as_str(),0)
+            .ok_or(StorageError::Serialization)?;
+        tx.remove(&k)?;
 
         tx.commit().await?;
         let _ = self.cache.remove(name);
@@ -139,6 +147,24 @@ impl Storage for KipStorage {
 
         option
     }
+
+    async fn show_tables(&self) -> Option<Vec<(String, usize)>> {
+        let mut tables = vec![];
+        let (min, max) = TableCodec::root_table_bound();
+
+        let tx = self.inner.new_transaction().await;
+        let mut iter = tx.iter(Bound::Included(&min), Bound::Included(&max)).ok()?;
+
+        while let Some((key, value_option)) = iter.try_next().ok().flatten() {
+            if let Some(value) = value_option {
+                if let Some((table_name, column_count)) = TableCodec::decode_root_table(&key, &value) {
+                    tables.push((table_name,column_count));
+                }
+            }
+        }
+
+        Some(tables)
+    }
 }
 
 pub struct KipTable {
diff --git a/src/storage/memory.rs b/src/storage/memory.rs
index a58eaad9..6549e32a 100644
--- a/src/storage/memory.rs
+++ b/src/storage/memory.rs
@@ -114,6 +114,10 @@ impl Storage for MemStorage {
             .get_table(name)
         }
     }
+
+    async fn show_tables(&self) -> Option<Vec<(String, usize)>> {
+        todo!()
+    }
 }
 
 unsafe impl Send for MemTable {
diff --git a/src/storage/mod.rs b/src/storage/mod.rs
index 1e6f7b14..98f2ccdc 100644
--- a/src/storage/mod.rs
+++ b/src/storage/mod.rs
@@ -25,6 +25,8 @@ pub trait Storage: Sync + Send + Clone + 'static {
     async fn table(&self, name: &String) -> Option<Self::TableType>;
 
     async fn table_catalog(&self, name: &String) -> Option<&TableCatalog>;
+
+    async fn show_tables(&self) -> Option<Vec<(String, usize)>>;
 }
 
 /// Optional bounds of the reader, of the form (offset, limit).
@@ -71,6 +73,10 @@ pub enum StorageError {
 
     #[error("The same primary key data already exists")]
     DuplicatePrimaryKey,
+
+    #[error("Serialization error")]
+    Serialization,
+
 }
 
 impl From for StorageError {
diff --git a/src/storage/table_codec.rs b/src/storage/table_codec.rs
index 9406a845..18de475a 100644
--- a/src/storage/table_codec.rs
+++ b/src/storage/table_codec.rs
@@ -98,6 +98,46 @@ impl TableCodec {
             })
         })
     }
+
+
+    /// Key: RootCatalog_0_TableName
+    /// Value: ColumnCount
+    pub fn encode_root_table(table_name: &str,column_count:usize) -> Option<(Bytes, Bytes)> {
+        let key = format!(
+            "RootCatalog_{}_{}",
+            BOUND_MIN_TAG,
+            table_name,
+        );
+
+        bincode::serialize(&column_count).ok()
+            .map(|bytes| {
+                (Bytes::from(key.into_bytes()), Bytes::from(bytes))
+            })
+    }
+
+    // TODO: value is reserved for saving meta-information
+    pub fn decode_root_table(key: &[u8], bytes: &[u8]) -> Option<(String,usize)> {
+        String::from_utf8(key.to_owned()).ok()?
+            .split("_")
+            .nth(2)
+            .and_then(|table_name| {
+                bincode::deserialize::<usize>(bytes).ok()
+                    .and_then(|name| {
+                        Some((table_name.to_string(), name))
+                    })
+            })
+    }
+
+    pub fn root_table_bound() -> (Vec<u8>, Vec<u8>) {
+        let op = |bound_id| {
+            format!(
+                "RootCatalog_{}",
+                bound_id,
+            )
+        };
+
+        (op(BOUND_MIN_TAG).into_bytes(), op(BOUND_MAX_TAG).into_bytes())
+    }
 }
 #[cfg(test)]
 mod tests {
@@ -153,6 +193,25 @@ mod tests {
         Ok(())
     }
 
+    #[test]
+    fn test_root_catalog() {
+        let (table_catalog, _) = build_table_codec();
+        let (key, bytes) = TableCodec::encode_root_table(&table_catalog.name,2).unwrap();
+
+        assert_eq!(
+            String::from_utf8(key.to_vec()).ok().unwrap(),
+            format!(
+                "RootCatalog_0_{}",
+                table_catalog.name,
+            )
+        );
+
+        let (table_name, column_count) = TableCodec::decode_root_table(&key, &bytes).unwrap();
+
+        assert_eq!(table_name, table_catalog.name.as_str());
+        assert_eq!(column_count, 2);
+    }
+
     #[test]
     fn test_table_codec_column() {
         let (table_catalog, _) = build_table_codec();
@@ -240,4 +299,27 @@ mod tests {
         assert_eq!(String::from_utf8(vec[1].clone()).unwrap(), "T1_Data_0_0000000000000000001");
         assert_eq!(String::from_utf8(vec[2].clone()).unwrap(), "T1_Data_0_0000000000000000002");
     }
+
+    #[test]
+    fn test_root_codec_name_bound(){
+        let mut set = BTreeSet::new();
+        let op = |str: &str| {
+            str.to_string().into_bytes()
+        };
+
+        set.insert(op("RootCatalog_0_T0"));
+        set.insert(op("RootCatalog_0_T1"));
+        set.insert(op("RootCatalog_0_T2"));
+
+        let (min, max) = TableCodec::root_table_bound();
+
+        let vec = set
+            .range::<Vec<u8>, (Bound<&Vec<u8>>, Bound<&Vec<u8>>)>((Bound::Included(&min), Bound::Included(&max)))
+            .collect_vec();
+
+        assert_eq!(String::from_utf8(vec[0].clone()).unwrap(), "RootCatalog_0_T0");
+        assert_eq!(String::from_utf8(vec[1].clone()).unwrap(), "RootCatalog_0_T1");
+        assert_eq!(String::from_utf8(vec[2].clone()).unwrap(), "RootCatalog_0_T2");
+
+    }
 }
\ No newline at end of file
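
Note (reviewer sketch, not part of the patch): the codec changes above store one KV entry per table under a key of the form "RootCatalog_0_<table_name>", with the bincode-encoded column count as the value, so a SHOW TABLES is just a range scan between "RootCatalog_0" and "RootCatalog_1". The snippet below is a minimal, self-contained round trip of that scheme; it mirrors encode_root_table / decode_root_table / root_table_bound from the diff but replaces Bytes with plain Vec<u8> and assumes the bincode 1.x API, so treat it as an illustration rather than the crate's actual code.

// Standalone sketch of the RootCatalog key scheme (requires the `bincode` crate).
const BOUND_MIN_TAG: u8 = 0;
const BOUND_MAX_TAG: u8 = 1;

/// Key: "RootCatalog_0_<table_name>", Value: bincode-encoded column count.
fn encode_root_table(table_name: &str, column_count: usize) -> Option<(Vec<u8>, Vec<u8>)> {
    let key = format!("RootCatalog_{}_{}", BOUND_MIN_TAG, table_name);
    bincode::serialize(&column_count)
        .ok()
        .map(|value| (key.into_bytes(), value))
}

/// Recovers (table_name, column_count); the table name is the third `_`-separated field.
fn decode_root_table(key: &[u8], value: &[u8]) -> Option<(String, usize)> {
    let table_name = std::str::from_utf8(key).ok()?.split('_').nth(2)?.to_string();
    let column_count = bincode::deserialize::<usize>(value).ok()?;
    Some((table_name, column_count))
}

/// Scan bounds: every per-table key sorts between "RootCatalog_0" and "RootCatalog_1".
fn root_table_bound() -> (Vec<u8>, Vec<u8>) {
    let op = |tag| format!("RootCatalog_{}", tag).into_bytes();
    (op(BOUND_MIN_TAG), op(BOUND_MAX_TAG))
}

fn main() {
    let (key, value) = encode_root_table("t1", 2).unwrap();
    assert_eq!(String::from_utf8(key.clone()).unwrap(), "RootCatalog_0_t1");
    assert_eq!(decode_root_table(&key, &value), Some(("t1".to_string(), 2)));

    // The encoded key falls inside the bound used by show_tables' iterator.
    let (min, max) = root_table_bound();
    assert!(min.as_slice() < key.as_slice() && key.as_slice() < max.as_slice());
    println!("root-table codec round-trip ok");
}

In the patch itself, create_table writes this entry, drop_table rebuilds the key via encode_root_table(name, 0) and removes it, and show_tables simply iterates root_table_bound and decodes each entry into a (table_name, column_count) tuple for the executor.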