diff --git a/.gitignore b/.gitignore
index 42f05ef9..11f724d7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -24,4 +24,6 @@
 Cargo.lock
 /transaction
 fncksql_bench
-sqlite_bench
\ No newline at end of file
+sqlite_bench
+
+tests/data/row_20000.csv
\ No newline at end of file
diff --git a/Cargo.toml b/Cargo.toml
index 1dd14f8d..41a3d78e 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -2,7 +2,7 @@
 
 [package]
 name = "fnck_sql"
-version = "0.0.1-alpha.9"
+version = "0.0.1-alpha.10"
 edition = "2021"
 authors = ["Kould ", "Xwg "]
 description = "Fast Insert OLTP SQL DBMS"
diff --git a/src/binder/create_table.rs b/src/binder/create_table.rs
index 883812bf..a7ae26ec 100644
--- a/src/binder/create_table.rs
+++ b/src/binder/create_table.rs
@@ -151,7 +151,7 @@ mod tests {
         let transaction = storage.transaction().await?;
 
         let sql = "create table t1 (id int primary key, name varchar(10) null)";
-        let binder = Binder::new(BinderContext::new(&transaction));
+        let mut binder = Binder::new(BinderContext::new(&transaction));
         let stmt = crate::parser::parse_sql(sql).unwrap();
 
         let plan1 = binder.bind(&stmt[0]).unwrap();
diff --git a/src/binder/explain.rs b/src/binder/explain.rs
new file mode 100644
index 00000000..4ef2a9cf
--- /dev/null
+++ b/src/binder/explain.rs
@@ -0,0 +1,14 @@
+use crate::binder::{BindError, Binder};
+use crate::planner::operator::Operator;
+use crate::planner::LogicalPlan;
+use crate::storage::Transaction;
+
+impl<'a, T: Transaction> Binder<'a, T> {
+    pub(crate) fn bind_explain(&mut self, plan: LogicalPlan) -> Result<LogicalPlan, BindError> {
+        Ok(LogicalPlan {
+            operator: Operator::Explain,
+            childrens: vec![plan],
+            physical_option: None,
+        })
+    }
+}
diff --git a/src/binder/mod.rs b/src/binder/mod.rs
index 2181f180..33372463 100644
--- a/src/binder/mod.rs
+++ b/src/binder/mod.rs
@@ -6,6 +6,7 @@ mod create_table;
 mod delete;
 mod distinct;
 mod drop_table;
+mod explain;
 pub mod expr;
 mod insert;
 mod select;
@@ -118,7 +119,7 @@ impl<'a, T: Transaction> Binder<'a, T> {
         Binder { context }
     }
 
-    pub fn bind(mut self, stmt: &Statement) -> Result<LogicalPlan, BindError> {
+    pub fn bind(&mut self, stmt: &Statement) -> Result<LogicalPlan, BindError> {
         let plan = match stmt {
             Statement::Query(query) => self.bind_query(query)?,
             Statement::AlterTable { name, operation } => self.bind_alter_table(name, operation)?,
@@ -184,6 +185,11 @@ impl<'a, T: Transaction> Binder<'a, T> {
                 options,
                 ..
            } => self.bind_copy(source.clone(), *to, target.clone(), options)?,
+            Statement::Explain { statement, .. } => {
+                let plan = self.bind(statement)?;
+
+                self.bind_explain(plan)?
+            }
            _ => return Err(BindError::UnsupportedStmt(stmt.to_string())),
        };
        Ok(plan)
@@ -308,7 +314,7 @@ pub mod test {
         let temp_dir = TempDir::new().expect("unable to create temporary working directory");
         let storage = build_test_catalog(temp_dir.path()).await?;
         let transaction = storage.transaction().await?;
-        let binder = Binder::new(BinderContext::new(&transaction));
+        let mut binder = Binder::new(BinderContext::new(&transaction));
         let stmt = crate::parser::parse_sql(sql)?;
 
         Ok(binder.bind(&stmt[0])?)
diff --git a/src/binder/show.rs b/src/binder/show.rs
index 4b1da3be..0d6ff53d 100644
--- a/src/binder/show.rs
+++ b/src/binder/show.rs
@@ -1,5 +1,4 @@
 use crate::binder::{BindError, Binder};
-use crate::planner::operator::show::ShowTablesOperator;
 use crate::planner::operator::Operator;
 use crate::planner::LogicalPlan;
 use crate::storage::Transaction;
@@ -7,7 +6,7 @@ use crate::storage::Transaction;
 impl<'a, T: Transaction> Binder<'a, T> {
     pub(crate) fn bind_show_tables(&mut self) -> Result<LogicalPlan, BindError> {
         let plan = LogicalPlan {
-            operator: Operator::Show(ShowTablesOperator {}),
+            operator: Operator::Show,
             childrens: vec![],
             physical_option: None,
         };
diff --git a/src/db.rs b/src/db.rs
index a3b4fd38..17521e42 100644
--- a/src/db.rs
+++ b/src/db.rs
@@ -114,7 +114,7 @@ impl Database {
         if stmts.is_empty() {
             return Err(DatabaseError::EmptyStatement);
         }
-        let binder = Binder::new(BinderContext::new(transaction));
+        let mut binder = Binder::new(BinderContext::new(transaction));
         /// Build a logical plan.
         ///
         /// SELECT a,b FROM t1 ORDER BY a LIMIT 1;
diff --git a/src/execution/codegen/dql/index_scan.rs b/src/execution/codegen/dql/index_scan.rs
index f6d9fc88..dd7e821b 100644
--- a/src/execution/codegen/dql/index_scan.rs
+++ b/src/execution/codegen/dql/index_scan.rs
@@ -30,7 +30,7 @@ impl KipChannelIndexNext {
         let ScanOperator {
             table_name,
-            columns,
+            projection_columns: columns,
             limit,
             index_by,
             ..
diff --git a/src/execution/codegen/dql/seq_scan.rs b/src/execution/codegen/dql/seq_scan.rs
index 6976d211..7d7b14a3 100644
--- a/src/execution/codegen/dql/seq_scan.rs
+++ b/src/execution/codegen/dql/seq_scan.rs
@@ -30,7 +30,7 @@ impl KipChannelSeqNext {
         let ScanOperator {
             table_name,
-            columns,
+            projection_columns: columns,
             limit,
             ..
         } = op;
diff --git a/src/execution/mod.rs b/src/execution/mod.rs
index 5621fcd1..2b2eb335 100644
--- a/src/execution/mod.rs
+++ b/src/execution/mod.rs
@@ -51,13 +51,13 @@ pub enum ExecutorError {
     ),
     #[error("Internal error: {0}")]
     InternalError(String),
-    #[error("io error")]
+    #[error("io error: {0}")]
     Io(
         #[from]
         #[source]
         std::io::Error,
     ),
-    #[error("csv error")]
+    #[error("csv error: {0}")]
     Csv(
         #[from]
         #[source]
diff --git a/src/execution/volcano/dml/analyze.rs b/src/execution/volcano/dml/analyze.rs
index 3194c56e..227d9ad4 100644
--- a/src/execution/volcano/dml/analyze.rs
+++ b/src/execution/volcano/dml/analyze.rs
@@ -12,9 +12,10 @@ use crate::types::value::DataValue;
 use futures_async_stream::try_stream;
 use itertools::Itertools;
 use std::collections::HashMap;
-use std::fs;
+use std::fmt::Formatter;
 use std::sync::Arc;
 use std::time::{SystemTime, UNIX_EPOCH};
+use std::{fmt, fs};
 
 const DEFAULT_NUM_OF_BUCKETS: usize = 100;
 const DEFAULT_COLUMN_METAS_PATH: &str = "fnck_sql_column_metas";
@@ -123,3 +124,17 @@ impl Analyze {
         };
     }
 }
+
+impl fmt::Display for AnalyzeOperator {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        let columns = self
+            .columns
+            .iter()
+            .map(|column| format!("{}", column.name()))
+            .join(", ");
+
+        write!(f, "Analyze {} -> [{}]", self.table_name, columns)?;
+
+        Ok(())
+    }
+}
diff --git a/src/execution/volcano/dql/explain.rs b/src/execution/volcano/dql/explain.rs
new file mode 100644
index 00000000..20cb0486
--- /dev/null
+++ b/src/execution/volcano/dql/explain.rs
@@ -0,0 +1,41 @@
+use crate::catalog::ColumnCatalog;
+use crate::catalog::ColumnRef;
+use crate::execution::volcano::{BoxedExecutor, ReadExecutor};
+use crate::execution::ExecutorError;
+use crate::planner::LogicalPlan;
+use crate::storage::Transaction;
+use crate::types::tuple::Tuple;
+use crate::types::value::DataValue;
+use crate::types::value::ValueRef;
+use futures_async_stream::try_stream;
+use std::sync::Arc;
+
+pub struct Explain {
+    plan: LogicalPlan,
+}
+
+impl From<LogicalPlan> for Explain {
+    fn from(plan: LogicalPlan) -> Self {
+        Explain { plan }
+    }
+}
+
+impl<T: Transaction> ReadExecutor<T> for Explain {
+    fn execute(self, _: &T) -> BoxedExecutor {
+        self._execute()
+    }
+}
+
+impl Explain {
+    #[try_stream(boxed, ok = Tuple, error = ExecutorError)]
+    pub async fn _execute(self) {
+        let columns: Vec<ColumnRef> = vec![Arc::new(ColumnCatalog::new_dummy("PLAN".to_string()))];
+        let values: Vec<ValueRef> = vec![Arc::new(DataValue::Utf8(Some(self.plan.explain(0))))];
+
+        yield Tuple {
+            id: None,
+            columns,
+            values,
+        };
+    }
+}
diff --git a/src/execution/volcano/dql/index_scan.rs b/src/execution/volcano/dql/index_scan.rs
index 6f5945aa..f3f0ffca 100644
--- a/src/execution/volcano/dql/index_scan.rs
+++ b/src/execution/volcano/dql/index_scan.rs
@@ -34,7 +34,7 @@ impl IndexScan {
     pub async fn _execute(self, transaction: &T) {
         let ScanOperator {
             table_name,
-            columns,
+            projection_columns: columns,
             limit,
             ..
         } = self.op;
diff --git a/src/execution/volcano/dql/mod.rs b/src/execution/volcano/dql/mod.rs
index 464255ba..36eef721 100644
--- a/src/execution/volcano/dql/mod.rs
+++ b/src/execution/volcano/dql/mod.rs
@@ -1,11 +1,13 @@
 pub(crate) mod aggregate;
 pub(crate) mod dummy;
+pub(crate) mod explain;
 pub(crate) mod filter;
 pub(crate) mod index_scan;
 pub(crate) mod join;
 pub(crate) mod limit;
 pub(crate) mod projection;
 pub(crate) mod seq_scan;
+pub(crate) mod show_table;
 pub(crate) mod sort;
 pub(crate) mod values;
diff --git a/src/execution/volcano/dql/seq_scan.rs b/src/execution/volcano/dql/seq_scan.rs
index fd1e70d7..364738ca 100644
--- a/src/execution/volcano/dql/seq_scan.rs
+++ b/src/execution/volcano/dql/seq_scan.rs
@@ -26,7 +26,7 @@ impl SeqScan {
     pub async fn _execute(self, transaction: &T) {
         let ScanOperator {
             table_name,
-            columns,
+            projection_columns: columns,
             limit,
             ..
         } = self.op;
diff --git a/src/execution/volcano/show/show_table.rs b/src/execution/volcano/dql/show_table.rs
similarity index 85%
rename from src/execution/volcano/show/show_table.rs
rename to src/execution/volcano/dql/show_table.rs
index f8e64ea8..53f9fde6 100644
--- a/src/execution/volcano/show/show_table.rs
+++ b/src/execution/volcano/dql/show_table.rs
@@ -2,22 +2,13 @@ use crate::catalog::ColumnRef;
 use crate::catalog::{ColumnCatalog, TableMeta};
 use crate::execution::volcano::{BoxedExecutor, ReadExecutor};
 use crate::execution::ExecutorError;
-use crate::planner::operator::show::ShowTablesOperator;
 use crate::storage::Transaction;
 use crate::types::tuple::Tuple;
 use crate::types::value::{DataValue, ValueRef};
 use futures_async_stream::try_stream;
 use std::sync::Arc;
 
-pub struct ShowTables {
-    _op: ShowTablesOperator,
-}
-
-impl From<ShowTablesOperator> for ShowTables {
-    fn from(op: ShowTablesOperator) -> Self {
-        ShowTables { _op: op }
-    }
-}
+pub struct ShowTables;
 
 impl<T: Transaction> ReadExecutor<T> for ShowTables {
     fn execute(self, transaction: &T) -> BoxedExecutor {
diff --git a/src/execution/volcano/mod.rs b/src/execution/volcano/mod.rs
index 3cb80f27..4d685f53 100644
--- a/src/execution/volcano/mod.rs
+++ b/src/execution/volcano/mod.rs
@@ -1,7 +1,6 @@
 pub(crate) mod ddl;
 pub(crate) mod dml;
 pub(crate) mod dql;
-pub(crate) mod show;
 
 use crate::execution::volcano::ddl::create_table::CreateTable;
 use crate::execution::volcano::ddl::drop_column::DropColumn;
@@ -15,15 +14,16 @@ use crate::execution::volcano::dml::update::Update;
 use crate::execution::volcano::dql::aggregate::hash_agg::HashAggExecutor;
 use crate::execution::volcano::dql::aggregate::simple_agg::SimpleAggExecutor;
 use crate::execution::volcano::dql::dummy::Dummy;
+use crate::execution::volcano::dql::explain::Explain;
 use crate::execution::volcano::dql::filter::Filter;
 use crate::execution::volcano::dql::index_scan::IndexScan;
 use crate::execution::volcano::dql::join::hash_join::HashJoin;
 use crate::execution::volcano::dql::limit::Limit;
 use crate::execution::volcano::dql::projection::Projection;
 use crate::execution::volcano::dql::seq_scan::SeqScan;
+use crate::execution::volcano::dql::show_table::ShowTables;
 use crate::execution::volcano::dql::sort::Sort;
 use crate::execution::volcano::dql::values::Values;
-use crate::execution::volcano::show::show_table::ShowTables;
 use crate::execution::ExecutorError;
 use crate::planner::operator::{Operator, PhysicalOption};
 use crate::planner::LogicalPlan;
@@ -101,7 +101,12 @@ pub fn build_read<T: Transaction>(plan: LogicalPlan, transaction: &T) -> BoxedExecutor {
             Limit::from((op, input)).execute(transaction)
         }
         Operator::Values(op) => Values::from(op).execute(transaction),
-        Operator::Show(op) => ShowTables::from(op).execute(transaction),
+        Operator::Show => ShowTables.execute(transaction),
+        Operator::Explain => {
+            let input = childrens.remove(0);
+
+            Explain::from(input).execute(transaction)
+        }
         _ => unreachable!(),
     }
 }
diff --git a/src/execution/volcano/show/mod.rs b/src/execution/volcano/show/mod.rs
deleted file mode 100644
index edc17c14..00000000
--- a/src/execution/volcano/show/mod.rs
+++ /dev/null
@@ -1 +0,0 @@
-pub(crate) mod show_table;
diff --git a/src/expression/mod.rs b/src/expression/mod.rs
index a6334cc2..dc095514 100644
--- a/src/expression/mod.rs
+++ b/src/expression/mod.rs
@@ -356,6 +356,12 @@ pub enum BinaryOperator {
     Xor,
 }
 
+impl fmt::Display for ScalarExpression {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        write!(f, "{}", self.output_column().name())
+    }
+}
+
 impl fmt::Display for BinaryOperator {
     fn fmt(&self, f: &mut Formatter) -> fmt::Result {
         match self {
diff --git a/src/expression/simplify.rs b/src/expression/simplify.rs
index 2aaef23c..92e5c746 100644
--- a/src/expression/simplify.rs
+++ b/src/expression/simplify.rs
@@ -8,8 +8,9 @@ use ahash::RandomState;
 use itertools::Itertools;
 use std::cmp::Ordering;
 use std::collections::{Bound, HashSet};
-use std::mem;
+use std::fmt::Formatter;
 use std::sync::Arc;
+use std::{fmt, mem};
 
 #[derive(Debug, PartialEq, Eq, Clone, Hash)]
 pub enum ConstantBinary {
@@ -377,6 +378,13 @@ impl ConstantBinary {
             .chain(eqs.into_iter().map(|val| ConstantBinary::Eq(val.clone())))
             .collect_vec()
     }
+
+    fn join_write(f: &mut Formatter, binaries: &Vec<ConstantBinary>, op: &str) -> fmt::Result {
+        let binaries = binaries.iter().map(|binary| format!("{}", binary)).join(op);
+        write!(f, " {} ", binaries)?;
+
+        Ok(())
+    }
 }
 
 #[derive(Debug)]
@@ -871,7 +879,7 @@ impl ScalarExpression {
             | ScalarExpression::In { expr, .. } => expr.convert_binary(col_id),
             ScalarExpression::IsNull { expr, negated, .. } => match expr.as_ref() {
                 ScalarExpression::ColumnRef(column) => {
-                    Ok((column.id() == column.id()).then(|| {
+                    Ok(column.id().is_some_and(|id| col_id == &id).then(|| {
                         if *negated {
                             ConstantBinary::NotEq(NULL_VALUE.clone())
                         } else {
@@ -954,6 +962,34 @@ impl ScalarExpression {
         }
     }
 }
+
+impl fmt::Display for ConstantBinary {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        match self {
+            ConstantBinary::Scope { min, max } => {
+                match min {
+                    Bound::Unbounded => write!(f, "-∞")?,
+                    Bound::Included(value) => write!(f, "[{}", value)?,
+                    Bound::Excluded(value) => write!(f, "({}", value)?,
+                }
+
+                write!(f, ", ")?;
+
+                match max {
+                    Bound::Unbounded => write!(f, "+∞")?,
+                    Bound::Included(value) => write!(f, "{}]", value)?,
+                    Bound::Excluded(value) => write!(f, "{})", value)?,
+                }
+
+                Ok(())
+            }
+            ConstantBinary::Eq(value) => write!(f, "{}", value),
+            ConstantBinary::NotEq(value) => write!(f, "!{}", value),
+            ConstantBinary::And(binaries) => Self::join_write(f, binaries, " AND "),
+            ConstantBinary::Or(binaries) => Self::join_write(f, binaries, " OR "),
+        }
+    }
+}
+
 #[cfg(test)]
 mod test {
     use crate::catalog::{ColumnCatalog, ColumnDesc, ColumnSummary};
diff --git a/src/optimizer/core/memo.rs b/src/optimizer/core/memo.rs
index a8e37593..e5307adb 100644
--- a/src/optimizer/core/memo.rs
+++ b/src/optimizer/core/memo.rs
@@ -114,7 +114,7 @@ mod tests {
         database.run("analyze table t1").await?;
 
         let transaction = database.storage.transaction().await?;
-        let binder = Binder::new(BinderContext::new(&transaction));
+        let mut binder = Binder::new(BinderContext::new(&transaction));
         let stmt = crate::parser::parse_sql(
             // FIXME: Only by bracketing (c1 > 40 or c1 = 2) can the filter be pushed down below the join
             "select c1, c3 from t1 inner join t2 on c1 = c3 where (c1 > 40 or c1 = 2) and c3 > 22",
diff --git a/src/optimizer/rule/implementation/dql/scan.rs b/src/optimizer/rule/implementation/dql/scan.rs
index 7ab96b24..e4a888b4 100644
--- a/src/optimizer/rule/implementation/dql/scan.rs
+++ b/src/optimizer/rule/implementation/dql/scan.rs
@@ -78,14 +78,16 @@ impl ImplementationRule for IndexScanImplementation {
             if let Some(binaries) = &index_info.binaries {
                 // FIXME: Only UniqueIndex
-                if let Some(histogram) =
+                if let Some(column_meta) =
                     find_column_meta(column_metas, &index_info.meta.column_ids[0])
                 {
                     // need to return table query(non-covering index)
-                    cost = Some(histogram.collect_count(binaries) * 2);
+                    cost = Some(column_meta.collect_count(binaries) * 2);
                 }
             }
-            assert!(!matches!(cost, Some(0)));
+            if matches!(cost, Some(0)) {
+                continue
+            }
 
             group_expr.append_expr(Expression {
                 op: PhysicalOption::IndexScan(index_info.clone()),
diff --git a/src/optimizer/rule/normalization/column_pruning.rs b/src/optimizer/rule/normalization/column_pruning.rs
index e7e1f551..2434536b 100644
--- a/src/optimizer/rule/normalization/column_pruning.rs
+++ b/src/optimizer/rule/normalization/column_pruning.rs
@@ -91,7 +91,7 @@ impl ColumnPruning {
             }
             Operator::Scan(op) => {
                 if !all_referenced {
-                    Self::clear_exprs(column_references, &mut op.columns);
+                    Self::clear_exprs(column_references, &mut op.projection_columns);
                 }
             }
             Operator::Limit(_) | Operator::Join(_) | Operator::Filter(_) => {
@@ -104,6 +104,13 @@
             }
             // Last Operator
             Operator::Dummy | Operator::Values(_) => (),
+            Operator::Explain => {
+                if let Some(child_id) = graph.eldest_child_at(node_id) {
+                    Self::_apply(column_references, true, child_id, graph);
+                } else {
+                    unreachable!()
+                }
+            }
             // DDL Based on Other Plan
             Operator::Insert(_)
             | Operator::Update(_)
@@ -121,7 +128,7 @@
             Operator::CreateTable(_)
             | Operator::DropTable(_)
             | Operator::Truncate(_)
-            | Operator::Show(_)
+            | Operator::Show
             | Operator::CopyFromFile(_)
             | Operator::CopyToFile(_)
             | Operator::AddColumn(_)
@@ -209,7 +216,7 @@ mod tests {
         for grandson_plan in &best_plan.childrens[0].childrens {
             match &grandson_plan.operator {
                 Operator::Scan(op) => {
-                    assert_eq!(op.columns.len(), 1);
+                    assert_eq!(op.projection_columns.len(), 1);
                 }
                 _ => unreachable!("Should be a scan operator"),
             }
diff --git a/src/optimizer/rule/normalization/simplification.rs b/src/optimizer/rule/normalization/simplification.rs
index de92013f..e96c0d3f 100644
--- a/src/optimizer/rule/normalization/simplification.rs
+++ b/src/optimizer/rule/normalization/simplification.rs
@@ -57,7 +57,7 @@ impl ConstantCalculation {
                 }
             }
             Operator::Scan(op) => {
-                for expr in &mut op.columns {
+                for expr in &mut op.projection_columns {
                     expr.constant_calculation()?;
                 }
             }
diff --git a/src/planner/mod.rs b/src/planner/mod.rs
index 25106cc0..8cea163d 100644
--- a/src/planner/mod.rs
+++ b/src/planner/mod.rs
@@ -29,4 +29,19 @@ impl LogicalPlan {
         collect_table(self, &mut tables);
         tables
     }
+
+    pub fn explain(&self, indentation: usize) -> String {
+        let mut result = format!("{:indent$}{}", "", self.operator, indent = indentation);
+
+        if let Some(physical_option) = &self.physical_option {
+            result.push_str(&format!(" [{}]", physical_option));
+        }
+
+        for child in &self.childrens {
+            result.push('\n');
+            result.push_str(&child.explain(indentation + 2));
+        }
+
+        result
+    }
 }
diff --git a/src/planner/operator/aggregate.rs b/src/planner/operator/aggregate.rs
index 9017b079..ed7122b5 100644
--- a/src/planner/operator/aggregate.rs
+++ b/src/planner/operator/aggregate.rs
@@ -1,5 +1,8 @@
 use crate::planner::LogicalPlan;
 use crate::{expression::ScalarExpression, planner::operator::Operator};
+use itertools::Itertools;
+use std::fmt;
+use std::fmt::Formatter;
 
 #[derive(Debug, PartialEq, Eq, Clone, Hash)]
 pub struct AggregateOperator {
@@ -23,3 +26,25 @@ impl AggregateOperator {
         }
     }
 }
+
+impl fmt::Display for AggregateOperator {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        let calls = self
+            .agg_calls
+            .iter()
+            .map(|call| format!("{}", call))
+            .join(", ");
+        write!(f, "Aggregate [{}]", calls)?;
+
+        if !self.groupby_exprs.is_empty() {
+            let groupbys = self
+                .groupby_exprs
+                .iter()
+                .map(|groupby| format!("{}", groupby))
+                .join(", ");
+            write!(f, " -> Group By [{}]", groupbys)?;
+        }
+
+        Ok(())
+    }
+}
diff --git a/src/planner/operator/alter_table/add_column.rs b/src/planner/operator/alter_table/add_column.rs
index 6e87851a..b941aceb 100644
--- a/src/planner/operator/alter_table/add_column.rs
+++ b/src/planner/operator/alter_table/add_column.rs
@@ -1,4 +1,6 @@
 use crate::catalog::{ColumnCatalog, TableName};
+use std::fmt;
+use std::fmt::Formatter;
 
 #[derive(Debug, PartialEq, Eq, Clone, Hash)]
 pub struct AddColumnOperator {
@@ -6,3 +8,17 @@ pub struct AddColumnOperator {
     pub if_not_exists: bool,
     pub column: ColumnCatalog,
 }
+
+impl fmt::Display for AddColumnOperator {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        write!(
+            f,
+            "Add {} -> {}, If Not Exists: {}",
+            self.column.name(),
+            self.table_name,
+            self.if_not_exists
+        )?;
+
+        Ok(())
+    }
+}
diff --git a/src/planner/operator/alter_table/drop_column.rs b/src/planner/operator/alter_table/drop_column.rs
index 8cf5ace5..ad8fc5b4 100644
--- a/src/planner/operator/alter_table/drop_column.rs
+++ b/src/planner/operator/alter_table/drop_column.rs
@@ -1,4 +1,6 @@
 use crate::catalog::TableName;
+use std::fmt;
+use std::fmt::Formatter;
 
 #[derive(Debug, PartialEq, Eq, Clone, Hash)]
 pub struct DropColumnOperator {
@@ -6,3 +8,15 @@ pub struct DropColumnOperator {
     pub column_name: String,
     pub if_exists: bool,
 }
+
+impl fmt::Display for DropColumnOperator {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        write!(
+            f,
+            "Drop {} -> {}, If Exists: {}",
+            self.column_name, self.table_name, self.if_exists
+        )?;
+
+        Ok(())
+    }
+}
diff --git a/src/planner/operator/copy_from_file.rs b/src/planner/operator/copy_from_file.rs
index f5a3084e..daf269ef 100644
--- a/src/planner/operator/copy_from_file.rs
+++ b/src/planner/operator/copy_from_file.rs
@@ -1,5 +1,8 @@
 use crate::binder::copy::ExtSource;
 use crate::catalog::ColumnRef;
+use itertools::Itertools;
+use std::fmt;
+use std::fmt::Formatter;
 
 #[derive(Debug, PartialEq, Eq, Clone, Hash)]
 pub struct CopyFromFileOperator {
@@ -7,3 +10,16 @@ pub struct CopyFromFileOperator {
     pub source: ExtSource,
     pub columns: Vec<ColumnRef>,
 }
+
+impl fmt::Display for CopyFromFileOperator {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        let columns = self
+            .columns
+            .iter()
+            .map(|column| format!("{}", column.name()))
+            .join(", ");
+        write!(f, "Copy {} -> {} [{}]", self.source.path.display(), self.table, columns)?;
+
+        Ok(())
+    }
+}
diff --git a/src/planner/operator/create_table.rs b/src/planner/operator/create_table.rs
index 49f93e1d..0adca431 100644
--- a/src/planner/operator/create_table.rs
+++ b/src/planner/operator/create_table.rs
@@ -1,4 +1,7 @@
 use crate::catalog::{ColumnCatalog, TableName};
+use itertools::Itertools;
+use std::fmt;
+use std::fmt::Formatter;
 
 #[derive(Debug, PartialEq, Eq, Clone, Hash)]
 pub struct CreateTableOperator {
@@ -8,3 +11,20 @@ pub struct CreateTableOperator {
     pub columns: Vec<ColumnCatalog>,
     pub if_not_exists: bool,
 }
+
+impl fmt::Display for CreateTableOperator {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        let columns = self
+            .columns
+            .iter()
+            .map(|column| format!("{}", column.name()))
+            .join(", ");
+        write!(
+            f,
+            "Create {} -> [{}], If Not Exists: {}",
+            self.table_name, columns, self.if_not_exists
+        )?;
+
+        Ok(())
+    }
+}
diff --git a/src/planner/operator/delete.rs b/src/planner/operator/delete.rs
index ca8f5b23..ef1030aa 100644
--- a/src/planner/operator/delete.rs
+++ b/src/planner/operator/delete.rs
@@ -1,4 +1,6 @@
 use crate::catalog::{ColumnRef, TableName};
+use std::fmt;
+use std::fmt::Formatter;
 
 #[derive(Debug, PartialEq, Eq, Clone, Hash)]
 pub struct DeleteOperator {
@@ -6,3 +8,11 @@ pub struct DeleteOperator {
     // for column pruning
     pub primary_key_column: ColumnRef,
 }
+
+impl fmt::Display for DeleteOperator {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        write!(f, "Delete {}", self.table_name)?;
+
+        Ok(())
+    }
+}
diff --git a/src/planner/operator/drop_table.rs b/src/planner/operator/drop_table.rs
index 731a0087..b153ac27 100644
--- a/src/planner/operator/drop_table.rs
+++ b/src/planner/operator/drop_table.rs
@@ -1,4 +1,6 @@
 use crate::catalog::TableName;
+use std::fmt;
+use std::fmt::Formatter;
 
 #[derive(Debug, PartialEq, Eq, Clone, Hash)]
 pub struct DropTableOperator {
@@ -6,3 +8,11 @@ pub struct DropTableOperator {
     pub table_name: TableName,
     pub if_exists: bool,
 }
+
+impl fmt::Display for DropTableOperator {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        write!(f, "Drop {}, If Exists: {}", self.table_name, self.if_exists)?;
+
+        Ok(())
+    }
+}
diff --git a/src/planner/operator/filter.rs b/src/planner/operator/filter.rs
index 372e181a..49ccf0d0 100644
--- a/src/planner/operator/filter.rs
+++ b/src/planner/operator/filter.rs
@@ -1,4 +1,5 @@
-use std::vec;
+use std::fmt::Formatter;
+use std::{fmt, vec};
 
 use crate::expression::ScalarExpression;
 use crate::planner::LogicalPlan;
@@ -20,3 +21,11 @@ impl FilterOperator {
         }
     }
 }
+
+impl fmt::Display for FilterOperator {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        write!(f, "Filter {}, Is Having: {}", self.predicate, self.having)?;
+
+        Ok(())
+    }
+}
diff --git a/src/planner/operator/insert.rs b/src/planner/operator/insert.rs
index 561947dc..c9c12a12 100644
--- a/src/planner/operator/insert.rs
+++ b/src/planner/operator/insert.rs
@@ -1,7 +1,21 @@
 use crate::catalog::TableName;
+use std::fmt;
+use std::fmt::Formatter;
 
 #[derive(Debug, PartialEq, Eq, Clone, Hash)]
 pub struct InsertOperator {
     pub table_name: TableName,
     pub is_overwrite: bool,
 }
+
+impl fmt::Display for InsertOperator {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        write!(
+            f,
+            "Insert {}, Is Overwrite: {}",
+            self.table_name, self.is_overwrite
+        )?;
+
+        Ok(())
+    }
+}
diff --git a/src/planner/operator/join.rs b/src/planner/operator/join.rs
index 3f2b2122..e5de3525 100644
--- a/src/planner/operator/join.rs
+++ b/src/planner/operator/join.rs
@@ -1,9 +1,13 @@
 use crate::expression::ScalarExpression;
 use crate::planner::LogicalPlan;
+use itertools::Itertools;
+use std::fmt;
+use std::fmt::Formatter;
+use strum_macros::Display;
 
 use super::Operator;
 
-#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)]
+#[derive(Debug, Display, PartialEq, Eq, Clone, Copy, Hash)]
 pub enum JoinType {
     Inner,
     Left,
@@ -42,3 +46,34 @@ impl JoinOperator {
         }
     }
 }
+
+impl fmt::Display for JoinOperator {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        write!(f, "{} Join On {}", self.join_type, self.on)?;
+
+        Ok(())
+    }
+}
+
+impl fmt::Display for JoinCondition {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        match self {
+            JoinCondition::On { on, filter } => {
+                let on = on
+                    .iter()
+                    .map(|(v1, v2)| format!("{} = {}", v1, v2))
+                    .join(" AND ");
+
+                write!(f, "{}", on)?;
+                if let Some(filter) = filter {
+                    write!(f, "Where {}", filter)?;
+                }
+            }
+            JoinCondition::None => {
+                write!(f, "Nothing")?;
+            }
+        }
+
+        Ok(())
+    }
+}
diff --git a/src/planner/operator/limit.rs b/src/planner/operator/limit.rs
index 9a2cb8f7..ee034d73 100644
--- a/src/planner/operator/limit.rs
+++ b/src/planner/operator/limit.rs
@@ -1,4 +1,6 @@
 use crate::planner::LogicalPlan;
+use std::fmt;
+use std::fmt::Formatter;
 
 use super::Operator;
 
@@ -21,3 +23,19 @@ impl LimitOperator {
         }
     }
 }
+
+impl fmt::Display for LimitOperator {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        if let Some(limit) = self.limit {
+            write!(f, "Limit {}", limit)?;
+        }
+        if self.limit.is_some() && self.offset.is_some() {
+            write!(f, ", ")?;
+        }
+        if let Some(offset) = self.offset {
+            write!(f, "Offset {}", offset)?;
+        }
+
+        Ok(())
+    }
+}
diff --git a/src/planner/operator/mod.rs b/src/planner/operator/mod.rs
index 5d6a7c98..d59e0143 100644
--- a/src/planner/operator/mod.rs
+++ b/src/planner/operator/mod.rs
@@ -12,7 +12,6 @@ pub mod join;
 pub mod limit;
 pub mod project;
 pub mod scan;
-pub mod show;
 pub mod sort;
 pub mod truncate;
 pub mod update;
@@ -28,12 +27,13 @@ use crate::planner::operator::delete::DeleteOperator;
 use crate::planner::operator::drop_table::DropTableOperator;
 use crate::planner::operator::insert::InsertOperator;
 use crate::planner::operator::join::JoinCondition;
-use crate::planner::operator::show::ShowTablesOperator;
 use crate::planner::operator::truncate::TruncateOperator;
 use crate::planner::operator::update::UpdateOperator;
 use crate::planner::operator::values::ValuesOperator;
 use crate::types::index::IndexInfo;
 use itertools::Itertools;
+use std::fmt;
+use std::fmt::Formatter;
 
 use self::{
     aggregate::AggregateOperator, alter_table::add_column::AddColumnOperator,
@@ -53,6 +53,8 @@ pub enum Operator {
     Sort(SortOperator),
     Limit(LimitOperator),
     Values(ValuesOperator),
+    Show,
+    Explain,
     // DML
     Insert(InsertOperator),
     Update(UpdateOperator),
@@ -64,8 +66,6 @@ pub enum Operator {
     CreateTable(CreateTableOperator),
     DropTable(DropTableOperator),
     Truncate(TruncateOperator),
-    // Show
-    Show(ShowTablesOperator),
     // Copy
     CopyFromFile(CopyFromFileOperator),
     CopyToFile(CopyToFileOperator),
@@ -130,7 +130,7 @@ impl Operator {
                 .flat_map(|expr| expr.referenced_columns(only_column_ref))
                 .collect_vec(),
             Operator::Scan(op) => op
-                .columns
+                .projection_columns
                 .iter()
                 .flat_map(|expr| expr.referenced_columns(only_column_ref))
                 .collect_vec(),
@@ -147,3 +147,62 @@ impl Operator {
         }
     }
 }
+
+impl fmt::Display for Operator {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        match self {
+            Operator::Dummy => write!(f, "Dummy"),
+            Operator::Aggregate(op) => write!(f, "{}", op),
+            Operator::Filter(op) => write!(f, "{}", op),
+            Operator::Join(op) => write!(f, "{}", op),
+            Operator::Project(op) => write!(f, "{}", op),
+            Operator::Scan(op) => write!(f, "{}", op),
+            Operator::Sort(op) => write!(f, "{}", op),
+            Operator::Limit(op) => write!(f, "{}", op),
+            Operator::Values(op) => write!(f, "{}", op),
+            Operator::Show => write!(f, "Show Tables"),
+            Operator::Explain => unreachable!(),
+            Operator::Insert(op) => write!(f, "{}", op),
+            Operator::Update(op) => write!(f, "{}", op),
+            Operator::Delete(op) => write!(f, "{}", op),
+            Operator::Analyze(op) => write!(f, "{}", op),
+            Operator::AddColumn(op) => write!(f, "{}", op),
+            Operator::DropColumn(op) => write!(f, "{}", op),
+            Operator::CreateTable(op) => write!(f, "{}", op),
+            Operator::DropTable(op) => write!(f, "{}", op),
+            Operator::Truncate(op) => write!(f, "{}", op),
+            Operator::CopyFromFile(op) => write!(f, "{}", op),
+            Operator::CopyToFile(_) => todo!(),
+        }
+    }
+}
+
+impl fmt::Display for PhysicalOption {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        match self {
+            PhysicalOption::Dummy => write!(f, "Dummy"),
+            PhysicalOption::SimpleAggregate => write!(f, "SimpleAggregate"),
+            PhysicalOption::HashAggregate => write!(f, "HashAggregate"),
+            PhysicalOption::Filter => write!(f, "Filter"),
+            PhysicalOption::HashJoin => write!(f, "HashJoin"),
+            PhysicalOption::Project => write!(f, "Project"),
+            PhysicalOption::SeqScan => write!(f, "SeqScan"),
+            PhysicalOption::IndexScan(index) => write!(f, "IndexScan By {}", index),
+            PhysicalOption::RadixSort => write!(f, "RadixSort"),
+            PhysicalOption::Limit => write!(f, "Limit"),
+            PhysicalOption::Values => write!(f, "Values"),
+            PhysicalOption::Insert => write!(f, "Insert"),
+            PhysicalOption::Update => write!(f, "Update"),
+            PhysicalOption::Delete => write!(f, "Delete"),
+            PhysicalOption::AddColumn => write!(f, "AddColumn"),
+            PhysicalOption::DropColumn => write!(f, "DropColumn"),
+            PhysicalOption::CreateTable => write!(f, "CreateTable"),
+            PhysicalOption::DropTable => write!(f, "DropTable"),
+            PhysicalOption::Truncate => write!(f, "Truncate"),
+            PhysicalOption::Show => write!(f, "Show"),
+            PhysicalOption::CopyFromFile => write!(f, "CopyFromFile"),
+            PhysicalOption::CopyToFile => write!(f, "CopyToFile"),
+            PhysicalOption::Analyze => write!(f, "Analyze"),
+        }
+    }
+}
diff --git a/src/planner/operator/project.rs b/src/planner/operator/project.rs
index c9a8ee7c..f8961167 100644
--- a/src/planner/operator/project.rs
+++ b/src/planner/operator/project.rs
@@ -1,6 +1,19 @@
 use crate::expression::ScalarExpression;
+use itertools::Itertools;
+use std::fmt;
+use std::fmt::Formatter;
 
 #[derive(Debug, PartialEq, Eq, Clone, Hash)]
 pub struct ProjectOperator {
     pub exprs: Vec<ScalarExpression>,
 }
+
+impl fmt::Display for ProjectOperator {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        let exprs = self.exprs.iter().map(|expr| format!("{}", expr)).join(", ");
+
+        write!(f, "Projection [{}]", exprs)?;
+
+        Ok(())
+    }
+}
diff --git a/src/planner/operator/scan.rs b/src/planner/operator/scan.rs
index 9128c59d..7e808b73 100644
--- a/src/planner/operator/scan.rs
+++ b/src/planner/operator/scan.rs
@@ -5,6 +5,8 @@ use crate::storage::Bounds;
 use crate::types::index::IndexInfo;
 use crate::types::ColumnId;
 use itertools::Itertools;
+use std::fmt;
+use std::fmt::Formatter;
 
 use super::Operator;
 
@@ -12,7 +14,7 @@ use super::Operator;
 pub struct ScanOperator {
     pub table_name: TableName,
     pub primary_key: ColumnId,
-    pub columns: Vec<ScalarExpression>,
+    pub projection_columns: Vec<ScalarExpression>,
     // Support push down limit.
     pub limit: Bounds,
@@ -49,7 +51,7 @@ impl ScanOperator {
             index_infos,
             table_name,
             primary_key: primary_key_option.unwrap(),
-            columns,
+            projection_columns: columns,
             limit: (None, None),
         }),
@@ -58,3 +60,24 @@
         }
     }
 }
+
+impl fmt::Display for ScanOperator {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        let projection_columns = self
+            .projection_columns
+            .iter()
+            .map(|column| format!("{}", column))
+            .join(", ");
+        let (offset, limit) = self.limit;
+
+        write!(f, "Scan {} -> [{}]", self.table_name, projection_columns)?;
+        if let Some(limit) = limit {
+            write!(f, ", Limit: {}", limit)?;
+        }
+        if let Some(offset) = offset {
+            write!(f, ", Offset: {}", offset)?;
+        }
+
+        Ok(())
+    }
+}
diff --git a/src/planner/operator/show.rs b/src/planner/operator/show.rs
deleted file mode 100644
index 7bd3585e..00000000
--- a/src/planner/operator/show.rs
+++ /dev/null
@@ -1,2 +0,0 @@
-#[derive(Debug, PartialEq, Eq, Clone, Hash)]
-pub struct ShowTablesOperator {}
diff --git a/src/planner/operator/sort.rs b/src/planner/operator/sort.rs
index 63bb74e2..3186be0b 100644
--- a/src/planner/operator/sort.rs
+++ b/src/planner/operator/sort.rs
@@ -1,4 +1,7 @@
 use crate::expression::ScalarExpression;
+use itertools::Itertools;
+use std::fmt;
+use std::fmt::Formatter;
 
 #[derive(Debug, PartialEq, Eq, Clone, Hash)]
 pub struct SortField {
@@ -23,3 +26,38 @@ pub struct SortOperator {
     /// Support push down limit to sort plan.
     pub limit: Option<usize>,
 }
+
+impl fmt::Display for SortOperator {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        let sort_fields = self
+            .sort_fields
+            .iter()
+            .map(|sort_field| format!("{}", sort_field))
+            .join(", ");
+        write!(f, "Sort By {}", sort_fields)?;
+
+        if let Some(limit) = self.limit {
+            write!(f, ", Limit {}", limit)?;
+        }
+
+        Ok(())
+    }
+}
+
+impl fmt::Display for SortField {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        write!(f, "{}", self.expr)?;
+        if self.asc {
+            write!(f, " Asc")?;
+        } else {
+            write!(f, " Desc")?;
+        }
+        if self.nulls_first {
+            write!(f, " Nulls First")?;
+        } else {
+            write!(f, " Nulls Last")?;
+        }
+
+        Ok(())
+    }
+}
diff --git a/src/planner/operator/truncate.rs b/src/planner/operator/truncate.rs
index 718f4bcb..3bee46c7 100644
--- a/src/planner/operator/truncate.rs
+++ b/src/planner/operator/truncate.rs
@@ -1,7 +1,17 @@
 use crate::catalog::TableName;
+use std::fmt;
+use std::fmt::Formatter;
 
 #[derive(Debug, PartialEq, Eq, Clone, Hash)]
 pub struct TruncateOperator {
     /// Table name to insert to
     pub table_name: TableName,
 }
+
+impl fmt::Display for TruncateOperator {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        write!(f, "Truncate {}", self.table_name)?;
+
+        Ok(())
+    }
+}
diff --git a/src/planner/operator/update.rs b/src/planner/operator/update.rs
index f4e61b56..bef45a92 100644
--- a/src/planner/operator/update.rs
+++ b/src/planner/operator/update.rs
@@ -1,6 +1,16 @@
 use crate::catalog::TableName;
+use std::fmt;
+use std::fmt::Formatter;
 
 #[derive(Debug, PartialEq, Eq, Clone, Hash)]
 pub struct UpdateOperator {
     pub table_name: TableName,
 }
+
+impl fmt::Display for UpdateOperator {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        write!(f, "Update {}", self.table_name)?;
+
+        Ok(())
+    }
+}
diff --git a/src/planner/operator/values.rs b/src/planner/operator/values.rs
index d0d11f9b..e1472287 100644
--- a/src/planner/operator/values.rs
+++ b/src/planner/operator/values.rs
@@ -1,8 +1,25 @@
 use crate::catalog::ColumnRef;
 use crate::types::value::ValueRef;
+use itertools::Itertools;
+use std::fmt;
+use std::fmt::Formatter;
 
 #[derive(Debug, PartialEq, Eq, Clone, Hash)]
 pub struct ValuesOperator {
     pub rows: Vec<Vec<ValueRef>>,
     pub columns: Vec<ColumnRef>,
 }
+
+impl fmt::Display for ValuesOperator {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        let columns = self
+            .columns
+            .iter()
+            .map(|column| format!("{}", column.name()))
+            .join(", ");
+
+        write!(f, "Values [{}], RowsLen: {}", columns, self.rows.len())?;
+
+        Ok(())
+    }
+}
diff --git a/src/types/index.rs b/src/types/index.rs
index b5cffd5f..29dc70c8 100644
--- a/src/types/index.rs
+++ b/src/types/index.rs
@@ -1,7 +1,10 @@
 use crate::expression::simplify::ConstantBinary;
 use crate::types::value::ValueRef;
 use crate::types::ColumnId;
+use itertools::Itertools;
 use serde::{Deserialize, Serialize};
+use std::fmt;
+use std::fmt::Formatter;
 use std::sync::Arc;
 
 pub type IndexId = u32;
@@ -32,3 +35,28 @@ impl Index {
         Index { id, column_values }
     }
 }
+
+impl fmt::Display for IndexInfo {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        write!(f, "{}", self.meta)?;
+        write!(f, " => ")?;
+
+        if let Some(binaries) = &self.binaries {
+            let binaries = binaries
+                .iter()
+                .map(|binary| format!("{}", binary))
+                .join(", ");
+            write!(f, "{}", binaries)?;
+        } else {
+            write!(f, "NONE")?;
+        }
+
+        Ok(())
+    }
+}
+
+impl fmt::Display for IndexMeta {
+    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
+        write!(f, "{}", self.name)
+    }
+}
diff --git a/tests/slt/copy.slt b/tests/slt/copy.slt
index 77f78384..ac334f31 100644
--- a/tests/slt/copy.slt
+++ b/tests/slt/copy.slt
@@ -1,15 +1,12 @@
 statement ok
 create table test_copy (a int primary key, b float, c varchar(10))
 
-
-
 # copy data from tbl file
 query I
 COPY test_copy FROM 'tests/data/copy.tbl' ( DELIMITER '|' );
 ----
 import 2 rows
 
-
 query I
 SELECT * FROM test_copy
 ----
diff --git a/tests/slt/delete.slt b/tests/slt/delete.slt
index 2e66b16c..2231b78f 100644
--- a/tests/slt/delete.slt
+++ b/tests/slt/delete.slt
@@ -22,4 +22,7 @@ delete from t
 
 query III rowsort
 select * from t
-----
\ No newline at end of file
+----
+
+statement ok
+drop table t
\ No newline at end of file
diff --git a/tests/slt/explain.slt.reference b/tests/slt/explain.slt.reference
new file mode 100644
index 00000000..eaf8b1a5
--- /dev/null
+++ b/tests/slt/explain.slt.reference
@@ -0,0 +1,236 @@
+statement ok
+create table t1(id int primary key, c1 int, c2 varchar)
+
+statement ok
+create table t2(id int primary key, c3 int, c4 varchar)
+
+query T
+COPY t1 FROM 'tests/data/row_20000.csv' ( DELIMITER '|' );
+----
+import 20000 rows
+
+statement ok
+analyze table t1
+
+query I
+explain select * from t1
+----
+Projection [id, c1, c2] [Project]
+  Scan t1 -> [id, c1, c2] [SeqScan]
+
+query T
+explain select c1 from t1
+----
+Projection [c1] [Project]
+  Scan t1 -> [c1] [SeqScan]
+
+query T
+explain select c1 from t1 limit 10
+----
+Projection [c1] [Project]
+  Scan t1 -> [c1], Limit: 10 [SeqScan]
+
+query T
+explain select c1 from t1 limit 10 offset 5
+----
+Projection [c1] [Project]
+  Scan t1 -> [c1], Limit: 10, Offset: 5 [SeqScan]
+
+query T
+explain select c1 from t1 where c1 + 1 = 1 or c2 > 1
+----
+Projection [c1] [Project]
+  Filter ((c1 = 0) || (c2 > 1)), Is Having: false [Filter]
+    Scan t1 -> [c1, c2] [SeqScan]
+
+query T
+explain select c1 from t1 where c1 > 1 + 3 and c1 < 10
+----
+Projection [c1] [Project]
+  Filter ((c1 > 4) && (c1 < 10)), Is Having: false [Filter]
+    Scan t1 -> [c1] [SeqScan]
+
+query T
+explain select c1 from t1 where c1 in (1, 2, 3)
+----
+Projection [c1] [Project]
+  Filter ((c1 = 3) || ((c1 = 2) || ((c1 = 1) || false))), Is Having: false [Filter]
+    Scan t1 -> [c1] [SeqScan]
+
+query T
+explain select c1 from t1 where c1 not in (1, 2, 3)
+----
+Projection [c1] [Project]
+  Filter ((c1 != 3) && ((c1 != 2) && ((c1 != 1) && true))), Is Having: false [Filter]
+    Scan t1 -> [c1] [SeqScan]
+
+query T
+explain select c1 from t1 where c2 like 'lol%'
+----
+Projection [c1] [Project]
+  Filter (c2 like lol%), Is Having: false [Filter]
+    Scan t1 -> [c1, c2] [SeqScan]
+
+query T
+explain select c1 from t1 where c2 not like 'lol%'
+----
+Projection [c1] [Project]
+  Filter (c2 not like lol%), Is Having: false [Filter]
+    Scan t1 -> [c1, c2] [SeqScan]
+
+query T
+explain select c1 from t1 where c2 is null
+----
+Projection [c1] [Project]
+  Filter c2 is null, Is Having: false [Filter]
+    Scan t1 -> [c1, c2] [SeqScan]
+
+query T
+explain select c1 from t1 where c2 is not null
+----
+Projection [c1] [Project]
+  Filter c2 is not null, Is Having: false [Filter]
+    Scan t1 -> [c1, c2] [SeqScan]
+
+query T
+explain select c1 from t1 order by c1
+----
+Projection [c1] [Project]
+  Sort By c1 Asc Nulls First [RadixSort]
+    Scan t1 -> [id, c1, c2] [SeqScan]
+
+query T
+explain select c1 from t1 order by c1 desc, c2
+----
+Projection [c1] [Project]
+  Sort By c1 Desc Nulls First, c2 Asc Nulls First [RadixSort]
+    Scan t1 -> [id, c1, c2] [SeqScan]
+
+query T
+explain select c1 from t1 order by c1 nulls last
+----
+Projection [c1] [Project]
+  Sort By c1 Asc Nulls Last [RadixSort]
+    Scan t1 -> [id, c1, c2] [SeqScan]
+
+query T
+explain select sum(c1) from t1
+----
+Projection [Sum(c1)] [Project]
+  Aggregate [Sum(c1)] [SimpleAggregate]
+    Scan t1 -> [c1] [SeqScan]
+
+query T
+explain select c1, sum(c2) from t1 group by c1
+----
+Projection [c1, Sum(c2)] [Project]
+  Aggregate [Sum(c2)] -> Group By [c1] [HashAggregate]
+    Scan t1 -> [c1, c2] [SeqScan]
+
+query T
+explain select c1, sum(c2) from t1 where c1 > 10 group by c1
+----
+Projection [c1, Sum(c2)] [Project]
+  Aggregate [Sum(c2)] -> Group By [c1] [HashAggregate]
+    Filter (c1 > 10), Is Having: false [Filter]
+      Scan t1 -> [c1, c2] [SeqScan]
+
+query T
+explain select c1, sum(c2) from t1 group by c1 having c1 > 10
+----
+Projection [c1, Sum(c2)] [Project]
+  Filter (c1 > 10), Is Having: true [Filter]
+    Aggregate [Sum(c2)] -> Group By [c1] [HashAggregate]
+      Scan t1 -> [c1, c2] [SeqScan]
+
+query T
+explain select * from t1 left join t2 on c1 = c2 and c1 > 10
+----
+Projection [id, c1, c2, id, c3, c4] [Project]
+  Left Join On Where ((c1 = c2) && (c1 > 10)) [HashJoin]
+    Scan t1 -> [id, c1, c2] [SeqScan]
+    Scan t2 -> [id, c3, c4] [SeqScan]
+
+query T
+explain select * from t1 right join t2 on c1 = c2 and c1 > 10
+----
+Projection [id, c1, c2, id, c3, c4] [Project]
+  Right Join On Where ((c1 = c2) && (c1 > 10)) [HashJoin]
+    Scan t1 -> [id, c1, c2] [SeqScan]
+    Scan t2 -> [id, c3, c4] [SeqScan]
+
+query T
+explain select * from t1 inner join t2 on c1 = c2 and c1 > 10
+----
+Projection [id, c1, c2, id, c3, c4] [Project]
+  Inner Join On Where ((c1 = c2) && (c1 > 10)) [HashJoin]
+    Scan t1 -> [id, c1, c2] [SeqScan]
+    Scan t2 -> [id, c3, c4] [SeqScan]
+
+query T
+explain select * from t1 full join t2 on c1 = c2 and c1 > 10
+----
+Projection [id, c1, c2, id, c3, c4] [Project]
+  Full Join On Where ((c1 = c2) && (c1 > 10)) [HashJoin]
+    Scan t1 -> [id, c1, c2] [SeqScan]
+    Scan t2 -> [id, c3, c4] [SeqScan]
+
+query T
+explain show tables
+----
+Show Tables
+
+query T
+explain insert into t1 values (200001,1,10)
+----
+Insert t1, Is Overwrite: false [Insert]
+  Values [id, c1, c2], RowsLen: 1 [Values]
+
+query T
+explain insert overwrite t1 values (200001,1,10)
+----
+Insert t1, Is Overwrite: true [Insert]
+  Values [id, c1, c2], RowsLen: 1 [Values]
+
+query T
+explain update t1 set c1 = 0 where id = 0
+----
+Update t1 [Update]
+  Filter (id = 0), Is Having: false [Filter]
+    Scan t1 -> [id, c1, c2] [IndexScan By pk_id => 0]
+  Values [c1], RowsLen: 1 [Values]
+
+query T
+explain delete from t1 where id = 0
+----
+Delete t1 [Delete]
+  Filter (id = 0), Is Having: false [Filter]
+    Scan t1 -> [id, c1, c2] [IndexScan By pk_id => 0]
+
+query T
+explain alter table t1 add column if not exists c8 int default 0
+----
+Add c8 -> t1, If Not Exists: true [AddColumn]
+  Scan t1 -> [id, c1, c2] [SeqScan]
+
+query T
+explain alter table t1 drop column if exists c2
+----
+Drop c2 -> t1, If Exists: true [DropColumn]
+  Scan t1 -> [id, c1, c2] [SeqScan]
+
+query T
+explain truncate t1
+----
+Truncate t1
+
+query T
+explain copy t1 from 'tests/data/row_20000.csv' ( DELIMITER '|' );
+----
+Copy tests/data/row_20000.csv -> t1 [id, c1, c2] [CopyFromFile]
+
+statement ok
+drop table t1
+
+statement ok
+drop table t2
\ No newline at end of file
diff --git a/tests/slt/insert.slt b/tests/slt/insert.slt
index 28e8d9f9..caf0754f 100644
--- a/tests/slt/insert.slt
+++ b/tests/slt/insert.slt
@@ -28,11 +28,14 @@ insert into t(id, v1, v2, v3) values (0, 0, 0)
 statement ok
 insert into t values (8,NULL,NULL,NULL)
 
+statement ok
+insert overwrite t values (1, 9, 9, 9)
+
 query IIII rowsort
 select * from t
 ----
 0 1 10 100
-1 1 10 100
+1 9 9 9
 2 2 20 200
 3 3 30 300
 4 4 40 400
diff --git a/tests/slt/update b/tests/slt/update
new file mode 100644
index 00000000..7ad629b0
--- /dev/null
+++ b/tests/slt/update
@@ -0,0 +1,33 @@
+statement ok
+create table t(id int primary key, v1 int, v2 int, v3 int)
+
+statement ok
+insert into t values (0,1,10,100)
+
+statement ok
+insert into t values (1,1,10,100), (2,2,20,200), (3,3,30,300), (4,4,40,400)
+
+statement ok
+update t set v2 = 9 where v1 = 1
+
+query III rowsort
+select * from t;
+----
+1 1 9 100
+2 2 20 200
+3 3 30 300
+4 4 40 400
+
+statement ok
+update t set v2 = 9
+
+query III rowsort
+select * from t
+----
+1 1 9 100
+2 2 9 200
+3 3 9 300
+4 4 9 400
+
+statement ok
+drop table t
\ No newline at end of file
diff --git a/tests/sqllogictest/src/main.rs b/tests/sqllogictest/src/main.rs
index be927f92..efa99194 100644
--- a/tests/sqllogictest/src/main.rs
+++ b/tests/sqllogictest/src/main.rs
@@ -1,3 +1,6 @@
+use std::fs::File;
+use std::io;
+use std::io::Write;
 use fnck_sql::db::Database;
 use sqllogictest::Runner;
 use sqllogictest_test::KipSQL;
@@ -12,6 +15,7 @@ async fn main() {
     std::env::set_current_dir(path).unwrap();
 
     println!("FnckSQL Test Start!\n");
+    init_20000_row_csv().expect("failed to init csv");
 
     for slt_file in glob::glob(SLT_PATTERN).expect("failed to find slt files") {
         let temp_dir = TempDir::new().expect("unable to create temporary working directory");
@@ -33,3 +37,18 @@ async fn main() {
         println!("-> Pass!\n\n")
     }
 }
+
+fn init_20000_row_csv() -> io::Result<()> {
+    let path = "tests/data/row_20000.csv";
+
+    if !Path::new(path).exists() {
+        let mut file = File::create(path)?;
+
+        for i in 0..20_000 {
+            let row = (0..3).map(|j| (i * 3 + j).to_string()).collect::<Vec<_>>().join("|");
+            writeln!(file, "{}", row)?;
+        }
+    }
+
+    Ok(())
+}