From 17f04ddcd5bc185ba3e306b01c74ceb174341c9e Mon Sep 17 00:00:00 2001 From: Kould <2435992353@qq.com> Date: Thu, 1 Feb 2024 14:59:16 +0800 Subject: [PATCH] refactor: flatten the error hierarchy --- benchmarks/query_benchmark.rs | 3 +- examples/hello_world.rs | 3 +- examples/transaction.rs | 3 +- src/binder/aggregate.rs | 22 +-- src/binder/alter_table.rs | 9 +- src/binder/analyze.rs | 7 +- src/binder/copy.rs | 7 +- src/binder/create_table.rs | 20 ++- src/binder/delete.rs | 7 +- src/binder/drop_table.rs | 5 +- src/binder/explain.rs | 5 +- src/binder/expr.rs | 34 ++-- src/binder/insert.rs | 13 +- src/binder/mod.rs | 71 +++----- src/binder/select.rs | 55 +++--- src/binder/show.rs | 5 +- src/binder/truncate.rs | 8 +- src/binder/update.rs | 7 +- src/catalog/mod.rs | 10 -- src/catalog/table.rs | 13 +- src/db.rs | 50 +----- src/errors.rs | 166 ++++++++++++++++++ src/execution/mod.rs | 82 --------- src/execution/volcano/ddl/add_column.rs | 5 +- src/execution/volcano/ddl/create_table.rs | 4 +- src/execution/volcano/ddl/drop_column.rs | 9 +- src/execution/volcano/ddl/drop_table.rs | 4 +- src/execution/volcano/ddl/truncate.rs | 4 +- src/execution/volcano/dml/analyze.rs | 7 +- src/execution/volcano/dml/copy_from_file.rs | 17 +- src/execution/volcano/dml/delete.rs | 4 +- src/execution/volcano/dml/insert.rs | 9 +- src/execution/volcano/dml/update.rs | 4 +- src/execution/volcano/dql/aggregate/avg.rs | 6 +- src/execution/volcano/dql/aggregate/count.rs | 10 +- .../volcano/dql/aggregate/hash_agg.rs | 14 +- .../volcano/dql/aggregate/min_max.rs | 6 +- src/execution/volcano/dql/aggregate/mod.rs | 6 +- .../volcano/dql/aggregate/simple_agg.rs | 4 +- src/execution/volcano/dql/aggregate/sum.rs | 10 +- src/execution/volcano/dql/dummy.rs | 4 +- src/execution/volcano/dql/explain.rs | 4 +- src/execution/volcano/dql/filter.rs | 4 +- src/execution/volcano/dql/index_scan.rs | 4 +- src/execution/volcano/dql/join/hash_join.rs | 21 ++- src/execution/volcano/dql/limit.rs | 4 +- 
src/execution/volcano/dql/projection.rs | 4 +- src/execution/volcano/dql/seq_scan.rs | 4 +- src/execution/volcano/dql/show_table.rs | 4 +- src/execution/volcano/dql/sort.rs | 9 +- src/execution/volcano/dql/values.rs | 4 +- src/execution/volcano/mod.rs | 6 +- src/expression/evaluator.rs | 4 +- src/expression/simplify.rs | 51 +++--- src/expression/value_compute.rs | 36 ++-- src/lib.rs | 1 + src/optimizer/core/column_meta.rs | 12 +- src/optimizer/core/histogram.rs | 24 +-- src/optimizer/core/memo.rs | 9 +- src/optimizer/core/rule.rs | 6 +- src/optimizer/heuristic/graph.rs | 18 +- src/optimizer/heuristic/matcher.rs | 4 +- src/optimizer/heuristic/optimizer.rs | 10 +- src/optimizer/mod.rs | 41 ----- .../rule/implementation/ddl/add_column.rs | 2 +- .../rule/implementation/ddl/create_table.rs | 2 +- .../rule/implementation/ddl/drop_column.rs | 2 +- .../rule/implementation/ddl/drop_table.rs | 2 +- .../rule/implementation/ddl/truncate.rs | 2 +- .../rule/implementation/dml/analyze.rs | 2 +- .../rule/implementation/dml/copy_from_file.rs | 2 +- .../rule/implementation/dml/copy_to_file.rs | 2 +- .../rule/implementation/dml/delete.rs | 2 +- .../rule/implementation/dml/insert.rs | 2 +- .../rule/implementation/dml/update.rs | 2 +- .../rule/implementation/dql/aggregate.rs | 2 +- .../rule/implementation/dql/dummy.rs | 2 +- .../rule/implementation/dql/filter.rs | 2 +- src/optimizer/rule/implementation/dql/join.rs | 2 +- .../rule/implementation/dql/limit.rs | 2 +- .../rule/implementation/dql/projection.rs | 2 +- src/optimizer/rule/implementation/dql/scan.rs | 6 +- src/optimizer/rule/implementation/dql/sort.rs | 2 +- .../rule/implementation/dql/values.rs | 2 +- src/optimizer/rule/implementation/marcos.rs | 2 +- src/optimizer/rule/implementation/mod.rs | 4 +- .../rule/normalization/column_pruning.rs | 6 +- .../rule/normalization/combine_operators.rs | 8 +- src/optimizer/rule/normalization/mod.rs | 4 +- .../rule/normalization/pushdown_limit.rs | 12 +- 
.../rule/normalization/pushdown_predicates.rs | 8 +- .../rule/normalization/simplification.rs | 10 +- src/storage/kip.rs | 72 ++++---- src/storage/mod.rs | 85 +++------ src/storage/table_codec.rs | 39 ++-- src/types/errors.rs | 78 -------- src/types/mod.rs | 22 +-- src/types/tuple_builder.rs | 8 +- src/types/value.rs | 66 +++---- tests/sqllogictest/src/lib.rs | 3 +- 100 files changed, 687 insertions(+), 800 deletions(-) create mode 100644 src/errors.rs delete mode 100644 src/types/errors.rs diff --git a/benchmarks/query_benchmark.rs b/benchmarks/query_benchmark.rs index 0584b72f..b6cd368d 100644 --- a/benchmarks/query_benchmark.rs +++ b/benchmarks/query_benchmark.rs @@ -1,5 +1,6 @@ use criterion::{criterion_group, criterion_main, Criterion}; -use fnck_sql::db::{Database, DatabaseError}; +use fnck_sql::db::Database; +use fnck_sql::errors::DatabaseError; use fnck_sql::execution::volcano; use fnck_sql::storage::kip::KipStorage; use fnck_sql::storage::Storage; diff --git a/examples/hello_world.rs b/examples/hello_world.rs index 5f0d091c..a552ed29 100644 --- a/examples/hello_world.rs +++ b/examples/hello_world.rs @@ -1,4 +1,5 @@ -use fnck_sql::db::{Database, DatabaseError}; +use fnck_sql::db::Database; +use fnck_sql::errors::DatabaseError; use fnck_sql::implement_from_tuple; use fnck_sql::types::tuple::Tuple; use fnck_sql::types::value::DataValue; diff --git a/examples/transaction.rs b/examples/transaction.rs index 67888e6a..0a921d11 100644 --- a/examples/transaction.rs +++ b/examples/transaction.rs @@ -1,4 +1,5 @@ -use fnck_sql::db::{Database, DatabaseError}; +use fnck_sql::db::Database; +use fnck_sql::errors::DatabaseError; #[tokio::main] async fn main() -> Result<(), DatabaseError> { diff --git a/src/binder/aggregate.rs b/src/binder/aggregate.rs index 159be108..4c0cebb5 100644 --- a/src/binder/aggregate.rs +++ b/src/binder/aggregate.rs @@ -3,7 +3,7 @@ use itertools::Itertools; use sqlparser::ast::{Expr, OrderByExpr}; use std::collections::HashSet; -use 
crate::binder::BindError; +use crate::errors::DatabaseError; use crate::planner::LogicalPlan; use crate::storage::Transaction; use crate::{ @@ -26,7 +26,7 @@ impl<'a, T: Transaction> Binder<'a, T> { pub fn extract_select_aggregate( &mut self, select_items: &mut [ScalarExpression], - ) -> Result<(), BindError> { + ) -> Result<(), DatabaseError> { for column in select_items { self.visit_column_agg_expr(column)?; } @@ -37,7 +37,7 @@ impl<'a, T: Transaction> Binder<'a, T> { &mut self, select_list: &mut [ScalarExpression], groupby: &[Expr], - ) -> Result<(), BindError> { + ) -> Result<(), DatabaseError> { self.validate_groupby_illegal_column(select_list, groupby)?; for gb in groupby { @@ -51,7 +51,7 @@ impl<'a, T: Transaction> Binder<'a, T> { &mut self, having: &Option, orderbys: &[OrderByExpr], - ) -> Result<(Option, Option>), BindError> { + ) -> Result<(Option, Option>), DatabaseError> { // Extract having expression. let return_having = if let Some(having) = having { let mut having = self.bind_expr(having)?; @@ -87,7 +87,7 @@ impl<'a, T: Transaction> Binder<'a, T> { Ok((return_having, return_orderby)) } - fn visit_column_agg_expr(&mut self, expr: &mut ScalarExpression) -> Result<(), BindError> { + fn visit_column_agg_expr(&mut self, expr: &mut ScalarExpression) -> Result<(), DatabaseError> { match expr { ScalarExpression::AggCall { .. 
} => { self.context.agg_calls.push(expr.clone()); @@ -125,7 +125,7 @@ impl<'a, T: Transaction> Binder<'a, T> { &mut self, select_items: &[ScalarExpression], groupby: &[Expr], - ) -> Result<(), BindError> { + ) -> Result<(), DatabaseError> { let mut group_raw_exprs = vec![]; for expr in groupby { let expr = self.bind_expr(expr)?; @@ -159,7 +159,7 @@ impl<'a, T: Transaction> Binder<'a, T> { group_raw_set.remove(expr); if !group_raw_exprs.iter().contains(expr) { - return Err(BindError::AggMiss(format!( + return Err(DatabaseError::AggMiss(format!( "{:?} must appear in the GROUP BY clause or be used in an aggregate function", expr ))); @@ -167,7 +167,7 @@ impl<'a, T: Transaction> Binder<'a, T> { } if !group_raw_set.is_empty() { - return Err(BindError::AggMiss( + return Err(DatabaseError::AggMiss( "In the GROUP BY clause the field must be in the select clause".to_string(), )); } @@ -202,7 +202,7 @@ impl<'a, T: Transaction> Binder<'a, T> { } /// Validate having or orderby clause is valid, if SQL has group by clause. 
- pub fn validate_having_orderby(&self, expr: &ScalarExpression) -> Result<(), BindError> { + pub fn validate_having_orderby(&self, expr: &ScalarExpression) -> Result<(), DatabaseError> { if self.context.group_by_exprs.is_empty() { return Ok(()); } @@ -215,7 +215,7 @@ impl<'a, T: Transaction> Binder<'a, T> { return Ok(()); } - Err(BindError::AggMiss( + Err(DatabaseError::AggMiss( format!( "column {:?} must appear in the GROUP BY clause or be used in an aggregate function", expr @@ -230,7 +230,7 @@ impl<'a, T: Transaction> Binder<'a, T> { return self.validate_having_orderby(expr.unpack_alias()); } - Err(BindError::AggMiss( + Err(DatabaseError::AggMiss( format!( "column {:?} must appear in the GROUP BY clause or be used in an aggregate function", expr diff --git a/src/binder/alter_table.rs b/src/binder/alter_table.rs index 827fae1e..b3565efd 100644 --- a/src/binder/alter_table.rs +++ b/src/binder/alter_table.rs @@ -3,7 +3,8 @@ use sqlparser::ast::{AlterTableOperation, ObjectName}; use std::sync::Arc; use super::{is_valid_identifier, Binder}; -use crate::binder::{lower_case_name, split_name, BindError}; +use crate::binder::{lower_case_name, split_name}; +use crate::errors::DatabaseError; use crate::planner::operator::alter_table::add_column::AddColumnOperator; use crate::planner::operator::alter_table::drop_column::DropColumnOperator; use crate::planner::operator::scan::ScanOperator; @@ -16,7 +17,7 @@ impl<'a, T: Transaction> Binder<'a, T> { &mut self, name: &ObjectName, operation: &AlterTableOperation, - ) -> Result { + ) -> Result { let table_name: Arc = Arc::new(split_name(&lower_case_name(name))?.to_string()); if let Some(table) = self.context.table(table_name.clone()) { @@ -30,7 +31,7 @@ impl<'a, T: Transaction> Binder<'a, T> { let column = self.bind_column(column_def)?; if !is_valid_identifier(column.name()) { - return Err(BindError::InvalidColumn( + return Err(DatabaseError::InvalidColumn( "illegal column naming".to_string(), )); } @@ -83,7 +84,7 @@ impl<'a, T: 
Transaction> Binder<'a, T> { Ok(plan) } else { - Err(BindError::InvalidTable(format!( + Err(DatabaseError::InvalidTable(format!( "not found table {}", table_name ))) diff --git a/src/binder/analyze.rs b/src/binder/analyze.rs index de827c29..e316b9c2 100644 --- a/src/binder/analyze.rs +++ b/src/binder/analyze.rs @@ -1,4 +1,5 @@ -use crate::binder::{lower_case_name, split_name, BindError, Binder}; +use crate::binder::{lower_case_name, split_name, Binder}; +use crate::errors::DatabaseError; use crate::planner::operator::analyze::AnalyzeOperator; use crate::planner::operator::scan::ScanOperator; use crate::planner::operator::Operator; @@ -9,7 +10,7 @@ use sqlparser::ast::ObjectName; use std::sync::Arc; impl<'a, T: Transaction> Binder<'a, T> { - pub(crate) fn bind_analyze(&mut self, name: &ObjectName) -> Result { + pub(crate) fn bind_analyze(&mut self, name: &ObjectName) -> Result { let name = lower_case_name(name); let name = split_name(&name)?; let table_name = Arc::new(name.to_string()); @@ -18,7 +19,7 @@ impl<'a, T: Transaction> Binder<'a, T> { .context .table(table_name.clone()) .cloned() - .ok_or_else(|| BindError::InvalidTable(format!("bind table {}", name)))?; + .ok_or_else(|| DatabaseError::InvalidTable(format!("bind table {}", name)))?; let columns = table_catalog .all_columns() .into_iter() diff --git a/src/binder/copy.rs b/src/binder/copy.rs index f4f26e98..c88bfa19 100644 --- a/src/binder/copy.rs +++ b/src/binder/copy.rs @@ -2,6 +2,7 @@ use std::path::PathBuf; use std::str::FromStr; use std::sync::Arc; +use crate::errors::DatabaseError; use crate::planner::operator::copy_from_file::CopyFromFileOperator; use crate::planner::operator::copy_to_file::CopyToFileOperator; use crate::planner::operator::Operator; @@ -57,14 +58,14 @@ impl<'a, T: Transaction> Binder<'a, T> { to: bool, target: CopyTarget, options: &[CopyOption], - ) -> Result { + ) -> Result { let (table_name, ..) 
= match source { CopySource::Table { table_name, columns, } => (table_name, columns), CopySource::Query(_) => { - return Err(BindError::UnsupportedCopySource( + return Err(DatabaseError::UnsupportedCopySource( "bad copy source".to_string(), )); } @@ -100,7 +101,7 @@ impl<'a, T: Transaction> Binder<'a, T> { }) } } else { - Err(BindError::InvalidTable(format!( + Err(DatabaseError::InvalidTable(format!( "not found table {}", table_name ))) diff --git a/src/binder/create_table.rs b/src/binder/create_table.rs index a7ae26ec..0026717a 100644 --- a/src/binder/create_table.rs +++ b/src/binder/create_table.rs @@ -4,8 +4,9 @@ use std::collections::HashSet; use std::sync::Arc; use super::{is_valid_identifier, Binder}; -use crate::binder::{lower_case_name, split_name, BindError}; +use crate::binder::{lower_case_name, split_name}; use crate::catalog::{ColumnCatalog, ColumnDesc}; +use crate::errors::DatabaseError; use crate::expression::ScalarExpression; use crate::planner::operator::create_table::CreateTableOperator; use crate::planner::operator::Operator; @@ -22,13 +23,15 @@ impl<'a, T: Transaction> Binder<'a, T> { columns: &[ColumnDef], constraints: &[TableConstraint], if_not_exists: bool, - ) -> Result { + ) -> Result { let name = lower_case_name(name); let name = split_name(&name)?; let table_name = Arc::new(name.to_string()); if !is_valid_identifier(&table_name) { - return Err(BindError::InvalidTable("illegal table naming".to_string())); + return Err(DatabaseError::InvalidTable( + "illegal table naming".to_string(), + )); } { // check duplicated column names @@ -36,10 +39,10 @@ impl<'a, T: Transaction> Binder<'a, T> { for col in columns.iter() { let col_name = &col.name.value; if !set.insert(col_name.clone()) { - return Err(BindError::AmbiguousColumn(col_name.to_string())); + return Err(DatabaseError::AmbiguousColumn(col_name.to_string())); } if !is_valid_identifier(col_name) { - return Err(BindError::InvalidColumn( + return Err(DatabaseError::InvalidColumn( "illegal 
column naming".to_string(), )); } @@ -74,7 +77,7 @@ impl<'a, T: Transaction> Binder<'a, T> { } if columns.iter().filter(|col| col.desc.is_primary).count() != 1 { - return Err(BindError::InvalidTable( + return Err(DatabaseError::InvalidTable( "The primary key field must exist and have at least one".to_string(), )); } @@ -91,7 +94,7 @@ impl<'a, T: Transaction> Binder<'a, T> { Ok(plan) } - pub fn bind_column(&mut self, column_def: &ColumnDef) -> Result { + pub fn bind_column(&mut self, column_def: &ColumnDef) -> Result { let column_name = column_def.name.to_string(); let mut column_desc = ColumnDesc::new( LogicalType::try_from(column_def.data_type.clone())?, @@ -138,14 +141,13 @@ mod tests { use super::*; use crate::binder::BinderContext; use crate::catalog::ColumnDesc; - use crate::execution::ExecutorError; use crate::storage::kip::KipStorage; use crate::storage::Storage; use crate::types::LogicalType; use tempfile::TempDir; #[tokio::test] - async fn test_create_bind() -> Result<(), ExecutorError> { + async fn test_create_bind() -> Result<(), DatabaseError> { let temp_dir = TempDir::new().expect("unable to create temporary working directory"); let storage = KipStorage::new(temp_dir.path()).await?; let transaction = storage.transaction().await?; diff --git a/src/binder/delete.rs b/src/binder/delete.rs index bac496b3..2dec9458 100644 --- a/src/binder/delete.rs +++ b/src/binder/delete.rs @@ -1,4 +1,5 @@ -use crate::binder::{lower_case_name, split_name, BindError, Binder}; +use crate::binder::{lower_case_name, split_name, Binder}; +use crate::errors::DatabaseError; use crate::planner::operator::delete::DeleteOperator; use crate::planner::operator::scan::ScanOperator; use crate::planner::operator::Operator; @@ -12,7 +13,7 @@ impl<'a, T: Transaction> Binder<'a, T> { &mut self, from: &TableWithJoins, selection: &Option, - ) -> Result { + ) -> Result { if let TableFactor::Table { name, alias, .. 
} = &from.relation { let name = lower_case_name(name); let name = split_name(&name)?; @@ -22,7 +23,7 @@ impl<'a, T: Transaction> Binder<'a, T> { .context .table(table_name.clone()) .cloned() - .ok_or_else(|| BindError::InvalidTable(format!("bind table {}", name)))?; + .ok_or_else(|| DatabaseError::InvalidTable(format!("bind table {}", name)))?; let primary_key_column = table_catalog .all_columns_with_id() .iter() diff --git a/src/binder/drop_table.rs b/src/binder/drop_table.rs index ad878f95..f8f0b4a4 100644 --- a/src/binder/drop_table.rs +++ b/src/binder/drop_table.rs @@ -1,4 +1,5 @@ -use crate::binder::{lower_case_name, split_name, BindError, Binder}; +use crate::binder::{lower_case_name, split_name, Binder}; +use crate::errors::DatabaseError; use crate::planner::operator::drop_table::DropTableOperator; use crate::planner::operator::Operator; use crate::planner::LogicalPlan; @@ -11,7 +12,7 @@ impl<'a, T: Transaction> Binder<'a, T> { &mut self, name: &ObjectName, if_exists: &bool, - ) -> Result { + ) -> Result { let name = lower_case_name(name); let name = split_name(&name)?; let table_name = Arc::new(name.to_string()); diff --git a/src/binder/explain.rs b/src/binder/explain.rs index 4ef2a9cf..d08eaa7d 100644 --- a/src/binder/explain.rs +++ b/src/binder/explain.rs @@ -1,10 +1,11 @@ -use crate::binder::{BindError, Binder}; +use crate::binder::Binder; +use crate::errors::DatabaseError; use crate::planner::operator::Operator; use crate::planner::LogicalPlan; use crate::storage::Transaction; impl<'a, T: Transaction> Binder<'a, T> { - pub(crate) fn bind_explain(&mut self, plan: LogicalPlan) -> Result { + pub(crate) fn bind_explain(&mut self, plan: LogicalPlan) -> Result { Ok(LogicalPlan { operator: Operator::Explain, childrens: vec![plan], diff --git a/src/binder/expr.rs b/src/binder/expr.rs index a215df75..36a81f39 100644 --- a/src/binder/expr.rs +++ b/src/binder/expr.rs @@ -1,4 +1,4 @@ -use crate::binder::BindError; +use crate::errors::DatabaseError; use 
crate::expression; use crate::expression::agg::AggKind; use itertools::Itertools; @@ -15,7 +15,7 @@ use crate::types::value::DataValue; use crate::types::LogicalType; impl<'a, T: Transaction> Binder<'a, T> { - pub(crate) fn bind_expr(&mut self, expr: &Expr) -> Result { + pub(crate) fn bind_expr(&mut self, expr: &Expr) -> Result { match expr { Expr::Identifier(ident) => { self.bind_column_ref_from_identifiers(slice::from_ref(ident), None) @@ -51,7 +51,7 @@ impl<'a, T: Transaction> Binder<'a, T> { negated: bool, expr: &Expr, pattern: &Expr, - ) -> Result { + ) -> Result { let left_expr = Box::new(self.bind_expr(expr)?); let right_expr = Box::new(self.bind_expr(pattern)?); let op = if negated { @@ -71,7 +71,7 @@ impl<'a, T: Transaction> Binder<'a, T> { &mut self, idents: &[Ident], bind_table_name: Option<&String>, - ) -> Result { + ) -> Result { let idents = idents .iter() .map(|ident| Ident::new(ident.value.to_lowercase())) @@ -81,7 +81,7 @@ impl<'a, T: Transaction> Binder<'a, T> { [table, column] => (None, Some(&table.value), &column.value), [schema, table, column] => (Some(&schema.value), Some(&table.value), &column.value), _ => { - return Err(BindError::InvalidColumn( + return Err(DatabaseError::InvalidColumn( idents .iter() .map(|ident| ident.value.clone()) @@ -95,11 +95,11 @@ impl<'a, T: Transaction> Binder<'a, T> { let table_catalog = self .context .table(Arc::new(table.clone())) - .ok_or_else(|| BindError::InvalidTable(table.to_string()))?; + .ok_or_else(|| DatabaseError::InvalidTable(table.to_string()))?; let column_catalog = table_catalog .get_column_by_name(column_name) - .ok_or_else(|| BindError::InvalidColumn(column_name.to_string()))?; + .ok_or_else(|| DatabaseError::InvalidColumn(column_name.to_string()))?; Ok(ScalarExpression::ColumnRef(column_catalog.clone())) } else { // handle col syntax @@ -107,7 +107,7 @@ impl<'a, T: Transaction> Binder<'a, T> { for (table_catalog, _) in self.context.bind_table.values() { if let Some(column_catalog) = 
table_catalog.get_column_by_name(column_name) { if got_column.is_some() { - return Err(BindError::InvalidColumn(column_name.to_string())); + return Err(DatabaseError::InvalidColumn(column_name.to_string())); } got_column = Some(column_catalog); } @@ -121,7 +121,7 @@ impl<'a, T: Transaction> Binder<'a, T> { } } let column_catalog = - got_column.ok_or_else(|| BindError::InvalidColumn(column_name.to_string()))?; + got_column.ok_or_else(|| DatabaseError::InvalidColumn(column_name.to_string()))?; Ok(ScalarExpression::ColumnRef(column_catalog.clone())) } } @@ -131,7 +131,7 @@ impl<'a, T: Transaction> Binder<'a, T> { left: &Expr, right: &Expr, op: &BinaryOperator, - ) -> Result { + ) -> Result { let left_expr = Box::new(self.bind_expr(left)?); let right_expr = Box::new(self.bind_expr(right)?); @@ -167,7 +167,7 @@ impl<'a, T: Transaction> Binder<'a, T> { &mut self, expr: &Expr, op: &UnaryOperator, - ) -> Result { + ) -> Result { let expr = Box::new(self.bind_expr(expr)?); let ty = if let UnaryOperator::Not = op { LogicalType::Boolean @@ -182,7 +182,7 @@ impl<'a, T: Transaction> Binder<'a, T> { }) } - fn bind_agg_call(&mut self, func: &Function) -> Result { + fn bind_agg_call(&mut self, func: &Function) -> Result { let mut args = Vec::with_capacity(func.args.len()); for arg in func.args.iter() { @@ -233,7 +233,11 @@ impl<'a, T: Transaction> Binder<'a, T> { }) } - fn bind_is_null(&mut self, expr: &Expr, negated: bool) -> Result { + fn bind_is_null( + &mut self, + expr: &Expr, + negated: bool, + ) -> Result { Ok(ScalarExpression::IsNull { negated, expr: Box::new(self.bind_expr(expr)?), @@ -245,7 +249,7 @@ impl<'a, T: Transaction> Binder<'a, T> { expr: &Expr, list: &[Expr], negated: bool, - ) -> Result { + ) -> Result { let args = list.iter().map(|expr| self.bind_expr(expr)).try_collect()?; Ok(ScalarExpression::In { @@ -255,7 +259,7 @@ impl<'a, T: Transaction> Binder<'a, T> { }) } - fn bind_cast(&mut self, expr: &Expr, ty: &DataType) -> Result { + fn bind_cast(&mut self, expr: 
&Expr, ty: &DataType) -> Result { Ok(ScalarExpression::TypeCast { expr: Box::new(self.bind_expr(expr)?), ty: LogicalType::try_from(ty.clone())?, diff --git a/src/binder/insert.rs b/src/binder/insert.rs index e31bac1c..a0496455 100644 --- a/src/binder/insert.rs +++ b/src/binder/insert.rs @@ -1,5 +1,6 @@ -use crate::binder::{lower_case_name, split_name, BindError, Binder}; +use crate::binder::{lower_case_name, split_name, Binder}; use crate::catalog::ColumnRef; +use crate::errors::DatabaseError; use crate::expression::value_compute::unary_op; use crate::expression::ScalarExpression; use crate::planner::operator::insert::InsertOperator; @@ -19,7 +20,7 @@ impl<'a, T: Transaction> Binder<'a, T> { idents: &[Ident], expr_rows: &Vec>, is_overwrite: bool, - ) -> Result { + ) -> Result { let name = lower_case_name(&name); let name = split_name(&name)?; let table_name = Arc::new(name.to_string()); @@ -31,7 +32,7 @@ impl<'a, T: Transaction> Binder<'a, T> { if idents.is_empty() { columns = table.all_columns(); if values_len > columns.len() { - return Err(BindError::ValuesLenMismatch(columns.len(), values_len)); + return Err(DatabaseError::ValuesLenMismatch(columns.len(), values_len)); } } else { let bind_table_name = Some(table_name.to_string()); @@ -45,13 +46,13 @@ impl<'a, T: Transaction> Binder<'a, T> { } } if values_len != columns.len() { - return Err(BindError::ValuesLenMismatch(columns.len(), values_len)); + return Err(DatabaseError::ValuesLenMismatch(columns.len(), values_len)); } } let mut rows = Vec::with_capacity(expr_rows.len()); for expr_row in expr_rows { if expr_row.len() != values_len { - return Err(BindError::ValuesLenNotSame()); + return Err(DatabaseError::ValuesLenNotSame()); } let mut row = Vec::with_capacity(expr_row.len()); @@ -89,7 +90,7 @@ impl<'a, T: Transaction> Binder<'a, T> { physical_option: None, }) } else { - Err(BindError::InvalidTable(format!( + Err(DatabaseError::InvalidTable(format!( "not found table {}", table_name ))) diff --git 
a/src/binder/mod.rs b/src/binder/mod.rs index 33372463..2d8d4a1d 100644 --- a/src/binder/mod.rs +++ b/src/binder/mod.rs @@ -17,12 +17,12 @@ mod update; use sqlparser::ast::{Ident, ObjectName, ObjectType, SetExpr, Statement}; use std::collections::BTreeMap; -use crate::catalog::{CatalogError, TableCatalog, TableName}; +use crate::catalog::{TableCatalog, TableName}; +use crate::errors::DatabaseError; use crate::expression::ScalarExpression; use crate::planner::operator::join::JoinType; use crate::planner::LogicalPlan; use crate::storage::Transaction; -use crate::types::errors::TypeError; pub enum InputRefType { AggCall, @@ -67,22 +67,33 @@ impl<'a, T: Transaction> BinderContext<'a, T> { } } - pub fn add_alias(&mut self, alias: String, expr: ScalarExpression) -> Result<(), BindError> { + pub fn add_alias( + &mut self, + alias: String, + expr: ScalarExpression, + ) -> Result<(), DatabaseError> { let is_exist = self.aliases.insert(alias.clone(), expr).is_some(); if is_exist { - return Err(BindError::InvalidColumn(format!("{} duplicated", alias))); + return Err(DatabaseError::InvalidColumn(format!( + "{} duplicated", + alias + ))); } Ok(()) } - pub fn add_table_alias(&mut self, alias: String, table: TableName) -> Result<(), BindError> { + pub fn add_table_alias( + &mut self, + alias: String, + table: TableName, + ) -> Result<(), DatabaseError> { let is_alias_exist = self .table_aliases .insert(alias.clone(), table.clone()) .is_some(); if is_alias_exist { - return Err(BindError::InvalidTable(format!("{} duplicated", alias))); + return Err(DatabaseError::InvalidTable(format!("{} duplicated", alias))); } Ok(()) @@ -93,13 +104,13 @@ impl<'a, T: Transaction> BinderContext<'a, T> { table: TableName, table_catalog: TableCatalog, join_type: Option, - ) -> Result<(), BindError> { + ) -> Result<(), DatabaseError> { let is_bound = self .bind_table .insert(table.clone(), (table_catalog.clone(), join_type)) .is_some(); if is_bound { - return Err(BindError::InvalidTable(format!("{} 
duplicated", table))); + return Err(DatabaseError::InvalidTable(format!("{} duplicated", table))); } Ok(()) @@ -119,7 +130,7 @@ impl<'a, T: Transaction> Binder<'a, T> { Binder { context } } - pub fn bind(&mut self, stmt: &Statement) -> Result { + pub fn bind(&mut self, stmt: &Statement) -> Result { let plan = match stmt { Statement::Query(query) => self.bind_query(query)?, Statement::AlterTable { name, operation } => self.bind_alter_table(name, operation)?, @@ -190,7 +201,7 @@ impl<'a, T: Transaction> Binder<'a, T> { self.bind_explain(plan)? } - _ => return Err(BindError::UnsupportedStmt(stmt.to_string())), + _ => return Err(DatabaseError::UnsupportedStmt(stmt.to_string())), }; Ok(plan) } @@ -207,41 +218,13 @@ fn lower_case_name(name: &ObjectName) -> ObjectName { } /// Split an object name into `(schema name, table name)`. -fn split_name(name: &ObjectName) -> Result<&str, BindError> { +fn split_name(name: &ObjectName) -> Result<&str, DatabaseError> { Ok(match name.0.as_slice() { [table] => &table.value, - _ => return Err(BindError::InvalidTable(name.to_string())), + _ => return Err(DatabaseError::InvalidTable(name.to_string())), }) } -#[derive(thiserror::Error, Debug)] -pub enum BindError { - #[error("unsupported statement {0}")] - UnsupportedStmt(String), - #[error("invalid table {0}")] - InvalidTable(String), - #[error("invalid column {0}")] - InvalidColumn(String), - #[error("ambiguous column {0}")] - AmbiguousColumn(String), - #[error("values length not match, expect {0}, got {1}")] - ValuesLenMismatch(usize, usize), - #[error("values list must all be the same length")] - ValuesLenNotSame(), - #[error("binary operator types mismatch: {0} != {1}")] - BinaryOpTypeMismatch(String, String), - #[error("subquery error: {0}")] - Subquery(String), - #[error("agg miss: {0}")] - AggMiss(String), - #[error("catalog error: {0}")] - CatalogError(#[from] CatalogError), - #[error("type error: {0}")] - TypeError(#[from] TypeError), - #[error("copy error: {0}")] - 
UnsupportedCopySource(String), -} - pub(crate) fn is_valid_identifier(s: &str) -> bool { s.chars().all(|c| c.is_alphanumeric() || c == '_') && !s.chars().next().unwrap_or_default().is_numeric() @@ -252,10 +235,10 @@ pub(crate) fn is_valid_identifier(s: &str) -> bool { pub mod test { use crate::binder::{is_valid_identifier, Binder, BinderContext}; use crate::catalog::{ColumnCatalog, ColumnDesc}; - use crate::execution::ExecutorError; + use crate::errors::DatabaseError; use crate::planner::LogicalPlan; use crate::storage::kip::KipStorage; - use crate::storage::{Storage, StorageError, Transaction}; + use crate::storage::{Storage, Transaction}; use crate::types::LogicalType::Integer; use std::path::PathBuf; use std::sync::Arc; @@ -263,7 +246,7 @@ pub mod test { pub(crate) async fn build_test_catalog( path: impl Into + Send, - ) -> Result { + ) -> Result { let storage = KipStorage::new(path).await?; let mut transaction = storage.transaction().await?; @@ -310,7 +293,7 @@ pub mod test { Ok(storage) } - pub async fn select_sql_run>(sql: S) -> Result { + pub async fn select_sql_run>(sql: S) -> Result { let temp_dir = TempDir::new().expect("unable to create temporary working directory"); let storage = build_test_catalog(temp_dir.path()).await?; let transaction = storage.transaction().await?; diff --git a/src/binder/select.rs b/src/binder/select.rs index 9a1a2877..2898f63e 100644 --- a/src/binder/select.rs +++ b/src/binder/select.rs @@ -15,15 +15,14 @@ use crate::{ use super::Binder; -use crate::binder::BindError; use crate::catalog::{ColumnCatalog, TableCatalog, TableName}; +use crate::errors::DatabaseError; use crate::execution::volcano::dql::join::joins_nullable; use crate::expression::BinaryOperator; use crate::planner::operator::join::JoinCondition; use crate::planner::operator::sort::{SortField, SortOperator}; use crate::planner::LogicalPlan; use crate::storage::Transaction; -use crate::types::errors::TypeError; use crate::types::LogicalType; use itertools::Itertools; 
use sqlparser::ast; @@ -33,7 +32,7 @@ use sqlparser::ast::{ }; impl<'a, T: Transaction> Binder<'a, T> { - pub(crate) fn bind_query(&mut self, query: &Query) -> Result { + pub(crate) fn bind_query(&mut self, query: &Query) -> Result { if let Some(_with) = &query.with { // TODO support with clause. } @@ -58,7 +57,7 @@ impl<'a, T: Transaction> Binder<'a, T> { &mut self, select: &Select, orderby: &[OrderByExpr], - ) -> Result { + ) -> Result { let mut plan = self.bind_table_ref(&select.from)?; // Resolve scalar function call. @@ -112,7 +111,7 @@ impl<'a, T: Transaction> Binder<'a, T> { pub(crate) fn bind_table_ref( &mut self, from: &[TableWithJoins], - ) -> Result { + ) -> Result { assert!(from.len() < 2, "not support yet."); if from.is_empty() { return Ok(LogicalPlan { @@ -145,7 +144,7 @@ impl<'a, T: Transaction> Binder<'a, T> { &mut self, table: &TableFactor, joint_type: Option, - ) -> Result<(Option, LogicalPlan), BindError> { + ) -> Result<(Option, LogicalPlan), DatabaseError> { let plan_with_name = match table { TableFactor::Table { name, alias, .. 
} => { let obj_name = name @@ -156,7 +155,7 @@ impl<'a, T: Transaction> Binder<'a, T> { let table: &str = match obj_name.as_slice() { [table] => &table.value, - _ => return Err(BindError::InvalidTable(obj_name.iter().join(","))), + _ => return Err(DatabaseError::InvalidTable(obj_name.iter().join(","))), }; let (table, plan) = @@ -198,14 +197,14 @@ impl<'a, T: Transaction> Binder<'a, T> { join_type: Option, table: &str, alias: Option<&String>, - ) -> Result<(Arc, LogicalPlan), BindError> { + ) -> Result<(Arc, LogicalPlan), DatabaseError> { let table_name = Arc::new(table.to_string()); let table_catalog = self .context .table(table_name.clone()) .cloned() - .ok_or_else(|| BindError::InvalidTable(format!("bind table {}", table)))?; + .ok_or_else(|| DatabaseError::InvalidTable(format!("bind table {}", table)))?; let scan_op = ScanOperator::build(table_name.clone(), &table_catalog); self.context @@ -228,7 +227,7 @@ impl<'a, T: Transaction> Binder<'a, T> { fn normalize_select_item( &mut self, items: &[SelectItem], - ) -> Result, BindError> { + ) -> Result, DatabaseError> { let mut select_items = vec![]; for item in items.iter().enumerate() { @@ -256,13 +255,13 @@ impl<'a, T: Transaction> Binder<'a, T> { Ok(select_items) } - fn bind_all_column_refs(&mut self) -> Result, BindError> { + fn bind_all_column_refs(&mut self) -> Result, DatabaseError> { let mut exprs = vec![]; for table_name in self.context.bind_table.keys() { let table = self .context .table(table_name.clone()) - .ok_or_else(|| BindError::InvalidTable(table_name.to_string()))?; + .ok_or_else(|| DatabaseError::InvalidTable(table_name.to_string()))?; for col in table.all_columns() { exprs.push(ScalarExpression::ColumnRef(col)); } @@ -276,7 +275,7 @@ impl<'a, T: Transaction> Binder<'a, T> { left_table: TableName, left: LogicalPlan, join: &Join, - ) -> Result { + ) -> Result { let Join { relation, join_operator, @@ -297,12 +296,16 @@ impl<'a, T: Transaction> Binder<'a, T> { .context .table(left_table.clone()) 
.cloned() - .ok_or_else(|| BindError::InvalidTable(format!("Left: {} not found", left_table)))?; + .ok_or_else(|| { + DatabaseError::InvalidTable(format!("Left: {} not found", left_table)) + })?; let right_table = self .context .table(right_table.clone()) .cloned() - .ok_or_else(|| BindError::InvalidTable(format!("Right: {} not found", right_table)))?; + .ok_or_else(|| { + DatabaseError::InvalidTable(format!("Right: {} not found", right_table)) + })?; let on = match joint_condition { Some(constraint) => self.bind_join_constraint(&left_table, &right_table, constraint)?, @@ -316,7 +319,7 @@ impl<'a, T: Transaction> Binder<'a, T> { &mut self, children: LogicalPlan, predicate: &Expr, - ) -> Result { + ) -> Result { Ok(FilterOperator::build( self.bind_expr(predicate)?, children, @@ -328,7 +331,7 @@ impl<'a, T: Transaction> Binder<'a, T> { &mut self, children: LogicalPlan, having: ScalarExpression, - ) -> Result { + ) -> Result { self.validate_having_orderby(&having)?; Ok(FilterOperator::build(having, children, true)) } @@ -361,7 +364,7 @@ impl<'a, T: Transaction> Binder<'a, T> { children: LogicalPlan, limit_expr: &Option, offset_expr: &Option, - ) -> Result { + ) -> Result { let mut limit = None; let mut offset = None; if let Some(expr) = limit_expr { @@ -370,10 +373,10 @@ impl<'a, T: Transaction> Binder<'a, T> { ScalarExpression::Constant(dv) => match dv.as_ref() { DataValue::Int32(Some(v)) if *v >= 0 => limit = Some(*v as usize), DataValue::Int64(Some(v)) if *v >= 0 => limit = Some(*v as usize), - _ => return Err(BindError::from(TypeError::InvalidType)), + _ => return Err(DatabaseError::from(DatabaseError::InvalidType)), }, _ => { - return Err(BindError::InvalidColumn( + return Err(DatabaseError::InvalidColumn( "invalid limit expression.".to_owned(), )) } @@ -386,10 +389,10 @@ impl<'a, T: Transaction> Binder<'a, T> { ScalarExpression::Constant(dv) => match dv.as_ref() { DataValue::Int32(Some(v)) if *v > 0 => offset = Some(*v as usize), DataValue::Int64(Some(v)) if *v 
> 0 => offset = Some(*v as usize), - _ => return Err(BindError::from(TypeError::InvalidType)), + _ => return Err(DatabaseError::from(DatabaseError::InvalidType)), }, _ => { - return Err(BindError::InvalidColumn( + return Err(DatabaseError::InvalidColumn( "invalid limit expression.".to_owned(), )) } @@ -445,7 +448,7 @@ impl<'a, T: Transaction> Binder<'a, T> { left_table: &TableCatalog, right_table: &TableCatalog, constraint: &JoinConstraint, - ) -> Result { + ) -> Result { match constraint { JoinConstraint::On(expr) => { // left and right columns that match equi-join pattern @@ -492,7 +495,7 @@ impl<'a, T: Transaction> Binder<'a, T> { accum_filter: &mut Vec, left_schema: &TableCatalog, right_schema: &TableCatalog, - ) -> Result<(), BindError> { + ) -> Result<(), DatabaseError> { match expr { Expr::BinaryOp { left, op, right } => match op { ast::BinaryOperator::Eq => { @@ -557,10 +560,10 @@ impl<'a, T: Transaction> Binder<'a, T> { #[cfg(test)] mod tests { use crate::binder::test::select_sql_run; - use crate::execution::ExecutorError; + use crate::errors::DatabaseError; #[tokio::test] - async fn test_select_bind() -> Result<(), ExecutorError> { + async fn test_select_bind() -> Result<(), DatabaseError> { let plan_1 = select_sql_run("select * from t1").await?; println!("just_col:\n {:#?}", plan_1); let plan_2 = select_sql_run("select t1.c1, t1.c2 from t1").await?; diff --git a/src/binder/show.rs b/src/binder/show.rs index 0d6ff53d..d13de6c9 100644 --- a/src/binder/show.rs +++ b/src/binder/show.rs @@ -1,10 +1,11 @@ -use crate::binder::{BindError, Binder}; +use crate::binder::Binder; +use crate::errors::DatabaseError; use crate::planner::operator::Operator; use crate::planner::LogicalPlan; use crate::storage::Transaction; impl<'a, T: Transaction> Binder<'a, T> { - pub(crate) fn bind_show_tables(&mut self) -> Result { + pub(crate) fn bind_show_tables(&mut self) -> Result { let plan = LogicalPlan { operator: Operator::Show, childrens: vec![], diff --git 
a/src/binder/truncate.rs b/src/binder/truncate.rs index d3a0e1a8..acef61a4 100644 --- a/src/binder/truncate.rs +++ b/src/binder/truncate.rs @@ -1,4 +1,5 @@ -use crate::binder::{lower_case_name, split_name, BindError, Binder}; +use crate::binder::{lower_case_name, split_name, Binder}; +use crate::errors::DatabaseError; use crate::planner::operator::truncate::TruncateOperator; use crate::planner::operator::Operator; use crate::planner::LogicalPlan; @@ -7,7 +8,10 @@ use sqlparser::ast::ObjectName; use std::sync::Arc; impl<'a, T: Transaction> Binder<'a, T> { - pub(crate) fn bind_truncate(&mut self, name: &ObjectName) -> Result { + pub(crate) fn bind_truncate( + &mut self, + name: &ObjectName, + ) -> Result { let name = lower_case_name(name); let name = split_name(&name)?; let table_name = Arc::new(name.to_string()); diff --git a/src/binder/update.rs b/src/binder/update.rs index e0743c6a..e9ec5e6a 100644 --- a/src/binder/update.rs +++ b/src/binder/update.rs @@ -1,4 +1,5 @@ -use crate::binder::{lower_case_name, split_name, BindError, Binder}; +use crate::binder::{lower_case_name, split_name, Binder}; +use crate::errors::DatabaseError; use crate::expression::ScalarExpression; use crate::planner::operator::update::UpdateOperator; use crate::planner::operator::Operator; @@ -15,7 +16,7 @@ impl<'a, T: Transaction> Binder<'a, T> { to: &TableWithJoins, selection: &Option, assignments: &[Assignment], - ) -> Result { + ) -> Result { if let TableFactor::Table { name, .. } = &to.relation { let name = lower_case_name(name); let name = split_name(&name)?; @@ -34,7 +35,7 @@ impl<'a, T: Transaction> Binder<'a, T> { for assignment in assignments { let value = match self.bind_expr(&assignment.value)? 
{ - ScalarExpression::Constant(value) => Ok::(value), + ScalarExpression::Constant(value) => Ok::(value), _ => unreachable!(), }?; diff --git a/src/catalog/mod.rs b/src/catalog/mod.rs index 7060a59d..f1f1fe83 100644 --- a/src/catalog/mod.rs +++ b/src/catalog/mod.rs @@ -5,13 +5,3 @@ pub(crate) use self::table::*; mod column; mod table; - -#[derive(thiserror::Error, Debug)] -pub enum CatalogError { - #[error("{0} not found: {1}")] - NotFound(&'static str, String), - #[error("duplicated {0}: {1}")] - Duplicated(&'static str, String), - #[error("columns empty")] - ColumnsEmpty, -} diff --git a/src/catalog/table.rs b/src/catalog/table.rs index e663c81c..0a27d88c 100644 --- a/src/catalog/table.rs +++ b/src/catalog/table.rs @@ -2,7 +2,8 @@ use serde::{Deserialize, Serialize}; use std::collections::BTreeMap; use std::sync::Arc; -use crate::catalog::{CatalogError, ColumnCatalog, ColumnRef}; +use crate::catalog::{ColumnCatalog, ColumnRef}; +use crate::errors::DatabaseError; use crate::types::index::{IndexMeta, IndexMetaRef}; use crate::types::ColumnId; @@ -58,9 +59,9 @@ impl TableCatalog { } /// Add a column to the table catalog. 
- pub(crate) fn add_column(&mut self, mut col: ColumnCatalog) -> Result { + pub(crate) fn add_column(&mut self, mut col: ColumnCatalog) -> Result { if self.column_idxs.contains_key(col.name()) { - return Err(CatalogError::Duplicated("column", col.name().to_string())); + return Err(DatabaseError::Duplicated("column", col.name().to_string())); } let col_id = self @@ -103,9 +104,9 @@ impl TableCatalog { pub(crate) fn new( name: TableName, columns: Vec, - ) -> Result { + ) -> Result { if columns.is_empty() { - return Err(CatalogError::ColumnsEmpty); + return Err(DatabaseError::ColumnsEmpty); } let mut table_catalog = TableCatalog { name, @@ -124,7 +125,7 @@ impl TableCatalog { name: TableName, columns: Vec, indexes: Vec, - ) -> Result { + ) -> Result { let mut catalog = TableCatalog::new(name, columns)?; catalog.indexes = indexes; diff --git a/src/db.rs b/src/db.rs index 17521e42..215fb1d2 100644 --- a/src/db.rs +++ b/src/db.rs @@ -1,19 +1,17 @@ use sqlparser::ast::Statement; -use sqlparser::parser::ParserError; use std::path::PathBuf; -use crate::binder::{BindError, Binder, BinderContext}; +use crate::binder::{Binder, BinderContext}; +use crate::errors::DatabaseError; use crate::execution::volcano::{build_write, try_collect}; -use crate::execution::ExecutorError; use crate::optimizer::heuristic::batch::HepBatchStrategy; use crate::optimizer::heuristic::optimizer::HepOptimizer; use crate::optimizer::rule::implementation::ImplementationRuleImpl; use crate::optimizer::rule::normalization::NormalizationRuleImpl; -use crate::optimizer::OptimizerError; use crate::parser::parse_sql; use crate::planner::LogicalPlan; use crate::storage::kip::KipStorage; -use crate::storage::{Storage, StorageError, Transaction}; +use crate::storage::{Storage, Transaction}; use crate::types::tuple::Tuple; #[derive(Copy, Clone)] @@ -222,56 +220,18 @@ impl DBTransaction { } } -#[derive(thiserror::Error, Debug)] -pub enum DatabaseError { - #[error("sql statement is empty")] - EmptyStatement, - 
#[error("parse error: {0}")] - Parse( - #[source] - #[from] - ParserError, - ), - #[error("bind error: {0}")] - Bind( - #[source] - #[from] - BindError, - ), - #[error("Storage error: {0}")] - StorageError( - #[source] - #[from] - StorageError, - ), - #[error("volcano error: {0}")] - ExecutorError( - #[source] - #[from] - ExecutorError, - ), - #[error("Internal error: {0}")] - InternalError(String), - #[error("optimizer error: {0}")] - OptimizerError( - #[source] - #[from] - OptimizerError, - ), -} - #[cfg(test)] mod test { use crate::catalog::{ColumnCatalog, ColumnDesc}; use crate::db::{Database, DatabaseError, QueryExecute}; - use crate::storage::{Storage, StorageError, Transaction}; + use crate::storage::{Storage, Transaction}; use crate::types::tuple::{create_table, Tuple}; use crate::types::value::DataValue; use crate::types::LogicalType; use std::sync::Arc; use tempfile::TempDir; - async fn build_table(mut transaction: impl Transaction) -> Result<(), StorageError> { + async fn build_table(mut transaction: impl Transaction) -> Result<(), DatabaseError> { let columns = vec![ ColumnCatalog::new( "c1".to_string(), diff --git a/src/errors.rs b/src/errors.rs new file mode 100644 index 00000000..baf1e96a --- /dev/null +++ b/src/errors.rs @@ -0,0 +1,166 @@ +use crate::types::LogicalType; +use chrono::ParseError; +use kip_db::KernelError; +#[cfg(feature = "codegen_execute")] +use mlua::prelude::LuaError; +use sqlparser::parser::ParserError; +use std::num::{ParseFloatError, ParseIntError, TryFromIntError}; +use std::str::ParseBoolError; +use std::string::FromUtf8Error; + +#[derive(thiserror::Error, Debug)] +pub enum DatabaseError { + #[error("sql statement is empty")] + EmptyStatement, + #[error("invalid type")] + InvalidType, + #[error("must contain PrimaryKey!")] + PrimaryKeyNotFound, + #[error("not implemented sqlparser datatype: {0}")] + NotImplementedSqlparserDataType(String), + #[error("cast fail")] + CastFail, + #[error("too long")] + TooLong, + #[error("cannot 
be Null")] + NotNull, + #[error("try from int: {0}")] + TryFromInt( + #[source] + #[from] + TryFromIntError, + ), + #[error("parser int: {0}")] + ParseInt( + #[source] + #[from] + ParseIntError, + ), + #[error("parser bool: {0}")] + ParseBool( + #[source] + #[from] + ParseBoolError, + ), + #[error("parser float: {0}")] + ParseFloat( + #[source] + #[from] + ParseFloatError, + ), + #[error("parser date: {0}")] + ParseDate( + #[source] + #[from] + ParseError, + ), + #[error("parser sql: {0}")] + ParserSql( + #[source] + #[from] + ParserError, + ), + #[error("bindcode: {0}")] + Bincode( + #[source] + #[from] + Box, + ), + #[error("try from decimal")] + TryFromDecimal( + #[source] + #[from] + rust_decimal::Error, + ), + #[error("from utf8: {0}")] + FromUtf8Error( + #[source] + #[from] + FromUtf8Error, + ), + #[error("{0} and {1} do not match")] + MisMatch(String, String), + #[error("io: {0}")] + IO( + #[source] + #[from] + std::io::Error, + ), + #[error("kipdb error: {0}")] + KipDBError( + #[source] + #[from] + KernelError, + ), + #[error("the same primary key data already exists")] + DuplicatePrimaryKey, + #[error("the column has been declared unique and the value already exists")] + DuplicateUniqueValue, + #[error("the table not found")] + TableNotFound, + #[error("the some column already exists")] + DuplicateColumn, + #[error("add column must be nullable or specify a default value")] + NeedNullAbleOrDefault, + #[error("the table already exists")] + TableExists, + #[error("plan is empty")] + EmptyPlan, + #[error("this column must belong to a table")] + OwnerLessColumn, + #[error("there are more buckets than elements")] + TooManyBuckets, + #[error("csv error: {0}")] + Csv( + #[from] + #[source] + csv::Error, + ), + #[error("tuple length mismatch: expected {expected} but got {actual}")] + LengthMismatch { expected: usize, actual: usize }, + #[error("join error")] + JoinError( + #[from] + #[source] + tokio::task::JoinError, + ), + #[cfg(feature = "codegen_execute")] + 
#[error("lua error")] + LuaError( + #[from] + #[source] + LuaError, + ), + #[error("channel close")] + ChannelClose, + #[error("invalid index")] + InvalidIndex, + #[error("{0} not found: {1}")] + NotFound(&'static str, String), + #[error("duplicated {0}: {1}")] + Duplicated(&'static str, String), + #[error("columns empty")] + ColumnsEmpty, + #[error("unsupported statement {0}")] + UnsupportedStmt(String), + #[error("invalid table {0}")] + InvalidTable(String), + #[error("invalid column {0}")] + InvalidColumn(String), + #[error("ambiguous column {0}")] + AmbiguousColumn(String), + #[error("values length not match, expect {0}, got {1}")] + ValuesLenMismatch(usize, usize), + #[error("values list must all be the same length")] + ValuesLenNotSame(), + #[error("binary operator types mismatch: {0} != {1}")] + BinaryOpTypeMismatch(String, String), + #[error("subquery error: {0}")] + Subquery(String), + #[error("agg miss: {0}")] + AggMiss(String), + #[error("copy error: {0}")] + UnsupportedCopySource(String), + #[error("can not compare two types: {0} and {1}")] + Incomparable(LogicalType, LogicalType), +} diff --git a/src/execution/mod.rs b/src/execution/mod.rs index 2b2eb335..f67f2b60 100644 --- a/src/execution/mod.rs +++ b/src/execution/mod.rs @@ -1,85 +1,3 @@ #[cfg(feature = "codegen_execute")] pub mod codegen; pub mod volcano; - -use crate::binder::BindError; -use crate::catalog::CatalogError; -use crate::optimizer::OptimizerError; -use crate::storage::StorageError; -use crate::types::errors::TypeError; -#[cfg(feature = "codegen_execute")] -use mlua::prelude::LuaError; -use sqlparser::parser::ParserError; - -#[derive(thiserror::Error, Debug)] -pub enum ExecutorError { - #[error("catalog error: {0}")] - CatalogError( - #[source] - #[from] - CatalogError, - ), - #[error("type error: {0}")] - TypeError( - #[source] - #[from] - TypeError, - ), - #[error("storage error: {0}")] - StorageError( - #[source] - #[from] - StorageError, - ), - #[error("bind error: {0}")] - 
BindError( - #[source] - #[from] - BindError, - ), - #[error("optimizer error: {0}")] - Optimizer( - #[source] - #[from] - OptimizerError, - ), - #[error("parser error: {0}")] - ParserError( - #[source] - #[from] - ParserError, - ), - #[error("Internal error: {0}")] - InternalError(String), - #[error("io error: {0}")] - Io( - #[from] - #[source] - std::io::Error, - ), - #[error("csv error: {0}")] - Csv( - #[from] - #[source] - csv::Error, - ), - #[error("tuple length mismatch: expected {expected} but got {actual}")] - LengthMismatch { expected: usize, actual: usize }, - #[error("join error")] - JoinError( - #[from] - #[source] - tokio::task::JoinError, - ), - #[cfg(feature = "codegen_execute")] - #[error("lua error")] - LuaError( - #[from] - #[source] - LuaError, - ), - #[error("channel close")] - ChannelClose, - #[error("invalid index")] - InvalidIndex, -} diff --git a/src/execution/volcano/ddl/add_column.rs b/src/execution/volcano/ddl/add_column.rs index 8e5c0c35..bd865d6b 100644 --- a/src/execution/volcano/ddl/add_column.rs +++ b/src/execution/volcano/ddl/add_column.rs @@ -1,7 +1,8 @@ +use crate::errors::DatabaseError; use crate::execution::volcano::{build_read, BoxedExecutor, WriteExecutor}; use crate::types::tuple::Tuple; +use crate::types::tuple_builder::TupleBuilder; use crate::types::value::DataValue; -use crate::{execution::ExecutorError, types::tuple_builder::TupleBuilder}; use futures_async_stream::try_stream; use std::sync::Arc; @@ -27,7 +28,7 @@ impl WriteExecutor for AddColumn { } impl AddColumn { - #[try_stream(boxed, ok = Tuple, error = ExecutorError)] + #[try_stream(boxed, ok = Tuple, error = DatabaseError)] async fn _execute(self, transaction: &mut T) { let AddColumnOperator { table_name, diff --git a/src/execution/volcano/ddl/create_table.rs b/src/execution/volcano/ddl/create_table.rs index efcd15c6..86c178d2 100644 --- a/src/execution/volcano/ddl/create_table.rs +++ b/src/execution/volcano/ddl/create_table.rs @@ -1,5 +1,5 @@ +use 
crate::errors::DatabaseError; use crate::execution::volcano::{BoxedExecutor, WriteExecutor}; -use crate::execution::ExecutorError; use crate::planner::operator::create_table::CreateTableOperator; use crate::storage::Transaction; use crate::types::tuple::Tuple; @@ -23,7 +23,7 @@ impl WriteExecutor for CreateTable { } impl CreateTable { - #[try_stream(boxed, ok = Tuple, error = ExecutorError)] + #[try_stream(boxed, ok = Tuple, error = DatabaseError)] pub async fn _execute(self, transaction: &mut T) { let CreateTableOperator { table_name, diff --git a/src/execution/volcano/ddl/drop_column.rs b/src/execution/volcano/ddl/drop_column.rs index 07839608..f5b626e1 100644 --- a/src/execution/volcano/ddl/drop_column.rs +++ b/src/execution/volcano/ddl/drop_column.rs @@ -1,6 +1,5 @@ -use crate::binder::BindError; +use crate::errors::DatabaseError; use crate::execution::volcano::{build_read, BoxedExecutor, WriteExecutor}; -use crate::execution::ExecutorError; use crate::planner::operator::alter_table::drop_column::DropColumnOperator; use crate::planner::LogicalPlan; use crate::storage::Transaction; @@ -26,7 +25,7 @@ impl WriteExecutor for DropColumn { } impl DropColumn { - #[try_stream(boxed, ok = Tuple, error = ExecutorError)] + #[try_stream(boxed, ok = Tuple, error = DatabaseError)] async fn _execute(self, transaction: &mut T) { let DropColumnOperator { table_name, @@ -49,7 +48,7 @@ impl DropColumn { .map(|(i, column)| (i, column.desc.is_primary)) { if is_primary { - Err(BindError::InvalidColumn( + Err(DatabaseError::InvalidColumn( "drop of primary key column is not allowed.".to_owned(), ))?; } @@ -60,7 +59,7 @@ impl DropColumn { return Ok(()); } let column_i = option_column_i - .ok_or_else(|| BindError::InvalidColumn("not found column".to_string()))?; + .ok_or_else(|| DatabaseError::InvalidColumn("not found column".to_string()))?; let _ = tuple.columns.remove(column_i); let _ = tuple.values.remove(column_i); diff --git a/src/execution/volcano/ddl/drop_table.rs 
b/src/execution/volcano/ddl/drop_table.rs index d0f06ac9..ea89a61e 100644 --- a/src/execution/volcano/ddl/drop_table.rs +++ b/src/execution/volcano/ddl/drop_table.rs @@ -1,5 +1,5 @@ +use crate::errors::DatabaseError; use crate::execution::volcano::{BoxedExecutor, WriteExecutor}; -use crate::execution::ExecutorError; use crate::planner::operator::drop_table::DropTableOperator; use crate::storage::Transaction; use crate::types::tuple::Tuple; @@ -22,7 +22,7 @@ impl WriteExecutor for DropTable { } impl DropTable { - #[try_stream(boxed, ok = Tuple, error = ExecutorError)] + #[try_stream(boxed, ok = Tuple, error = DatabaseError)] pub async fn _execute(self, transaction: &mut T) { let DropTableOperator { table_name, diff --git a/src/execution/volcano/ddl/truncate.rs b/src/execution/volcano/ddl/truncate.rs index 9d431174..4714f37c 100644 --- a/src/execution/volcano/ddl/truncate.rs +++ b/src/execution/volcano/ddl/truncate.rs @@ -1,5 +1,5 @@ +use crate::errors::DatabaseError; use crate::execution::volcano::{BoxedExecutor, WriteExecutor}; -use crate::execution::ExecutorError; use crate::planner::operator::truncate::TruncateOperator; use crate::storage::Transaction; use crate::types::tuple::Tuple; @@ -22,7 +22,7 @@ impl WriteExecutor for Truncate { } impl Truncate { - #[try_stream(boxed, ok = Tuple, error = ExecutorError)] + #[try_stream(boxed, ok = Tuple, error = DatabaseError)] pub async fn _execute(self, transaction: &mut T) { let TruncateOperator { table_name } = self.op; diff --git a/src/execution/volcano/dml/analyze.rs b/src/execution/volcano/dml/analyze.rs index 227d9ad4..27b5fc6f 100644 --- a/src/execution/volcano/dml/analyze.rs +++ b/src/execution/volcano/dml/analyze.rs @@ -1,9 +1,8 @@ use crate::catalog::{ColumnCatalog, ColumnRef, TableMeta, TableName}; +use crate::errors::DatabaseError; use crate::execution::volcano::{build_read, BoxedExecutor, WriteExecutor}; -use crate::execution::ExecutorError; use crate::optimizer::core::column_meta::ColumnMeta; use 
crate::optimizer::core::histogram::HistogramBuilder; -use crate::optimizer::OptimizerError; use crate::planner::operator::analyze::AnalyzeOperator; use crate::planner::LogicalPlan; use crate::storage::Transaction; @@ -51,7 +50,7 @@ impl WriteExecutor for Analyze { } impl Analyze { - #[try_stream(boxed, ok = Tuple, error = ExecutorError)] + #[try_stream(boxed, ok = Tuple, error = DatabaseError)] pub async fn _execute(self, transaction: &mut T) { let Analyze { table_name, @@ -98,7 +97,7 @@ impl Analyze { let path = dir_path.join(column_id.unwrap().to_string()); let (histogram, sketch) = match builder.build(DEFAULT_NUM_OF_BUCKETS) { Ok(build) => build, - Err(OptimizerError::TooManyBuckets) => continue, + Err(DatabaseError::TooManyBuckets) => continue, err => err?, }; diff --git a/src/execution/volcano/dml/copy_from_file.rs b/src/execution/volcano/dml/copy_from_file.rs index eb4e2977..c24e126a 100644 --- a/src/execution/volcano/dml/copy_from_file.rs +++ b/src/execution/volcano/dml/copy_from_file.rs @@ -1,6 +1,6 @@ use crate::binder::copy::FileFormat; +use crate::errors::DatabaseError; use crate::execution::volcano::{BoxedExecutor, WriteExecutor}; -use crate::execution::ExecutorError; use crate::planner::operator::copy_from_file::CopyFromFileOperator; use crate::storage::Transaction; use crate::types::tuple::Tuple; @@ -28,7 +28,7 @@ impl WriteExecutor for CopyFromFile { } impl CopyFromFile { - #[try_stream(boxed, ok = Tuple, error = ExecutorError)] + #[try_stream(boxed, ok = Tuple, error = DatabaseError)] pub async fn _execute(self, transaction: &mut T) { let (tx, mut rx) = tokio::sync::mpsc::channel(1); let (tx1, mut rx1) = tokio::sync::mpsc::channel(1); @@ -53,7 +53,7 @@ impl CopyFromFile { /// Read records from file using blocking IO. /// /// The read data chunks will be sent through `tx`. 
- fn read_file_blocking(mut self, tx: Sender) -> Result<(), ExecutorError> { + fn read_file_blocking(mut self, tx: Sender) -> Result<(), DatabaseError> { let file = File::open(self.op.source.path)?; let mut buf_reader = BufReader::new(file); let mut reader = match self.op.source.format { @@ -80,7 +80,7 @@ impl CopyFromFile { if !(record.len() == column_count || record.len() == column_count + 1 && record.get(column_count) == Some("")) { - return Err(ExecutorError::LengthMismatch { + return Err(DatabaseError::LengthMismatch { expected: column_count, actual: record.len(), }); @@ -88,27 +88,27 @@ impl CopyFromFile { self.size += 1; tx.blocking_send(tuple_builder.build_with_row(record.iter())?) - .map_err(|_| ExecutorError::ChannelClose)?; + .map_err(|_| DatabaseError::ChannelClose)?; } Ok(()) } } -fn return_result(size: usize, tx: Sender) -> Result<(), ExecutorError> { +fn return_result(size: usize, tx: Sender) -> Result<(), DatabaseError> { let tuple = TupleBuilder::build_result( "COPY FROM SOURCE".to_string(), format!("import {} rows", size), )?; tx.blocking_send(tuple) - .map_err(|_| ExecutorError::ChannelClose)?; + .map_err(|_| DatabaseError::ChannelClose)?; Ok(()) } #[cfg(test)] mod tests { use crate::catalog::{ColumnCatalog, ColumnDesc, ColumnSummary}; - use crate::db::{Database, DatabaseError}; + use crate::db::Database; use futures::StreamExt; use std::io::Write; use std::sync::Arc; @@ -116,6 +116,7 @@ mod tests { use super::*; use crate::binder::copy::ExtSource; + use crate::errors::DatabaseError; use crate::storage::Storage; use crate::types::LogicalType; diff --git a/src/execution/volcano/dml/delete.rs b/src/execution/volcano/dml/delete.rs index 2f1b1901..fe33918c 100644 --- a/src/execution/volcano/dml/delete.rs +++ b/src/execution/volcano/dml/delete.rs @@ -1,6 +1,6 @@ use crate::catalog::TableName; +use crate::errors::DatabaseError; use crate::execution::volcano::{build_read, BoxedExecutor, WriteExecutor}; -use crate::execution::ExecutorError; use 
crate::planner::operator::delete::DeleteOperator; use crate::planner::LogicalPlan; use crate::storage::Transaction; @@ -27,7 +27,7 @@ impl WriteExecutor for Delete { } impl Delete { - #[try_stream(boxed, ok = Tuple, error = ExecutorError)] + #[try_stream(boxed, ok = Tuple, error = DatabaseError)] async fn _execute(self, transaction: &mut T) { let Delete { table_name, input } = self; let option_index_metas = transaction.table(table_name.clone()).map(|table_catalog| { diff --git a/src/execution/volcano/dml/insert.rs b/src/execution/volcano/dml/insert.rs index 20b95c62..def6f60e 100644 --- a/src/execution/volcano/dml/insert.rs +++ b/src/execution/volcano/dml/insert.rs @@ -1,6 +1,6 @@ use crate::catalog::TableName; +use crate::errors::DatabaseError; use crate::execution::volcano::{build_read, BoxedExecutor, WriteExecutor}; -use crate::execution::ExecutorError; use crate::planner::operator::insert::InsertOperator; use crate::planner::LogicalPlan; use crate::storage::Transaction; @@ -42,7 +42,7 @@ impl WriteExecutor for Insert { } impl Insert { - #[try_stream(boxed, ok = Tuple, error = ExecutorError)] + #[try_stream(boxed, ok = Tuple, error = DatabaseError)] pub async fn _execute(self, transaction: &mut T) { let Insert { table_name, @@ -94,10 +94,7 @@ impl Insert { .push((tuple_id.clone(), value.clone())) } if value.is_null() && !col.nullable { - return Err(ExecutorError::InternalError(format!( - "Non-null fields do not allow null values to be passed in: {:?}", - col - ))); + return Err(DatabaseError::NotNull); } tuple.columns.push(col.clone()); diff --git a/src/execution/volcano/dml/update.rs b/src/execution/volcano/dml/update.rs index 68003952..8888047c 100644 --- a/src/execution/volcano/dml/update.rs +++ b/src/execution/volcano/dml/update.rs @@ -1,6 +1,6 @@ use crate::catalog::TableName; +use crate::errors::DatabaseError; use crate::execution::volcano::{build_read, BoxedExecutor, WriteExecutor}; -use crate::execution::ExecutorError; use 
crate::planner::operator::update::UpdateOperator; use crate::planner::LogicalPlan; use crate::storage::Transaction; @@ -34,7 +34,7 @@ impl WriteExecutor for Update { } impl Update { - #[try_stream(boxed, ok = Tuple, error = ExecutorError)] + #[try_stream(boxed, ok = Tuple, error = DatabaseError)] pub async fn _execute(self, transaction: &mut T) { let Update { table_name, diff --git a/src/execution/volcano/dql/aggregate/avg.rs b/src/execution/volcano/dql/aggregate/avg.rs index 822c5de7..f461fa22 100644 --- a/src/execution/volcano/dql/aggregate/avg.rs +++ b/src/execution/volcano/dql/aggregate/avg.rs @@ -1,6 +1,6 @@ +use crate::errors::DatabaseError; use crate::execution::volcano::dql::aggregate::sum::SumAccumulator; use crate::execution::volcano::dql::aggregate::Accumulator; -use crate::execution::ExecutorError; use crate::expression::value_compute::binary_op; use crate::expression::BinaryOperator; use crate::types::value::{DataValue, ValueRef}; @@ -22,7 +22,7 @@ impl AvgAccumulator { } impl Accumulator for AvgAccumulator { - fn update_value(&mut self, value: &ValueRef) -> Result<(), ExecutorError> { + fn update_value(&mut self, value: &ValueRef) -> Result<(), DatabaseError> { if !value.is_null() { self.inner.update_value(value)?; self.count += 1; @@ -31,7 +31,7 @@ impl Accumulator for AvgAccumulator { Ok(()) } - fn evaluate(&self) -> Result { + fn evaluate(&self) -> Result { let value = self.inner.evaluate()?; let quantity = if value.logical_type().is_signed_numeric() { diff --git a/src/execution/volcano/dql/aggregate/count.rs b/src/execution/volcano/dql/aggregate/count.rs index 77e6391c..4eef2637 100644 --- a/src/execution/volcano/dql/aggregate/count.rs +++ b/src/execution/volcano/dql/aggregate/count.rs @@ -1,5 +1,5 @@ +use crate::errors::DatabaseError; use crate::execution::volcano::dql::aggregate::Accumulator; -use crate::execution::ExecutorError; use crate::types::value::{DataValue, ValueRef}; use ahash::RandomState; use std::collections::HashSet; @@ -16,7 +16,7 
@@ impl CountAccumulator { } impl Accumulator for CountAccumulator { - fn update_value(&mut self, value: &ValueRef) -> Result<(), ExecutorError> { + fn update_value(&mut self, value: &ValueRef) -> Result<(), DatabaseError> { if !value.is_null() { self.result += 1; } @@ -24,7 +24,7 @@ impl Accumulator for CountAccumulator { Ok(()) } - fn evaluate(&self) -> Result { + fn evaluate(&self) -> Result { Ok(Arc::new(DataValue::Int32(Some(self.result)))) } } @@ -42,7 +42,7 @@ impl DistinctCountAccumulator { } impl Accumulator for DistinctCountAccumulator { - fn update_value(&mut self, value: &ValueRef) -> Result<(), ExecutorError> { + fn update_value(&mut self, value: &ValueRef) -> Result<(), DatabaseError> { if !value.is_null() { self.distinct_values.insert(value.clone()); } @@ -50,7 +50,7 @@ impl Accumulator for DistinctCountAccumulator { Ok(()) } - fn evaluate(&self) -> Result { + fn evaluate(&self) -> Result { Ok(Arc::new(DataValue::Int32(Some( self.distinct_values.len() as i32 )))) diff --git a/src/execution/volcano/dql/aggregate/hash_agg.rs b/src/execution/volcano/dql/aggregate/hash_agg.rs index 64e02bba..97dd9e07 100644 --- a/src/execution/volcano/dql/aggregate/hash_agg.rs +++ b/src/execution/volcano/dql/aggregate/hash_agg.rs @@ -1,7 +1,7 @@ use crate::catalog::ColumnRef; +use crate::errors::DatabaseError; use crate::execution::volcano::dql::aggregate::{create_accumulators, Accumulator}; use crate::execution::volcano::{build_read, BoxedExecutor, ReadExecutor}; -use crate::execution::ExecutorError; use crate::expression::ScalarExpression; use crate::planner::operator::aggregate::AggregateOperator; use crate::planner::LogicalPlan; @@ -63,7 +63,7 @@ impl HashAggStatus { } } - pub(crate) fn update(&mut self, tuple: Tuple) -> Result<(), ExecutorError> { + pub(crate) fn update(&mut self, tuple: Tuple) -> Result<(), DatabaseError> { // 1. build group and agg columns for hash_agg columns. 
// Tips: AggCall First if self.group_columns.is_empty() { @@ -107,7 +107,7 @@ impl HashAggStatus { Ok(()) } - pub(crate) fn to_tuples(&mut self) -> Result, ExecutorError> { + pub(crate) fn to_tuples(&mut self) -> Result, DatabaseError> { Ok(self .group_hash_accs .drain() @@ -119,7 +119,7 @@ impl HashAggStatus { .chain(group_keys.into_iter().map(Ok)) .try_collect()?; - Ok::(Tuple { + Ok::(Tuple { id: None, columns: self.group_columns.clone(), values, @@ -130,7 +130,7 @@ impl HashAggStatus { } impl HashAggExecutor { - #[try_stream(boxed, ok = Tuple, error = ExecutorError)] + #[try_stream(boxed, ok = Tuple, error = DatabaseError)] pub async fn _execute(self, transaction: &T) { let HashAggExecutor { agg_calls, @@ -154,10 +154,10 @@ impl HashAggExecutor { #[cfg(test)] mod test { use crate::catalog::{ColumnCatalog, ColumnDesc}; + use crate::errors::DatabaseError; use crate::execution::volcano::dql::aggregate::hash_agg::HashAggExecutor; use crate::execution::volcano::dql::test::build_integers; use crate::execution::volcano::{try_collect, ReadExecutor}; - use crate::execution::ExecutorError; use crate::expression::agg::AggKind; use crate::expression::ScalarExpression; use crate::planner::operator::aggregate::AggregateOperator; @@ -174,7 +174,7 @@ mod test { use tempfile::TempDir; #[tokio::test] - async fn test_hash_agg() -> Result<(), ExecutorError> { + async fn test_hash_agg() -> Result<(), DatabaseError> { let temp_dir = TempDir::new().expect("unable to create temporary working directory"); let storage = KipStorage::new(temp_dir.path()).await.unwrap(); let transaction = storage.transaction().await?; diff --git a/src/execution/volcano/dql/aggregate/min_max.rs b/src/execution/volcano/dql/aggregate/min_max.rs index 1cbf649d..f7e51040 100644 --- a/src/execution/volcano/dql/aggregate/min_max.rs +++ b/src/execution/volcano/dql/aggregate/min_max.rs @@ -1,5 +1,5 @@ +use crate::errors::DatabaseError; use crate::execution::volcano::dql::aggregate::Accumulator; -use 
crate::execution::ExecutorError; use crate::expression::value_compute::binary_op; use crate::expression::BinaryOperator; use crate::types::value::{DataValue, ValueRef}; @@ -29,7 +29,7 @@ impl MinMaxAccumulator { } impl Accumulator for MinMaxAccumulator { - fn update_value(&mut self, value: &ValueRef) -> Result<(), ExecutorError> { + fn update_value(&mut self, value: &ValueRef) -> Result<(), DatabaseError> { if !value.is_null() { if let Some(inner_value) = &self.inner { if let DataValue::Boolean(Some(result)) = binary_op(inner_value, value, &self.op)? { @@ -46,7 +46,7 @@ impl Accumulator for MinMaxAccumulator { Ok(()) } - fn evaluate(&self) -> Result { + fn evaluate(&self) -> Result { Ok(self .inner .clone() diff --git a/src/execution/volcano/dql/aggregate/mod.rs b/src/execution/volcano/dql/aggregate/mod.rs index 6a416763..65a2fa3c 100644 --- a/src/execution/volcano/dql/aggregate/mod.rs +++ b/src/execution/volcano/dql/aggregate/mod.rs @@ -5,13 +5,13 @@ mod min_max; pub mod simple_agg; mod sum; +use crate::errors::DatabaseError; use crate::execution::volcano::dql::aggregate::avg::AvgAccumulator; use crate::execution::volcano::dql::aggregate::count::{ CountAccumulator, DistinctCountAccumulator, }; use crate::execution::volcano::dql::aggregate::min_max::MinMaxAccumulator; use crate::execution::volcano::dql::aggregate::sum::{DistinctSumAccumulator, SumAccumulator}; -use crate::execution::ExecutorError; use crate::expression::agg::AggKind; use crate::expression::ScalarExpression; use crate::types::value::ValueRef; @@ -21,10 +21,10 @@ use crate::types::value::ValueRef; /// rows and generically accumulates values. pub trait Accumulator: Send + Sync { /// updates the accumulator's state from a vector of arrays. - fn update_value(&mut self, value: &ValueRef) -> Result<(), ExecutorError>; + fn update_value(&mut self, value: &ValueRef) -> Result<(), DatabaseError>; /// returns its value based on its current state. 
- fn evaluate(&self) -> Result; + fn evaluate(&self) -> Result; } fn create_accumulator(expr: &ScalarExpression) -> Box { diff --git a/src/execution/volcano/dql/aggregate/simple_agg.rs b/src/execution/volcano/dql/aggregate/simple_agg.rs index e67167ce..5ef80aa7 100644 --- a/src/execution/volcano/dql/aggregate/simple_agg.rs +++ b/src/execution/volcano/dql/aggregate/simple_agg.rs @@ -1,6 +1,6 @@ +use crate::errors::DatabaseError; use crate::execution::volcano::dql::aggregate::create_accumulators; use crate::execution::volcano::{build_read, BoxedExecutor, ReadExecutor}; -use crate::execution::ExecutorError; use crate::expression::ScalarExpression; use crate::planner::operator::aggregate::AggregateOperator; use crate::planner::LogicalPlan; @@ -30,7 +30,7 @@ impl ReadExecutor for SimpleAggExecutor { } impl SimpleAggExecutor { - #[try_stream(boxed, ok = Tuple, error = ExecutorError)] + #[try_stream(boxed, ok = Tuple, error = DatabaseError)] pub async fn _execute(self, transaction: &T) { let mut accs = create_accumulators(&self.agg_calls); let mut columns_option = None; diff --git a/src/execution/volcano/dql/aggregate/sum.rs b/src/execution/volcano/dql/aggregate/sum.rs index baee8e14..1a033bed 100644 --- a/src/execution/volcano/dql/aggregate/sum.rs +++ b/src/execution/volcano/dql/aggregate/sum.rs @@ -1,5 +1,5 @@ +use crate::errors::DatabaseError; use crate::execution::volcano::dql::aggregate::Accumulator; -use crate::execution::ExecutorError; use crate::expression::value_compute::binary_op; use crate::expression::BinaryOperator; use crate::types::value::{DataValue, ValueRef}; @@ -23,7 +23,7 @@ impl SumAccumulator { } impl Accumulator for SumAccumulator { - fn update_value(&mut self, value: &ValueRef) -> Result<(), ExecutorError> { + fn update_value(&mut self, value: &ValueRef) -> Result<(), DatabaseError> { if !value.is_null() { self.result = binary_op(&self.result, value, &BinaryOperator::Plus)?; } @@ -31,7 +31,7 @@ impl Accumulator for SumAccumulator { Ok(()) } - fn 
evaluate(&self) -> Result { + fn evaluate(&self) -> Result { Ok(Arc::new(self.result.clone())) } } @@ -51,7 +51,7 @@ impl DistinctSumAccumulator { } impl Accumulator for DistinctSumAccumulator { - fn update_value(&mut self, value: &ValueRef) -> Result<(), ExecutorError> { + fn update_value(&mut self, value: &ValueRef) -> Result<(), DatabaseError> { if !self.distinct_values.contains(value) { self.distinct_values.insert(value.clone()); self.inner.update_value(value)?; @@ -60,7 +60,7 @@ impl Accumulator for DistinctSumAccumulator { Ok(()) } - fn evaluate(&self) -> Result { + fn evaluate(&self) -> Result { self.inner.evaluate() } } diff --git a/src/execution/volcano/dql/dummy.rs b/src/execution/volcano/dql/dummy.rs index 8cd2f323..12575695 100644 --- a/src/execution/volcano/dql/dummy.rs +++ b/src/execution/volcano/dql/dummy.rs @@ -1,5 +1,5 @@ +use crate::errors::DatabaseError; use crate::execution::volcano::{BoxedExecutor, ReadExecutor}; -use crate::execution::ExecutorError; use crate::storage::Transaction; use crate::types::tuple::Tuple; use futures_async_stream::try_stream; @@ -13,6 +13,6 @@ impl ReadExecutor for Dummy { } impl Dummy { - #[try_stream(boxed, ok = Tuple, error = ExecutorError)] + #[try_stream(boxed, ok = Tuple, error = DatabaseError)] pub async fn _execute(self) {} } diff --git a/src/execution/volcano/dql/explain.rs b/src/execution/volcano/dql/explain.rs index 20cb0486..bcd6c899 100644 --- a/src/execution/volcano/dql/explain.rs +++ b/src/execution/volcano/dql/explain.rs @@ -1,7 +1,7 @@ use crate::catalog::ColumnCatalog; use crate::catalog::ColumnRef; +use crate::errors::DatabaseError; use crate::execution::volcano::{BoxedExecutor, ReadExecutor}; -use crate::execution::ExecutorError; use crate::planner::LogicalPlan; use crate::storage::Transaction; use crate::types::tuple::Tuple; @@ -27,7 +27,7 @@ impl ReadExecutor for Explain { } impl Explain { - #[try_stream(boxed, ok = Tuple, error = ExecutorError)] + #[try_stream(boxed, ok = Tuple, error = 
DatabaseError)] pub async fn _execute(self) { let columns: Vec = vec![Arc::new(ColumnCatalog::new_dummy("PLAN".to_string()))]; let values: Vec = vec![Arc::new(DataValue::Utf8(Some(self.plan.explain(0))))]; diff --git a/src/execution/volcano/dql/filter.rs b/src/execution/volcano/dql/filter.rs index 185b31f5..84bae47a 100644 --- a/src/execution/volcano/dql/filter.rs +++ b/src/execution/volcano/dql/filter.rs @@ -1,5 +1,5 @@ +use crate::errors::DatabaseError; use crate::execution::volcano::{build_read, BoxedExecutor, ReadExecutor}; -use crate::execution::ExecutorError; use crate::expression::ScalarExpression; use crate::planner::operator::filter::FilterOperator; use crate::planner::LogicalPlan; @@ -26,7 +26,7 @@ impl ReadExecutor for Filter { } impl Filter { - #[try_stream(boxed, ok = Tuple, error = ExecutorError)] + #[try_stream(boxed, ok = Tuple, error = DatabaseError)] pub async fn _execute(self, transaction: &T) { let Filter { predicate, input } = self; diff --git a/src/execution/volcano/dql/index_scan.rs b/src/execution/volcano/dql/index_scan.rs index f3f0ffca..b6808f3d 100644 --- a/src/execution/volcano/dql/index_scan.rs +++ b/src/execution/volcano/dql/index_scan.rs @@ -1,5 +1,5 @@ +use crate::errors::DatabaseError; use crate::execution::volcano::{BoxedExecutor, ReadExecutor}; -use crate::execution::ExecutorError; use crate::expression::simplify::ConstantBinary; use crate::planner::operator::scan::ScanOperator; use crate::storage::{Iter, Transaction}; @@ -30,7 +30,7 @@ impl ReadExecutor for IndexScan { } impl IndexScan { - #[try_stream(boxed, ok = Tuple, error = ExecutorError)] + #[try_stream(boxed, ok = Tuple, error = DatabaseError)] pub async fn _execute(self, transaction: &T) { let ScanOperator { table_name, diff --git a/src/execution/volcano/dql/join/hash_join.rs b/src/execution/volcano/dql/join/hash_join.rs index d85d2ce2..005f0c06 100644 --- a/src/execution/volcano/dql/join/hash_join.rs +++ b/src/execution/volcano/dql/join/hash_join.rs @@ -1,12 +1,11 @@ use 
crate::catalog::{ColumnCatalog, ColumnRef}; +use crate::errors::DatabaseError; use crate::execution::volcano::dql::join::joins_nullable; use crate::execution::volcano::{build_read, BoxedExecutor, ReadExecutor}; -use crate::execution::ExecutorError; use crate::expression::ScalarExpression; use crate::planner::operator::join::{JoinCondition, JoinOperator, JoinType}; use crate::planner::LogicalPlan; use crate::storage::Transaction; -use crate::types::errors::TypeError; use crate::types::tuple::Tuple; use crate::types::value::{DataValue, ValueRef}; use ahash::{HashMap, HashSet, HashSetExt, RandomState}; @@ -94,7 +93,7 @@ impl HashJoinStatus { } } - pub(crate) fn left_build(&mut self, tuple: Tuple) -> Result<(), ExecutorError> { + pub(crate) fn left_build(&mut self, tuple: Tuple) -> Result<(), DatabaseError> { let HashJoinStatus { on_left_keys, hash_random_state, @@ -120,7 +119,7 @@ impl HashJoinStatus { Ok(()) } - pub(crate) fn right_probe(&mut self, tuple: Tuple) -> Result, ExecutorError> { + pub(crate) fn right_probe(&mut self, tuple: Tuple) -> Result, DatabaseError> { let HashJoinStatus { hash_random_state, join_columns, @@ -285,7 +284,7 @@ impl HashJoinStatus { on_keys: &[ScalarExpression], hash_random_state: &RandomState, tuple: &Tuple, - ) -> Result { + ) -> Result { let mut values = Vec::with_capacity(on_keys.len()); for expr in on_keys { @@ -297,7 +296,7 @@ impl HashJoinStatus { } impl HashJoin { - #[try_stream(boxed, ok = Tuple, error = ExecutorError)] + #[try_stream(boxed, ok = Tuple, error = DatabaseError)] pub async fn _execute(self, transaction: &T) { let HashJoin { on, @@ -337,10 +336,10 @@ impl HashJoin { #[cfg(test)] mod test { use crate::catalog::{ColumnCatalog, ColumnDesc}; + use crate::errors::DatabaseError; use crate::execution::volcano::dql::join::hash_join::HashJoin; use crate::execution::volcano::dql::test::build_integers; use crate::execution::volcano::{try_collect, ReadExecutor}; - use crate::execution::ExecutorError; use 
crate::expression::ScalarExpression; use crate::planner::operator::join::{JoinCondition, JoinOperator, JoinType}; use crate::planner::operator::values::ValuesOperator; @@ -467,7 +466,7 @@ mod test { } #[tokio::test] - async fn test_inner_join() -> Result<(), ExecutorError> { + async fn test_inner_join() -> Result<(), DatabaseError> { let temp_dir = TempDir::new().expect("unable to create temporary working directory"); let storage = KipStorage::new(temp_dir.path()).await?; let transaction = storage.transaction().await?; @@ -504,7 +503,7 @@ mod test { } #[tokio::test] - async fn test_left_join() -> Result<(), ExecutorError> { + async fn test_left_join() -> Result<(), DatabaseError> { let temp_dir = TempDir::new().expect("unable to create temporary working directory"); let storage = KipStorage::new(temp_dir.path()).await?; let transaction = storage.transaction().await?; @@ -545,7 +544,7 @@ mod test { } #[tokio::test] - async fn test_right_join() -> Result<(), ExecutorError> { + async fn test_right_join() -> Result<(), DatabaseError> { let temp_dir = TempDir::new().expect("unable to create temporary working directory"); let storage = KipStorage::new(temp_dir.path()).await?; let transaction = storage.transaction().await?; @@ -586,7 +585,7 @@ mod test { } #[tokio::test] - async fn test_full_join() -> Result<(), ExecutorError> { + async fn test_full_join() -> Result<(), DatabaseError> { let temp_dir = TempDir::new().expect("unable to create temporary working directory"); let storage = KipStorage::new(temp_dir.path()).await?; let transaction = storage.transaction().await?; diff --git a/src/execution/volcano/dql/limit.rs b/src/execution/volcano/dql/limit.rs index cbe6f8e9..11543407 100644 --- a/src/execution/volcano/dql/limit.rs +++ b/src/execution/volcano/dql/limit.rs @@ -1,5 +1,5 @@ +use crate::errors::DatabaseError; use crate::execution::volcano::{build_read, BoxedExecutor, ReadExecutor}; -use crate::execution::ExecutorError; use 
crate::planner::operator::limit::LimitOperator; use crate::planner::LogicalPlan; use crate::storage::Transaction; @@ -30,7 +30,7 @@ impl ReadExecutor for Limit { } impl Limit { - #[try_stream(boxed, ok = Tuple, error = ExecutorError)] + #[try_stream(boxed, ok = Tuple, error = DatabaseError)] pub async fn _execute(self, transaction: &T) { let Limit { offset, diff --git a/src/execution/volcano/dql/projection.rs b/src/execution/volcano/dql/projection.rs index 0d61ed7a..544053f7 100644 --- a/src/execution/volcano/dql/projection.rs +++ b/src/execution/volcano/dql/projection.rs @@ -1,5 +1,5 @@ +use crate::errors::DatabaseError; use crate::execution::volcano::{build_read, BoxedExecutor, ReadExecutor}; -use crate::execution::ExecutorError; use crate::expression::ScalarExpression; use crate::planner::operator::project::ProjectOperator; use crate::planner::LogicalPlan; @@ -25,7 +25,7 @@ impl ReadExecutor for Projection { } impl Projection { - #[try_stream(boxed, ok = Tuple, error = ExecutorError)] + #[try_stream(boxed, ok = Tuple, error = DatabaseError)] pub async fn _execute(self, transaction: &T) { let Projection { exprs, input } = self; diff --git a/src/execution/volcano/dql/seq_scan.rs b/src/execution/volcano/dql/seq_scan.rs index 364738ca..60456bf2 100644 --- a/src/execution/volcano/dql/seq_scan.rs +++ b/src/execution/volcano/dql/seq_scan.rs @@ -1,5 +1,5 @@ +use crate::errors::DatabaseError; use crate::execution::volcano::{BoxedExecutor, ReadExecutor}; -use crate::execution::ExecutorError; use crate::planner::operator::scan::ScanOperator; use crate::storage::{Iter, Transaction}; use crate::types::tuple::Tuple; @@ -22,7 +22,7 @@ impl ReadExecutor for SeqScan { } impl SeqScan { - #[try_stream(boxed, ok = Tuple, error = ExecutorError)] + #[try_stream(boxed, ok = Tuple, error = DatabaseError)] pub async fn _execute(self, transaction: &T) { let ScanOperator { table_name, diff --git a/src/execution/volcano/dql/show_table.rs b/src/execution/volcano/dql/show_table.rs index 
53f9fde6..616f954b 100644 --- a/src/execution/volcano/dql/show_table.rs +++ b/src/execution/volcano/dql/show_table.rs @@ -1,7 +1,7 @@ use crate::catalog::ColumnRef; use crate::catalog::{ColumnCatalog, TableMeta}; +use crate::errors::DatabaseError; use crate::execution::volcano::{BoxedExecutor, ReadExecutor}; -use crate::execution::ExecutorError; use crate::storage::Transaction; use crate::types::tuple::Tuple; use crate::types::value::{DataValue, ValueRef}; @@ -17,7 +17,7 @@ impl ReadExecutor for ShowTables { } impl ShowTables { - #[try_stream(boxed, ok = Tuple, error = ExecutorError)] + #[try_stream(boxed, ok = Tuple, error = DatabaseError)] pub async fn _execute(self, transaction: &T) { let metas = transaction.table_metas()?; diff --git a/src/execution/volcano/dql/sort.rs b/src/execution/volcano/dql/sort.rs index 9cd8dbd2..eb122264 100644 --- a/src/execution/volcano/dql/sort.rs +++ b/src/execution/volcano/dql/sort.rs @@ -1,9 +1,8 @@ +use crate::errors::DatabaseError; use crate::execution::volcano::{build_read, BoxedExecutor, ReadExecutor}; -use crate::execution::ExecutorError; use crate::planner::operator::sort::{SortField, SortOperator}; use crate::planner::LogicalPlan; use crate::storage::Transaction; -use crate::types::errors::TypeError; use crate::types::tuple::Tuple; use futures_async_stream::try_stream; use itertools::Itertools; @@ -41,7 +40,7 @@ pub(crate) fn radix_sort(mut tuples: Vec<(T, Vec)>) -> Vec { pub(crate) fn sort( sort_fields: &[SortField], tuples: Vec, -) -> Result, ExecutorError> { +) -> Result, DatabaseError> { let tuples_with_keys: Vec<(Tuple, Vec)> = tuples .into_iter() .map(|tuple| { @@ -65,7 +64,7 @@ pub(crate) fn sort( } full_key.extend(key); } - Ok::<(Tuple, Vec), TypeError>((tuple, full_key)) + Ok::<(Tuple, Vec), DatabaseError>((tuple, full_key)) }) .try_collect()?; @@ -95,7 +94,7 @@ impl ReadExecutor for Sort { } impl Sort { - #[try_stream(boxed, ok = Tuple, error = ExecutorError)] + #[try_stream(boxed, ok = Tuple, error = 
DatabaseError)] pub async fn _execute(self, transaction: &T) { let Sort { sort_fields, diff --git a/src/execution/volcano/dql/values.rs b/src/execution/volcano/dql/values.rs index ed8eca42..d2cd4555 100644 --- a/src/execution/volcano/dql/values.rs +++ b/src/execution/volcano/dql/values.rs @@ -1,5 +1,5 @@ +use crate::errors::DatabaseError; use crate::execution::volcano::{BoxedExecutor, ReadExecutor}; -use crate::execution::ExecutorError; use crate::planner::operator::values::ValuesOperator; use crate::storage::Transaction; use crate::types::tuple::Tuple; @@ -22,7 +22,7 @@ impl ReadExecutor for Values { } impl Values { - #[try_stream(boxed, ok = Tuple, error = ExecutorError)] + #[try_stream(boxed, ok = Tuple, error = DatabaseError)] pub async fn _execute(self) { let ValuesOperator { columns, rows } = self.op; diff --git a/src/execution/volcano/mod.rs b/src/execution/volcano/mod.rs index 4d685f53..7ee67f9a 100644 --- a/src/execution/volcano/mod.rs +++ b/src/execution/volcano/mod.rs @@ -2,6 +2,7 @@ pub(crate) mod ddl; pub(crate) mod dml; pub(crate) mod dql; +use crate::errors::DatabaseError; use crate::execution::volcano::ddl::create_table::CreateTable; use crate::execution::volcano::ddl::drop_column::DropColumn; use crate::execution::volcano::ddl::drop_table::DropTable; @@ -24,7 +25,6 @@ use crate::execution::volcano::dql::seq_scan::SeqScan; use crate::execution::volcano::dql::show_table::ShowTables; use crate::execution::volcano::dql::sort::Sort; use crate::execution::volcano::dql::values::Values; -use crate::execution::ExecutorError; use crate::planner::operator::{Operator, PhysicalOption}; use crate::planner::LogicalPlan; use crate::storage::Transaction; @@ -35,7 +35,7 @@ use futures::TryStreamExt; use self::ddl::add_column::AddColumn; -pub type BoxedExecutor<'a> = BoxStream<'a, Result>; +pub type BoxedExecutor<'a> = BoxStream<'a, Result>; pub trait ReadExecutor { fn execute(self, transaction: &T) -> BoxedExecutor; @@ -169,7 +169,7 @@ pub fn build_write(plan: 
LogicalPlan, transaction: &mut T) -> Bo pub async fn try_collect<'a>( executor: &mut BoxedExecutor<'a>, -) -> Result, ExecutorError> { +) -> Result, DatabaseError> { let mut output = Vec::new(); while let Some(tuple) = executor.try_next().await? { diff --git a/src/expression/evaluator.rs b/src/expression/evaluator.rs index fcca9fe2..90b581e9 100644 --- a/src/expression/evaluator.rs +++ b/src/expression/evaluator.rs @@ -1,7 +1,7 @@ use crate::catalog::ColumnSummary; +use crate::errors::DatabaseError; use crate::expression::value_compute::{binary_op, unary_op}; use crate::expression::ScalarExpression; -use crate::types::errors::TypeError; use crate::types::tuple::Tuple; use crate::types::value::{DataValue, ValueRef}; use itertools::Itertools; @@ -13,7 +13,7 @@ lazy_static! { } impl ScalarExpression { - pub fn eval(&self, tuple: &Tuple) -> Result { + pub fn eval(&self, tuple: &Tuple) -> Result { if let Some(value) = Self::eval_with_summary(tuple, self.output_column().summary()) { return Ok(value.clone()); } diff --git a/src/expression/simplify.rs b/src/expression/simplify.rs index ed0ce390..f0bcff86 100644 --- a/src/expression/simplify.rs +++ b/src/expression/simplify.rs @@ -1,7 +1,7 @@ use crate::catalog::ColumnRef; +use crate::errors::DatabaseError; use crate::expression::value_compute::{binary_op, unary_op}; use crate::expression::{BinaryOperator, ScalarExpression, UnaryOperator}; -use crate::types::errors::TypeError; use crate::types::value::{DataValue, ValueRef, NULL_VALUE}; use crate::types::{ColumnId, LogicalType}; use ahash::RandomState; @@ -29,7 +29,7 @@ pub enum ConstantBinary { impl ConstantBinary { #[allow(dead_code)] - fn is_null(&self) -> Result { + fn is_null(&self) -> Result { match self { ConstantBinary::Scope { min, max } => { let op = |bound: &Bound| { @@ -46,11 +46,11 @@ impl ConstantBinary { Ok(matches!((min, max), (Bound::Unbounded, Bound::Unbounded))) } ConstantBinary::Eq(val) | ConstantBinary::NotEq(val) => Ok(val.is_null()), - _ => 
Err(TypeError::InvalidType), + _ => Err(DatabaseError::InvalidType), } } - pub fn rearrange(self) -> Result, TypeError> { + pub fn rearrange(self) -> Result, DatabaseError> { match self { ConstantBinary::Or(binaries) => { if binaries.is_empty() { @@ -61,7 +61,7 @@ impl ConstantBinary { for binary in binaries { match binary { - ConstantBinary::Or(_) => return Err(TypeError::InvalidType), + ConstantBinary::Or(_) => return Err(DatabaseError::InvalidType), ConstantBinary::And(mut and_binaries) => { condition_binaries.append(&mut and_binaries); } @@ -135,7 +135,7 @@ impl ConstantBinary { } } - pub fn scope_aggregation(&mut self) -> Result<(), TypeError> { + pub fn scope_aggregation(&mut self) -> Result<(), DatabaseError> { match self { // `Or` is allowed to contain And, `Scope`, `Eq/NotEq` // Tips: Only single-level `And` @@ -199,7 +199,7 @@ impl ConstantBinary { // Tips: It only makes sense if the condition is and aggregation fn and_scope_aggregation( binaries: &Vec, - ) -> Result, TypeError> { + ) -> Result, DatabaseError> { if binaries.is_empty() { return Ok(vec![]); } @@ -243,7 +243,7 @@ impl ConstantBinary { let _ = eq_set.remove(val); } ConstantBinary::Or(_) | ConstantBinary::And(_) => { - return Err(TypeError::InvalidType) + return Err(DatabaseError::InvalidType) } } } @@ -480,11 +480,11 @@ impl ScalarExpression { } } - pub fn simplify(&mut self) -> Result<(), TypeError> { + pub fn simplify(&mut self) -> Result<(), DatabaseError> { self._simplify(&mut Vec::new()) } - pub fn constant_calculation(&mut self) -> Result<(), TypeError> { + pub fn constant_calculation(&mut self) -> Result<(), DatabaseError> { match self { ScalarExpression::Unary { expr, op, .. 
} => { expr.constant_calculation()?; @@ -527,7 +527,7 @@ impl ScalarExpression { } // Tips: Indirect expressions like `ScalarExpression::Alias` will be lost - fn _simplify(&mut self, replaces: &mut Vec) -> Result<(), TypeError> { + fn _simplify(&mut self, replaces: &mut Vec) -> Result<(), DatabaseError> { match self { ScalarExpression::Binary { left_expr, @@ -677,7 +677,7 @@ impl ScalarExpression { left_expr: &mut Box, right_expr: &mut Box, op: &mut BinaryOperator, - ) -> Result<(), TypeError> { + ) -> Result<(), DatabaseError> { left_expr._simplify(replaces)?; if Self::is_arithmetic(op) { @@ -794,7 +794,10 @@ impl ScalarExpression { /// The And and Or of ConstantBinary are concerned with the data range that needs to be aggregated. /// - `ConstantBinary::And`: Aggregate the minimum range of all conditions in and /// - `ConstantBinary::Or`: Rearrange and sort the range of each OR data - pub fn convert_binary(&self, col_id: &ColumnId) -> Result, TypeError> { + pub fn convert_binary( + &self, + col_id: &ColumnId, + ) -> Result, DatabaseError> { match self { ScalarExpression::Binary { left_expr, @@ -997,16 +1000,16 @@ impl fmt::Display for ConstantBinary { #[cfg(test)] mod test { use crate::catalog::{ColumnCatalog, ColumnDesc, ColumnSummary}; + use crate::errors::DatabaseError; use crate::expression::simplify::ConstantBinary; use crate::expression::{BinaryOperator, ScalarExpression}; - use crate::types::errors::TypeError; use crate::types::value::DataValue; use crate::types::LogicalType; use std::collections::Bound; use std::sync::Arc; #[test] - fn test_convert_binary_simple() -> Result<(), TypeError> { + fn test_convert_binary_simple() -> Result<(), DatabaseError> { let col_1 = Arc::new(ColumnCatalog { summary: ColumnSummary { id: Some(0), @@ -1118,7 +1121,7 @@ mod test { } #[test] - fn test_scope_aggregation_eq_noteq() -> Result<(), TypeError> { + fn test_scope_aggregation_eq_noteq() -> Result<(), DatabaseError> { let val_0 = Arc::new(DataValue::Int32(Some(0))); let 
val_1 = Arc::new(DataValue::Int32(Some(1))); let val_2 = Arc::new(DataValue::Int32(Some(2))); @@ -1139,7 +1142,7 @@ mod test { } #[test] - fn test_scope_aggregation_eq_noteq_cover() -> Result<(), TypeError> { + fn test_scope_aggregation_eq_noteq_cover() -> Result<(), DatabaseError> { let val_0 = Arc::new(DataValue::Int32(Some(0))); let val_1 = Arc::new(DataValue::Int32(Some(1))); let val_2 = Arc::new(DataValue::Int32(Some(2))); @@ -1164,7 +1167,7 @@ mod test { } #[test] - fn test_scope_aggregation_scope() -> Result<(), TypeError> { + fn test_scope_aggregation_scope() -> Result<(), DatabaseError> { let val_0 = Arc::new(DataValue::Int32(Some(0))); let val_1 = Arc::new(DataValue::Int32(Some(1))); let val_2 = Arc::new(DataValue::Int32(Some(2))); @@ -1207,7 +1210,7 @@ mod test { } #[test] - fn test_scope_aggregation_mixed() -> Result<(), TypeError> { + fn test_scope_aggregation_mixed() -> Result<(), DatabaseError> { let val_0 = Arc::new(DataValue::Int32(Some(0))); let val_1 = Arc::new(DataValue::Int32(Some(1))); let val_2 = Arc::new(DataValue::Int32(Some(2))); @@ -1250,7 +1253,7 @@ mod test { } #[test] - fn test_scope_aggregation_or() -> Result<(), TypeError> { + fn test_scope_aggregation_or() -> Result<(), DatabaseError> { let val_0 = Arc::new(DataValue::Int32(Some(0))); let val_1 = Arc::new(DataValue::Int32(Some(1))); let val_2 = Arc::new(DataValue::Int32(Some(2))); @@ -1289,7 +1292,7 @@ mod test { } #[test] - fn test_scope_aggregation_or_unbounded() -> Result<(), TypeError> { + fn test_scope_aggregation_or_unbounded() -> Result<(), DatabaseError> { let val_0 = Arc::new(DataValue::Int32(Some(0))); let val_1 = Arc::new(DataValue::Int32(Some(1))); let val_2 = Arc::new(DataValue::Int32(Some(2))); @@ -1322,7 +1325,7 @@ mod test { } #[test] - fn test_scope_aggregation_or_lower_unbounded() -> Result<(), TypeError> { + fn test_scope_aggregation_or_lower_unbounded() -> Result<(), DatabaseError> { let val_0 = Arc::new(DataValue::Int32(Some(2))); let val_1 = 
Arc::new(DataValue::Int32(Some(3))); @@ -1359,7 +1362,7 @@ mod test { } #[test] - fn test_scope_aggregation_or_upper_unbounded() -> Result<(), TypeError> { + fn test_scope_aggregation_or_upper_unbounded() -> Result<(), DatabaseError> { let val_0 = Arc::new(DataValue::Int32(Some(2))); let val_1 = Arc::new(DataValue::Int32(Some(3))); @@ -1396,7 +1399,7 @@ mod test { } #[test] - fn test_rearrange() -> Result<(), TypeError> { + fn test_rearrange() -> Result<(), DatabaseError> { let val_0 = Arc::new(DataValue::Int32(Some(0))); let val_1 = Arc::new(DataValue::Int32(Some(1))); let val_2 = Arc::new(DataValue::Int32(Some(2))); diff --git a/src/expression/value_compute.rs b/src/expression/value_compute.rs index 18b57c54..59ad2310 100644 --- a/src/expression/value_compute.rs +++ b/src/expression/value_compute.rs @@ -1,5 +1,5 @@ +use crate::errors::DatabaseError; use crate::expression::{BinaryOperator, UnaryOperator}; -use crate::types::errors::TypeError; use crate::types::value::DataValue; use crate::types::LogicalType; use regex::Regex; @@ -18,7 +18,7 @@ fn unpack_utf8(value: DataValue) -> Option { } } -pub fn unary_op(value: &DataValue, op: &UnaryOperator) -> Result { +pub fn unary_op(value: &DataValue, op: &UnaryOperator) -> Result { let mut value_type = value.logical_type(); let mut value = value.clone(); @@ -55,7 +55,7 @@ pub fn unary_op(value: &DataValue, op: &UnaryOperator) -> Result unreachable!(), } } else { - Err(TypeError::InvalidType) + Err(DatabaseError::InvalidType) } } @@ -180,7 +180,7 @@ pub fn binary_op( left: &DataValue, right: &DataValue, op: &BinaryOperator, -) -> Result { +) -> Result { if matches!(op, BinaryOperator::Like | BinaryOperator::NotLike) { let value_option = unpack_utf8(left.clone().cast(&LogicalType::Varchar(None))?); let pattern_option = unpack_utf8(right.clone().cast(&LogicalType::Varchar(None))?); @@ -523,8 +523,8 @@ pub fn binary_op( _ => todo!("unsupported operator"), } } - LogicalType::SqlNull => return Err(TypeError::NotNull), - 
LogicalType::Invalid => return Err(TypeError::InvalidType), + LogicalType::SqlNull => return Err(DatabaseError::NotNull), + LogicalType::Invalid => return Err(DatabaseError::InvalidType), }; Ok(value) @@ -532,13 +532,13 @@ pub fn binary_op( #[cfg(test)] mod test { + use crate::errors::DatabaseError; use crate::expression::value_compute::binary_op; use crate::expression::BinaryOperator; - use crate::types::errors::TypeError; use crate::types::value::DataValue; #[test] - fn test_binary_op_arithmetic_plus() -> Result<(), TypeError> { + fn test_binary_op_arithmetic_plus() -> Result<(), DatabaseError> { let plus_i32_1 = binary_op( &DataValue::Int32(None), &DataValue::Int32(None), @@ -618,7 +618,7 @@ mod test { } #[test] - fn test_binary_op_arithmetic_minus() -> Result<(), TypeError> { + fn test_binary_op_arithmetic_minus() -> Result<(), DatabaseError> { let minus_i32_1 = binary_op( &DataValue::Int32(None), &DataValue::Int32(None), @@ -698,7 +698,7 @@ mod test { } #[test] - fn test_binary_op_arithmetic_multiply() -> Result<(), TypeError> { + fn test_binary_op_arithmetic_multiply() -> Result<(), DatabaseError> { let multiply_i32_1 = binary_op( &DataValue::Int32(None), &DataValue::Int32(None), @@ -778,7 +778,7 @@ mod test { } #[test] - fn test_binary_op_arithmetic_divide() -> Result<(), TypeError> { + fn test_binary_op_arithmetic_divide() -> Result<(), DatabaseError> { let divide_i32_1 = binary_op( &DataValue::Int32(None), &DataValue::Int32(None), @@ -858,7 +858,7 @@ mod test { } #[test] - fn test_binary_op_cast() -> Result<(), TypeError> { + fn test_binary_op_cast() -> Result<(), DatabaseError> { let i32_cast_1 = binary_op( &DataValue::Int32(Some(1)), &DataValue::Int8(Some(1)), @@ -902,7 +902,7 @@ mod test { } #[test] - fn test_binary_op_i32_compare() -> Result<(), TypeError> { + fn test_binary_op_i32_compare() -> Result<(), DatabaseError> { assert_eq!( binary_op( &DataValue::Int32(Some(1)), @@ -1014,7 +1014,7 @@ mod test { } #[test] - fn test_binary_op_i64_compare() -> 
Result<(), TypeError> { + fn test_binary_op_i64_compare() -> Result<(), DatabaseError> { assert_eq!( binary_op( &DataValue::Int64(Some(1)), @@ -1126,7 +1126,7 @@ mod test { } #[test] - fn test_binary_op_f64_compare() -> Result<(), TypeError> { + fn test_binary_op_f64_compare() -> Result<(), DatabaseError> { assert_eq!( binary_op( &DataValue::Float64(Some(1.0)), @@ -1238,7 +1238,7 @@ mod test { } #[test] - fn test_binary_op_f32_compare() -> Result<(), TypeError> { + fn test_binary_op_f32_compare() -> Result<(), DatabaseError> { assert_eq!( binary_op( &DataValue::Float32(Some(1.0)), @@ -1350,7 +1350,7 @@ mod test { } #[test] - fn test_binary_op_bool_compare() -> Result<(), TypeError> { + fn test_binary_op_bool_compare() -> Result<(), DatabaseError> { assert_eq!( binary_op( &DataValue::Boolean(Some(true)), @@ -1423,7 +1423,7 @@ mod test { } #[test] - fn test_binary_op_utf8_compare() -> Result<(), TypeError> { + fn test_binary_op_utf8_compare() -> Result<(), DatabaseError> { assert_eq!( binary_op( &DataValue::Utf8(Some("a".to_string())), diff --git a/src/lib.rs b/src/lib.rs index d158657c..0d125f5c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -9,6 +9,7 @@ extern crate core; pub mod binder; pub mod catalog; pub mod db; +pub mod errors; pub mod execution; pub mod expression; #[cfg(feature = "marcos")] diff --git a/src/optimizer/core/column_meta.rs b/src/optimizer/core/column_meta.rs index 6b8aa18e..04ca1204 100644 --- a/src/optimizer/core/column_meta.rs +++ b/src/optimizer/core/column_meta.rs @@ -1,8 +1,8 @@ use crate::catalog::TableName; +use crate::errors::DatabaseError; use crate::expression::simplify::ConstantBinary; use crate::optimizer::core::cm_sketch::CountMinSketch; use crate::optimizer::core::histogram::Histogram; -use crate::optimizer::OptimizerError; use crate::storage::Transaction; use crate::types::value::DataValue; use crate::types::{ColumnId, LogicalType}; @@ -25,7 +25,7 @@ impl<'a, T: Transaction> ColumnMetaLoader<'a, T> { ColumnMetaLoader { cache, tx } } 
- pub fn load(&self, table_name: TableName) -> Result<&Vec, OptimizerError> { + pub fn load(&self, table_name: TableName) -> Result<&Vec, DatabaseError> { let option = self.cache.get(&table_name); return if let Some(column_metas) = option { @@ -81,7 +81,7 @@ impl ColumnMeta { count } - pub fn to_file(&self, path: impl AsRef) -> Result<(), OptimizerError> { + pub fn to_file(&self, path: impl AsRef) -> Result<(), DatabaseError> { let mut file = OpenOptions::new() .create(true) .write(true) @@ -93,7 +93,7 @@ impl ColumnMeta { Ok(()) } - pub fn from_file(path: impl AsRef) -> Result { + pub fn from_file(path: impl AsRef) -> Result { let mut file = OpenOptions::new() .create(true) .write(true) @@ -110,9 +110,9 @@ impl ColumnMeta { #[cfg(test)] mod tests { use crate::catalog::{ColumnCatalog, ColumnDesc, ColumnSummary}; + use crate::errors::DatabaseError; use crate::optimizer::core::column_meta::ColumnMeta; use crate::optimizer::core::histogram::HistogramBuilder; - use crate::optimizer::OptimizerError; use crate::types::value::DataValue; use crate::types::LogicalType; use std::sync::Arc; @@ -137,7 +137,7 @@ mod tests { } #[test] - fn test_to_file_and_from_file() -> Result<(), OptimizerError> { + fn test_to_file_and_from_file() -> Result<(), DatabaseError> { let temp_dir = TempDir::new().expect("unable to create temporary working directory"); let column = int32_column(); diff --git a/src/optimizer/core/histogram.rs b/src/optimizer/core/histogram.rs index 9d968fc8..635c02c6 100644 --- a/src/optimizer/core/histogram.rs +++ b/src/optimizer/core/histogram.rs @@ -1,8 +1,8 @@ use crate::catalog::ColumnCatalog; +use crate::errors::DatabaseError; use crate::execution::volcano::dql::sort::radix_sort; use crate::expression::simplify::ConstantBinary; use crate::optimizer::core::cm_sketch::CountMinSketch; -use crate::optimizer::OptimizerError; use crate::types::value::{DataValue, ValueRef}; use crate::types::{ColumnId, LogicalType}; use ordered_float::OrderedFloat; @@ -48,9 +48,9 @@ 
struct Bucket { } impl HistogramBuilder { - pub fn new(column: &ColumnCatalog, capacity: Option) -> Result { + pub fn new(column: &ColumnCatalog, capacity: Option) -> Result { Ok(Self { - column_id: column.id().ok_or(OptimizerError::OwnerLessColumn)?, + column_id: column.id().ok_or(DatabaseError::OwnerLessColumn)?, data_type: *column.datatype(), null_count: 0, values: capacity @@ -60,7 +60,7 @@ impl HistogramBuilder { }) } - pub fn append(&mut self, value: &ValueRef) -> Result<(), OptimizerError> { + pub fn append(&mut self, value: &ValueRef) -> Result<(), DatabaseError> { if value.is_null() { self.null_count += 1; } else { @@ -78,9 +78,9 @@ impl HistogramBuilder { pub fn build( self, number_of_buckets: usize, - ) -> Result<(Histogram, CountMinSketch), OptimizerError> { + ) -> Result<(Histogram, CountMinSketch), DatabaseError> { if number_of_buckets > self.values.len() { - return Err(OptimizerError::TooManyBuckets); + return Err(DatabaseError::TooManyBuckets); } let mut sketch = CountMinSketch::new(self.values.len(), 0.95, 1.0); @@ -437,9 +437,9 @@ impl Bucket { #[cfg(test)] mod tests { use crate::catalog::{ColumnCatalog, ColumnDesc, ColumnSummary}; + use crate::errors::DatabaseError; use crate::expression::simplify::ConstantBinary; use crate::optimizer::core::histogram::{Bucket, HistogramBuilder}; - use crate::optimizer::OptimizerError; use crate::types::value::DataValue; use crate::types::LogicalType; use std::ops::Bound; @@ -464,7 +464,7 @@ mod tests { } #[test] - fn test_sort_tuples_on_histogram() -> Result<(), OptimizerError> { + fn test_sort_tuples_on_histogram() -> Result<(), DatabaseError> { let column = int32_column(); let mut builder = HistogramBuilder::new(&column, Some(15))?; @@ -490,7 +490,7 @@ mod tests { builder.append(&Arc::new(DataValue::Null))?; builder.append(&Arc::new(DataValue::Int32(None)))?; - // assert!(matches!(builder.build(10), Err(OptimizerError::TooManyBuckets))); + // assert!(matches!(builder.build(10), 
Err(DatabaseError::TooManyBuckets))); let (histogram, _) = builder.build(5)?; @@ -532,7 +532,7 @@ } #[test] - fn test_rev_sort_tuples_on_histogram() -> Result<(), OptimizerError> { + fn test_rev_sort_tuples_on_histogram() -> Result<(), DatabaseError> { let column = int32_column(); let mut builder = HistogramBuilder::new(&column, Some(15))?; @@ -598,7 +598,7 @@ } #[test] - fn test_non_average_on_histogram() -> Result<(), OptimizerError> { + fn test_non_average_on_histogram() -> Result<(), DatabaseError> { let column = int32_column(); let mut builder = HistogramBuilder::new(&column, Some(15))?; @@ -659,7 +659,7 @@ } #[test] - fn test_collect_count() -> Result<(), OptimizerError> { + fn test_collect_count() -> Result<(), DatabaseError> { let column = int32_column(); let mut builder = HistogramBuilder::new(&column, Some(15))?; diff --git a/src/optimizer/core/memo.rs b/src/optimizer/core/memo.rs index e5307adb..65d109ba 100644 --- a/src/optimizer/core/memo.rs +++ b/src/optimizer/core/memo.rs @@ -1,3 +1,4 @@ +use crate::errors::DatabaseError; use crate::optimizer::core::column_meta::ColumnMetaLoader; use crate::optimizer::core::pattern::PatternMatcher; use crate::optimizer::core::rule::{ImplementationRule, MatchPattern}; @@ -5,7 +6,6 @@ use crate::optimizer::heuristic::batch::HepMatchOrder; use crate::optimizer::heuristic::graph::{HepGraph, HepNodeId}; use crate::optimizer::heuristic::matcher::HepMatcher; use crate::optimizer::rule::implementation::ImplementationRuleImpl; -use crate::optimizer::OptimizerError; use crate::planner::operator::PhysicalOption; use crate::storage::Transaction; use std::cmp::Ordering; @@ -38,12 +38,12 @@ impl Memo { graph: &HepGraph, loader: &ColumnMetaLoader<'_, T>, implementations: &[ImplementationRuleImpl], - ) -> Result { + ) -> Result { let node_count = graph.node_count(); let mut groups = HashMap::new(); if node_count == 0 { - return Err(OptimizerError::EmptyPlan); + return
Err(DatabaseError::EmptyPlan); } for node_id in graph.nodes_iter(HepMatchOrder::TopDown, None) { @@ -81,7 +81,8 @@ impl Memo { #[cfg(test)] mod tests { use crate::binder::{Binder, BinderContext}; - use crate::db::{Database, DatabaseError}; + use crate::db::Database; + use crate::errors::DatabaseError; use crate::optimizer::core::memo::Memo; use crate::optimizer::heuristic::batch::HepBatchStrategy; use crate::optimizer::heuristic::graph::HepGraph; diff --git a/src/optimizer/core/rule.rs b/src/optimizer/core/rule.rs index b2ee1a6d..106a8923 100644 --- a/src/optimizer/core/rule.rs +++ b/src/optimizer/core/rule.rs @@ -1,8 +1,8 @@ +use crate::errors::DatabaseError; use crate::optimizer::core::column_meta::ColumnMetaLoader; use crate::optimizer::core::memo::GroupExpression; use crate::optimizer::core::pattern::Pattern; use crate::optimizer::heuristic::graph::{HepGraph, HepNodeId}; -use crate::optimizer::OptimizerError; use crate::planner::operator::Operator; use crate::storage::Transaction; @@ -12,7 +12,7 @@ pub trait MatchPattern { } pub trait NormalizationRule: MatchPattern { - fn apply(&self, node_id: HepNodeId, graph: &mut HepGraph) -> Result<(), OptimizerError>; + fn apply(&self, node_id: HepNodeId, graph: &mut HepGraph) -> Result<(), DatabaseError>; } pub trait ImplementationRule: MatchPattern { @@ -21,5 +21,5 @@ pub trait ImplementationRule: MatchPattern { op: &Operator, loader: &ColumnMetaLoader, group_expr: &mut GroupExpression, - ) -> Result<(), OptimizerError>; + ) -> Result<(), DatabaseError>; } diff --git a/src/optimizer/heuristic/graph.rs b/src/optimizer/heuristic/graph.rs index bff31450..50cc8276 100644 --- a/src/optimizer/heuristic/graph.rs +++ b/src/optimizer/heuristic/graph.rs @@ -207,13 +207,13 @@ impl HepGraph { #[cfg(test)] mod tests { use crate::binder::test::select_sql_run; - use crate::execution::ExecutorError; + use crate::errors::DatabaseError; use crate::optimizer::heuristic::graph::{HepGraph, HepNodeId}; use crate::planner::operator::Operator; 
use petgraph::stable_graph::{EdgeIndex, NodeIndex}; #[tokio::test] - async fn test_graph_for_plan() -> Result<(), ExecutorError> { + async fn test_graph_for_plan() -> Result<(), DatabaseError> { let plan = select_sql_run("select * from t1 left join t2 on c1 = c3").await?; let graph = HepGraph::new(plan); @@ -235,7 +235,7 @@ mod tests { } #[tokio::test] - async fn test_graph_add_node() -> Result<(), ExecutorError> { + async fn test_graph_add_node() -> Result<(), DatabaseError> { let plan = select_sql_run("select * from t1 left join t2 on c1 = c3").await?; let mut graph = HepGraph::new(plan); @@ -263,7 +263,7 @@ mod tests { } #[tokio::test] - async fn test_graph_replace_node() -> Result<(), ExecutorError> { + async fn test_graph_replace_node() -> Result<(), DatabaseError> { let plan = select_sql_run("select * from t1 left join t2 on c1 = c3").await?; let mut graph = HepGraph::new(plan); @@ -275,7 +275,7 @@ mod tests { } #[tokio::test] - async fn test_graph_remove_middle_node_by_single() -> Result<(), ExecutorError> { + async fn test_graph_remove_middle_node_by_single() -> Result<(), DatabaseError> { let plan = select_sql_run("select * from t1 left join t2 on c1 = c3").await?; let mut graph = HepGraph::new(plan); @@ -294,7 +294,7 @@ mod tests { } #[tokio::test] - async fn test_graph_remove_middle_node_with_childrens() -> Result<(), ExecutorError> { + async fn test_graph_remove_middle_node_with_childrens() -> Result<(), DatabaseError> { let plan = select_sql_run("select * from t1 left join t2 on c1 = c3").await?; let mut graph = HepGraph::new(plan); @@ -306,7 +306,7 @@ mod tests { } #[tokio::test] - async fn test_graph_swap_node() -> Result<(), ExecutorError> { + async fn test_graph_swap_node() -> Result<(), DatabaseError> { let plan = select_sql_run("select * from t1 left join t2 on c1 = c3").await?; let mut graph = HepGraph::new(plan); @@ -325,7 +325,7 @@ mod tests { } #[tokio::test] - async fn test_graph_add_root() -> Result<(), ExecutorError> { + async fn 
test_graph_add_root() -> Result<(), DatabaseError> { let plan = select_sql_run("select * from t1 left join t2 on c1 = c3").await?; let mut graph = HepGraph::new(plan); @@ -341,7 +341,7 @@ mod tests { } #[tokio::test] - async fn test_graph_to_plan() -> Result<(), ExecutorError> { + async fn test_graph_to_plan() -> Result<(), DatabaseError> { let plan = select_sql_run("select * from t1 left join t2 on c1 = c3").await?; let graph = HepGraph::new(plan.clone()); diff --git a/src/optimizer/heuristic/matcher.rs b/src/optimizer/heuristic/matcher.rs index f31d1682..5381b174 100644 --- a/src/optimizer/heuristic/matcher.rs +++ b/src/optimizer/heuristic/matcher.rs @@ -58,7 +58,7 @@ impl PatternMatcher for HepMatcher<'_, '_> { #[cfg(test)] mod tests { use crate::binder::test::select_sql_run; - use crate::execution::ExecutorError; + use crate::errors::DatabaseError; use crate::optimizer::core::pattern::{Pattern, PatternChildrenPredicate, PatternMatcher}; use crate::optimizer::heuristic::graph::{HepGraph, HepNodeId}; use crate::optimizer::heuristic::matcher::HepMatcher; @@ -66,7 +66,7 @@ mod tests { use crate::planner::LogicalPlan; #[tokio::test] - async fn test_predicate() -> Result<(), ExecutorError> { + async fn test_predicate() -> Result<(), DatabaseError> { let plan = select_sql_run("select * from t1").await?; let graph = HepGraph::new(plan.clone()); diff --git a/src/optimizer/heuristic/optimizer.rs b/src/optimizer/heuristic/optimizer.rs index 2fa7adb8..415d8e3b 100644 --- a/src/optimizer/heuristic/optimizer.rs +++ b/src/optimizer/heuristic/optimizer.rs @@ -1,3 +1,4 @@ +use crate::errors::DatabaseError; use crate::optimizer::core::column_meta::ColumnMetaLoader; use crate::optimizer::core::memo::Memo; use crate::optimizer::core::pattern::PatternMatcher; @@ -7,7 +8,6 @@ use crate::optimizer::heuristic::graph::{HepGraph, HepNodeId}; use crate::optimizer::heuristic::matcher::HepMatcher; use crate::optimizer::rule::implementation::ImplementationRuleImpl; use 
crate::optimizer::rule::normalization::NormalizationRuleImpl; -use crate::optimizer::OptimizerError; use crate::planner::LogicalPlan; use crate::storage::Transaction; use std::ops::Not; @@ -45,7 +45,7 @@ impl HepOptimizer { pub fn find_best( mut self, loader: Option<&ColumnMetaLoader<'_, T>>, - ) -> Result { + ) -> Result { for ref batch in self.batches { let mut batch_over = false; let mut iteration = 1usize; @@ -70,7 +70,7 @@ impl HepOptimizer { Ok(self .graph .to_plan(memo.as_ref()) - .ok_or(OptimizerError::EmptyPlan)?) + .ok_or(DatabaseError::EmptyPlan)?) } fn apply_batch( @@ -78,7 +78,7 @@ impl HepOptimizer { HepBatch { rules, strategy, .. }: &HepBatch, - ) -> Result { + ) -> Result { let before_version = graph.version; for rule in rules { @@ -96,7 +96,7 @@ impl HepOptimizer { graph: &mut HepGraph, rule: &NormalizationRuleImpl, node_id: HepNodeId, - ) -> Result { + ) -> Result { let before_version = graph.version; if HepMatcher::new(rule.pattern(), node_id, graph).match_opt_expr() { diff --git a/src/optimizer/mod.rs b/src/optimizer/mod.rs index 5135e07a..0165dc25 100644 --- a/src/optimizer/mod.rs +++ b/src/optimizer/mod.rs @@ -1,46 +1,5 @@ -use crate::storage::StorageError; -use crate::types::errors::TypeError; -use kip_db::KernelError; - /// The architecture and some components, /// such as (/core) are referenced from sqlrs pub mod core; pub mod heuristic; pub mod rule; - -#[derive(thiserror::Error, Debug)] -pub enum OptimizerError { - #[error("type error")] - TypeError( - #[source] - #[from] - TypeError, - ), - #[error("plan is empty")] - EmptyPlan, - #[error("this column must belong to a table")] - OwnerLessColumn, - #[error("there are more buckets than elements")] - TooManyBuckets, - #[error("io: {0}")] - IO( - #[source] - #[from] - std::io::Error, - ), - #[error("cache error: {0}")] - Cache( - #[source] - #[from] - KernelError, - ), - /// Serialization or deserialization error - #[error(transparent)] - SerdeBinCode(#[from] Box), - #[error("storage error: 
{0}")] - Storage( - #[source] - #[from] - StorageError, - ), -} diff --git a/src/optimizer/rule/implementation/ddl/add_column.rs b/src/optimizer/rule/implementation/ddl/add_column.rs index 47eac517..3b672735 100644 --- a/src/optimizer/rule/implementation/ddl/add_column.rs +++ b/src/optimizer/rule/implementation/ddl/add_column.rs @@ -1,8 +1,8 @@ +use crate::errors::DatabaseError; use crate::optimizer::core::column_meta::ColumnMetaLoader; use crate::optimizer::core::memo::{Expression, GroupExpression}; use crate::optimizer::core::pattern::{Pattern, PatternChildrenPredicate}; use crate::optimizer::core::rule::{ImplementationRule, MatchPattern}; -use crate::optimizer::OptimizerError; use crate::planner::operator::{Operator, PhysicalOption}; use crate::single_mapping; use crate::storage::Transaction; diff --git a/src/optimizer/rule/implementation/ddl/create_table.rs b/src/optimizer/rule/implementation/ddl/create_table.rs index fc700be5..5d6a6ec6 100644 --- a/src/optimizer/rule/implementation/ddl/create_table.rs +++ b/src/optimizer/rule/implementation/ddl/create_table.rs @@ -1,8 +1,8 @@ +use crate::errors::DatabaseError; use crate::optimizer::core::column_meta::ColumnMetaLoader; use crate::optimizer::core::memo::{Expression, GroupExpression}; use crate::optimizer::core::pattern::{Pattern, PatternChildrenPredicate}; use crate::optimizer::core::rule::{ImplementationRule, MatchPattern}; -use crate::optimizer::OptimizerError; use crate::planner::operator::{Operator, PhysicalOption}; use crate::single_mapping; use crate::storage::Transaction; diff --git a/src/optimizer/rule/implementation/ddl/drop_column.rs b/src/optimizer/rule/implementation/ddl/drop_column.rs index c293f5c2..41641345 100644 --- a/src/optimizer/rule/implementation/ddl/drop_column.rs +++ b/src/optimizer/rule/implementation/ddl/drop_column.rs @@ -1,8 +1,8 @@ +use crate::errors::DatabaseError; use crate::optimizer::core::column_meta::ColumnMetaLoader; use crate::optimizer::core::memo::{Expression, 
GroupExpression}; use crate::optimizer::core::pattern::{Pattern, PatternChildrenPredicate}; use crate::optimizer::core::rule::{ImplementationRule, MatchPattern}; -use crate::optimizer::OptimizerError; use crate::planner::operator::{Operator, PhysicalOption}; use crate::single_mapping; use crate::storage::Transaction; diff --git a/src/optimizer/rule/implementation/ddl/drop_table.rs b/src/optimizer/rule/implementation/ddl/drop_table.rs index 5bd0ae18..2f7b0a87 100644 --- a/src/optimizer/rule/implementation/ddl/drop_table.rs +++ b/src/optimizer/rule/implementation/ddl/drop_table.rs @@ -1,8 +1,8 @@ +use crate::errors::DatabaseError; use crate::optimizer::core::column_meta::ColumnMetaLoader; use crate::optimizer::core::memo::{Expression, GroupExpression}; use crate::optimizer::core::pattern::{Pattern, PatternChildrenPredicate}; use crate::optimizer::core::rule::{ImplementationRule, MatchPattern}; -use crate::optimizer::OptimizerError; use crate::planner::operator::{Operator, PhysicalOption}; use crate::single_mapping; use crate::storage::Transaction; diff --git a/src/optimizer/rule/implementation/ddl/truncate.rs b/src/optimizer/rule/implementation/ddl/truncate.rs index 2f77f06d..2088fcc9 100644 --- a/src/optimizer/rule/implementation/ddl/truncate.rs +++ b/src/optimizer/rule/implementation/ddl/truncate.rs @@ -1,8 +1,8 @@ +use crate::errors::DatabaseError; use crate::optimizer::core::column_meta::ColumnMetaLoader; use crate::optimizer::core::memo::{Expression, GroupExpression}; use crate::optimizer::core::pattern::{Pattern, PatternChildrenPredicate}; use crate::optimizer::core::rule::{ImplementationRule, MatchPattern}; -use crate::optimizer::OptimizerError; use crate::planner::operator::{Operator, PhysicalOption}; use crate::single_mapping; use crate::storage::Transaction; diff --git a/src/optimizer/rule/implementation/dml/analyze.rs b/src/optimizer/rule/implementation/dml/analyze.rs index dd522d53..18524212 100644 --- a/src/optimizer/rule/implementation/dml/analyze.rs 
+++ b/src/optimizer/rule/implementation/dml/analyze.rs @@ -1,8 +1,8 @@ +use crate::errors::DatabaseError; use crate::optimizer::core::column_meta::ColumnMetaLoader; use crate::optimizer::core::memo::{Expression, GroupExpression}; use crate::optimizer::core::pattern::{Pattern, PatternChildrenPredicate}; use crate::optimizer::core::rule::{ImplementationRule, MatchPattern}; -use crate::optimizer::OptimizerError; use crate::planner::operator::{Operator, PhysicalOption}; use crate::single_mapping; use crate::storage::Transaction; diff --git a/src/optimizer/rule/implementation/dml/copy_from_file.rs b/src/optimizer/rule/implementation/dml/copy_from_file.rs index 13fc0c75..9365df5a 100644 --- a/src/optimizer/rule/implementation/dml/copy_from_file.rs +++ b/src/optimizer/rule/implementation/dml/copy_from_file.rs @@ -1,8 +1,8 @@ +use crate::errors::DatabaseError; use crate::optimizer::core::column_meta::ColumnMetaLoader; use crate::optimizer::core::memo::{Expression, GroupExpression}; use crate::optimizer::core::pattern::{Pattern, PatternChildrenPredicate}; use crate::optimizer::core::rule::{ImplementationRule, MatchPattern}; -use crate::optimizer::OptimizerError; use crate::planner::operator::{Operator, PhysicalOption}; use crate::single_mapping; use crate::storage::Transaction; diff --git a/src/optimizer/rule/implementation/dml/copy_to_file.rs b/src/optimizer/rule/implementation/dml/copy_to_file.rs index d894bbb8..051d6360 100644 --- a/src/optimizer/rule/implementation/dml/copy_to_file.rs +++ b/src/optimizer/rule/implementation/dml/copy_to_file.rs @@ -1,8 +1,8 @@ +use crate::errors::DatabaseError; use crate::optimizer::core::column_meta::ColumnMetaLoader; use crate::optimizer::core::memo::{Expression, GroupExpression}; use crate::optimizer::core::pattern::{Pattern, PatternChildrenPredicate}; use crate::optimizer::core::rule::{ImplementationRule, MatchPattern}; -use crate::optimizer::OptimizerError; use crate::planner::operator::{Operator, PhysicalOption}; use 
crate::single_mapping; use crate::storage::Transaction; diff --git a/src/optimizer/rule/implementation/dml/delete.rs b/src/optimizer/rule/implementation/dml/delete.rs index 9adf3f77..2191a4e4 100644 --- a/src/optimizer/rule/implementation/dml/delete.rs +++ b/src/optimizer/rule/implementation/dml/delete.rs @@ -1,8 +1,8 @@ +use crate::errors::DatabaseError; use crate::optimizer::core::column_meta::ColumnMetaLoader; use crate::optimizer::core::memo::{Expression, GroupExpression}; use crate::optimizer::core::pattern::{Pattern, PatternChildrenPredicate}; use crate::optimizer::core::rule::{ImplementationRule, MatchPattern}; -use crate::optimizer::OptimizerError; use crate::planner::operator::{Operator, PhysicalOption}; use crate::single_mapping; use crate::storage::Transaction; diff --git a/src/optimizer/rule/implementation/dml/insert.rs b/src/optimizer/rule/implementation/dml/insert.rs index 828bf8df..274b2a14 100644 --- a/src/optimizer/rule/implementation/dml/insert.rs +++ b/src/optimizer/rule/implementation/dml/insert.rs @@ -1,8 +1,8 @@ +use crate::errors::DatabaseError; use crate::optimizer::core::column_meta::ColumnMetaLoader; use crate::optimizer::core::memo::{Expression, GroupExpression}; use crate::optimizer::core::pattern::{Pattern, PatternChildrenPredicate}; use crate::optimizer::core::rule::{ImplementationRule, MatchPattern}; -use crate::optimizer::OptimizerError; use crate::planner::operator::{Operator, PhysicalOption}; use crate::single_mapping; use crate::storage::Transaction; diff --git a/src/optimizer/rule/implementation/dml/update.rs b/src/optimizer/rule/implementation/dml/update.rs index ab0b56d3..7e2488f2 100644 --- a/src/optimizer/rule/implementation/dml/update.rs +++ b/src/optimizer/rule/implementation/dml/update.rs @@ -1,8 +1,8 @@ +use crate::errors::DatabaseError; use crate::optimizer::core::column_meta::ColumnMetaLoader; use crate::optimizer::core::memo::{Expression, GroupExpression}; use crate::optimizer::core::pattern::{Pattern, 
PatternChildrenPredicate}; use crate::optimizer::core::rule::{ImplementationRule, MatchPattern}; -use crate::optimizer::OptimizerError; use crate::planner::operator::{Operator, PhysicalOption}; use crate::single_mapping; use crate::storage::Transaction; diff --git a/src/optimizer/rule/implementation/dql/aggregate.rs b/src/optimizer/rule/implementation/dql/aggregate.rs index cbb8c3d4..aece1faf 100644 --- a/src/optimizer/rule/implementation/dql/aggregate.rs +++ b/src/optimizer/rule/implementation/dql/aggregate.rs @@ -1,8 +1,8 @@ +use crate::errors::DatabaseError; use crate::optimizer::core::column_meta::ColumnMetaLoader; use crate::optimizer::core::memo::{Expression, GroupExpression}; use crate::optimizer::core::pattern::{Pattern, PatternChildrenPredicate}; use crate::optimizer::core::rule::{ImplementationRule, MatchPattern}; -use crate::optimizer::OptimizerError; use crate::planner::operator::{Operator, PhysicalOption}; use crate::single_mapping; use crate::storage::Transaction; diff --git a/src/optimizer/rule/implementation/dql/dummy.rs b/src/optimizer/rule/implementation/dql/dummy.rs index c616c55c..cc211919 100644 --- a/src/optimizer/rule/implementation/dql/dummy.rs +++ b/src/optimizer/rule/implementation/dql/dummy.rs @@ -1,8 +1,8 @@ +use crate::errors::DatabaseError; use crate::optimizer::core::column_meta::ColumnMetaLoader; use crate::optimizer::core::memo::{Expression, GroupExpression}; use crate::optimizer::core::pattern::{Pattern, PatternChildrenPredicate}; use crate::optimizer::core::rule::{ImplementationRule, MatchPattern}; -use crate::optimizer::OptimizerError; use crate::planner::operator::{Operator, PhysicalOption}; use crate::single_mapping; use crate::storage::Transaction; diff --git a/src/optimizer/rule/implementation/dql/filter.rs b/src/optimizer/rule/implementation/dql/filter.rs index 506ac2fd..2946d4fb 100644 --- a/src/optimizer/rule/implementation/dql/filter.rs +++ b/src/optimizer/rule/implementation/dql/filter.rs @@ -1,8 +1,8 @@ +use 
crate::errors::DatabaseError; use crate::optimizer::core::column_meta::ColumnMetaLoader; use crate::optimizer::core::memo::{Expression, GroupExpression}; use crate::optimizer::core::pattern::{Pattern, PatternChildrenPredicate}; use crate::optimizer::core::rule::{ImplementationRule, MatchPattern}; -use crate::optimizer::OptimizerError; use crate::planner::operator::{Operator, PhysicalOption}; use crate::single_mapping; use crate::storage::Transaction; diff --git a/src/optimizer/rule/implementation/dql/join.rs b/src/optimizer/rule/implementation/dql/join.rs index 68f9f0da..59d62450 100644 --- a/src/optimizer/rule/implementation/dql/join.rs +++ b/src/optimizer/rule/implementation/dql/join.rs @@ -1,8 +1,8 @@ +use crate::errors::DatabaseError; use crate::optimizer::core::column_meta::ColumnMetaLoader; use crate::optimizer::core::memo::{Expression, GroupExpression}; use crate::optimizer::core::pattern::{Pattern, PatternChildrenPredicate}; use crate::optimizer::core::rule::{ImplementationRule, MatchPattern}; -use crate::optimizer::OptimizerError; use crate::planner::operator::{Operator, PhysicalOption}; use crate::single_mapping; use crate::storage::Transaction; diff --git a/src/optimizer/rule/implementation/dql/limit.rs b/src/optimizer/rule/implementation/dql/limit.rs index 4936c6db..05c785f3 100644 --- a/src/optimizer/rule/implementation/dql/limit.rs +++ b/src/optimizer/rule/implementation/dql/limit.rs @@ -1,8 +1,8 @@ +use crate::errors::DatabaseError; use crate::optimizer::core::column_meta::ColumnMetaLoader; use crate::optimizer::core::memo::{Expression, GroupExpression}; use crate::optimizer::core::pattern::{Pattern, PatternChildrenPredicate}; use crate::optimizer::core::rule::{ImplementationRule, MatchPattern}; -use crate::optimizer::OptimizerError; use crate::planner::operator::{Operator, PhysicalOption}; use crate::single_mapping; use crate::storage::Transaction; diff --git a/src/optimizer/rule/implementation/dql/projection.rs 
b/src/optimizer/rule/implementation/dql/projection.rs index 66208208..ca4bc6a8 100644 --- a/src/optimizer/rule/implementation/dql/projection.rs +++ b/src/optimizer/rule/implementation/dql/projection.rs @@ -1,8 +1,8 @@ +use crate::errors::DatabaseError; use crate::optimizer::core::column_meta::ColumnMetaLoader; use crate::optimizer::core::memo::{Expression, GroupExpression}; use crate::optimizer::core::pattern::{Pattern, PatternChildrenPredicate}; use crate::optimizer::core::rule::{ImplementationRule, MatchPattern}; -use crate::optimizer::OptimizerError; use crate::planner::operator::{Operator, PhysicalOption}; use crate::single_mapping; use crate::storage::Transaction; diff --git a/src/optimizer/rule/implementation/dql/scan.rs b/src/optimizer/rule/implementation/dql/scan.rs index 54a4814d..58147905 100644 --- a/src/optimizer/rule/implementation/dql/scan.rs +++ b/src/optimizer/rule/implementation/dql/scan.rs @@ -1,8 +1,8 @@ +use crate::errors::DatabaseError; use crate::optimizer::core::column_meta::{ColumnMeta, ColumnMetaLoader}; use crate::optimizer::core::memo::{Expression, GroupExpression}; use crate::optimizer::core::pattern::{Pattern, PatternChildrenPredicate}; use crate::optimizer::core::rule::{ImplementationRule, MatchPattern}; -use crate::optimizer::OptimizerError; use crate::planner::operator::{Operator, PhysicalOption}; use crate::storage::Transaction; use crate::types::ColumnId; @@ -32,7 +32,7 @@ impl ImplementationRule for SeqScanImplementation { op: &Operator, loader: &ColumnMetaLoader, group_expr: &mut GroupExpression, - ) -> Result<(), OptimizerError> { + ) -> Result<(), DatabaseError> { if let Operator::Scan(scan_op) = op { let column_metas = loader.load(scan_op.table_name.clone())?; let mut cost = None; @@ -67,7 +67,7 @@ impl ImplementationRule for IndexScanImplementation { op: &Operator, loader: &ColumnMetaLoader<'_, T>, group_expr: &mut GroupExpression, - ) -> Result<(), OptimizerError> { + ) -> Result<(), DatabaseError> { if let 
Operator::Scan(scan_op) = op { let column_metas = loader.load(scan_op.table_name.clone())?; for index_info in scan_op.index_infos.iter() { diff --git a/src/optimizer/rule/implementation/dql/sort.rs b/src/optimizer/rule/implementation/dql/sort.rs index 95688f04..bd0a2ae2 100644 --- a/src/optimizer/rule/implementation/dql/sort.rs +++ b/src/optimizer/rule/implementation/dql/sort.rs @@ -1,8 +1,8 @@ +use crate::errors::DatabaseError; use crate::optimizer::core::column_meta::ColumnMetaLoader; use crate::optimizer::core::memo::{Expression, GroupExpression}; use crate::optimizer::core::pattern::{Pattern, PatternChildrenPredicate}; use crate::optimizer::core::rule::{ImplementationRule, MatchPattern}; -use crate::optimizer::OptimizerError; use crate::planner::operator::{Operator, PhysicalOption}; use crate::single_mapping; use crate::storage::Transaction; diff --git a/src/optimizer/rule/implementation/dql/values.rs b/src/optimizer/rule/implementation/dql/values.rs index df0f170a..32b81874 100644 --- a/src/optimizer/rule/implementation/dql/values.rs +++ b/src/optimizer/rule/implementation/dql/values.rs @@ -1,8 +1,8 @@ +use crate::errors::DatabaseError; use crate::optimizer::core::column_meta::ColumnMetaLoader; use crate::optimizer::core::memo::{Expression, GroupExpression}; use crate::optimizer::core::pattern::{Pattern, PatternChildrenPredicate}; use crate::optimizer::core::rule::{ImplementationRule, MatchPattern}; -use crate::optimizer::OptimizerError; use crate::planner::operator::{Operator, PhysicalOption}; use crate::single_mapping; use crate::storage::Transaction; diff --git a/src/optimizer/rule/implementation/marcos.rs b/src/optimizer/rule/implementation/marcos.rs index d61326c5..5e5faa4d 100644 --- a/src/optimizer/rule/implementation/marcos.rs +++ b/src/optimizer/rule/implementation/marcos.rs @@ -13,7 +13,7 @@ macro_rules! 
single_mapping { _: &Operator, _: &ColumnMetaLoader<'_, T>, group_expr: &mut GroupExpression, - ) -> Result<(), OptimizerError> { + ) -> Result<(), DatabaseError> { //TODO: CostModel group_expr.append_expr(Expression { op: $option, diff --git a/src/optimizer/rule/implementation/mod.rs b/src/optimizer/rule/implementation/mod.rs index f7318cca..9fc422e7 100644 --- a/src/optimizer/rule/implementation/mod.rs +++ b/src/optimizer/rule/implementation/mod.rs @@ -3,6 +3,7 @@ pub(crate) mod dml; pub(crate) mod dql; pub(crate) mod marcos; +use crate::errors::DatabaseError; use crate::optimizer::core::column_meta::ColumnMetaLoader; use crate::optimizer::core::memo::GroupExpression; use crate::optimizer::core::pattern::Pattern; @@ -31,7 +32,6 @@ use crate::optimizer::rule::implementation::dql::scan::{ }; use crate::optimizer::rule::implementation::dql::sort::SortImplementation; use crate::optimizer::rule::implementation::dql::values::ValuesImplementation; -use crate::optimizer::OptimizerError; use crate::planner::operator::Operator; use crate::storage::Transaction; @@ -99,7 +99,7 @@ impl ImplementationRule for ImplementationRuleImpl { operator: &Operator, loader: &ColumnMetaLoader<'_, T>, group_expr: &mut GroupExpression, - ) -> Result<(), OptimizerError> { + ) -> Result<(), DatabaseError> { match self { ImplementationRuleImpl::GroupByAggregate => { GroupByAggregateImplementation.to_expression(operator, loader, group_expr)? 
diff --git a/src/optimizer/rule/normalization/column_pruning.rs b/src/optimizer/rule/normalization/column_pruning.rs index 2434536b..7b30a829 100644 --- a/src/optimizer/rule/normalization/column_pruning.rs +++ b/src/optimizer/rule/normalization/column_pruning.rs @@ -1,10 +1,10 @@ use crate::catalog::{ColumnRef, ColumnSummary}; +use crate::errors::DatabaseError; use crate::expression::agg::AggKind; use crate::expression::ScalarExpression; use crate::optimizer::core::pattern::{Pattern, PatternChildrenPredicate}; use crate::optimizer::core::rule::{MatchPattern, NormalizationRule}; use crate::optimizer::heuristic::graph::{HepGraph, HepNodeId}; -use crate::optimizer::OptimizerError; use crate::planner::operator::Operator; use crate::types::value::DataValue; use crate::types::LogicalType; @@ -161,7 +161,7 @@ impl MatchPattern for ColumnPruning { } impl NormalizationRule for ColumnPruning { - fn apply(&self, node_id: HepNodeId, graph: &mut HepGraph) -> Result<(), OptimizerError> { + fn apply(&self, node_id: HepNodeId, graph: &mut HepGraph) -> Result<(), DatabaseError> { Self::_apply(&mut HashSet::new(), true, node_id, graph); // mark changed to skip this rule batch graph.version += 1; @@ -173,7 +173,7 @@ impl NormalizationRule for ColumnPruning { #[cfg(test)] mod tests { use crate::binder::test::select_sql_run; - use crate::db::DatabaseError; + use crate::errors::DatabaseError; use crate::optimizer::heuristic::batch::HepBatchStrategy; use crate::optimizer::heuristic::optimizer::HepOptimizer; use crate::optimizer::rule::normalization::NormalizationRuleImpl; diff --git a/src/optimizer/rule/normalization/combine_operators.rs b/src/optimizer/rule/normalization/combine_operators.rs index df6cb87f..419122a9 100644 --- a/src/optimizer/rule/normalization/combine_operators.rs +++ b/src/optimizer/rule/normalization/combine_operators.rs @@ -1,9 +1,9 @@ +use crate::errors::DatabaseError; use crate::expression::{BinaryOperator, ScalarExpression}; use 
crate::optimizer::core::pattern::{Pattern, PatternChildrenPredicate}; use crate::optimizer::core::rule::{MatchPattern, NormalizationRule}; use crate::optimizer::heuristic::graph::{HepGraph, HepNodeId}; use crate::optimizer::rule::normalization::is_subset_exprs; -use crate::optimizer::OptimizerError; use crate::planner::operator::Operator; use crate::types::LogicalType; use lazy_static::lazy_static; @@ -39,7 +39,7 @@ impl MatchPattern for CollapseProject { } impl NormalizationRule for CollapseProject { - fn apply(&self, node_id: HepNodeId, graph: &mut HepGraph) -> Result<(), OptimizerError> { + fn apply(&self, node_id: HepNodeId, graph: &mut HepGraph) -> Result<(), DatabaseError> { if let Operator::Project(op) = graph.operator(node_id) { if let Some(child_id) = graph.eldest_child_at(node_id) { if let Operator::Project(child_op) = graph.operator(child_id) { @@ -66,7 +66,7 @@ impl MatchPattern for CombineFilter { } impl NormalizationRule for CombineFilter { - fn apply(&self, node_id: HepNodeId, graph: &mut HepGraph) -> Result<(), OptimizerError> { + fn apply(&self, node_id: HepNodeId, graph: &mut HepGraph) -> Result<(), DatabaseError> { if let Operator::Filter(op) = graph.operator(node_id).clone() { if let Some(child_id) = graph.eldest_child_at(node_id) { if let Operator::Filter(child_op) = graph.operator_mut(child_id) { @@ -90,7 +90,7 @@ impl NormalizationRule for CombineFilter { #[cfg(test)] mod tests { use crate::binder::test::select_sql_run; - use crate::db::DatabaseError; + use crate::errors::DatabaseError; use crate::expression::ScalarExpression::Constant; use crate::expression::{BinaryOperator, ScalarExpression}; use crate::optimizer::heuristic::batch::HepBatchStrategy; diff --git a/src/optimizer/rule/normalization/mod.rs b/src/optimizer/rule/normalization/mod.rs index 27697c0f..e143c93c 100644 --- a/src/optimizer/rule/normalization/mod.rs +++ b/src/optimizer/rule/normalization/mod.rs @@ -1,3 +1,4 @@ +use crate::errors::DatabaseError; use 
crate::expression::ScalarExpression; use crate::optimizer::core::pattern::Pattern; use crate::optimizer::core::rule::{MatchPattern, NormalizationRule}; @@ -11,7 +12,6 @@ use crate::optimizer::rule::normalization::pushdown_predicates::PushPredicateInt use crate::optimizer::rule::normalization::pushdown_predicates::PushPredicateThroughJoin; use crate::optimizer::rule::normalization::simplification::ConstantCalculation; use crate::optimizer::rule::normalization::simplification::SimplifyFilter; -use crate::optimizer::OptimizerError; mod column_pruning; mod combine_operators; @@ -58,7 +58,7 @@ impl MatchPattern for NormalizationRuleImpl { } impl NormalizationRule for NormalizationRuleImpl { - fn apply(&self, node_id: HepNodeId, graph: &mut HepGraph) -> Result<(), OptimizerError> { + fn apply(&self, node_id: HepNodeId, graph: &mut HepGraph) -> Result<(), DatabaseError> { match self { NormalizationRuleImpl::ColumnPruning => ColumnPruning.apply(node_id, graph), NormalizationRuleImpl::CollapseProject => CollapseProject.apply(node_id, graph), diff --git a/src/optimizer/rule/normalization/pushdown_limit.rs b/src/optimizer/rule/normalization/pushdown_limit.rs index a5fd5b7e..e2f4413c 100644 --- a/src/optimizer/rule/normalization/pushdown_limit.rs +++ b/src/optimizer/rule/normalization/pushdown_limit.rs @@ -1,8 +1,8 @@ +use crate::errors::DatabaseError; use crate::optimizer::core::pattern::Pattern; use crate::optimizer::core::pattern::PatternChildrenPredicate; use crate::optimizer::core::rule::{MatchPattern, NormalizationRule}; use crate::optimizer::heuristic::graph::{HepGraph, HepNodeId}; -use crate::optimizer::OptimizerError; use crate::planner::operator::join::JoinType; use crate::planner::operator::limit::LimitOperator; use crate::planner::operator::Operator; @@ -57,7 +57,7 @@ impl MatchPattern for LimitProjectTranspose { } impl NormalizationRule for LimitProjectTranspose { - fn apply(&self, node_id: HepNodeId, graph: &mut HepGraph) -> Result<(), OptimizerError> { + fn 
apply(&self, node_id: HepNodeId, graph: &mut HepGraph) -> Result<(), DatabaseError> { if let Some(child_id) = graph.eldest_child_at(node_id) { graph.swap_node(node_id, child_id); } @@ -77,7 +77,7 @@ impl MatchPattern for EliminateLimits { } impl NormalizationRule for EliminateLimits { - fn apply(&self, node_id: HepNodeId, graph: &mut HepGraph) -> Result<(), OptimizerError> { + fn apply(&self, node_id: HepNodeId, graph: &mut HepGraph) -> Result<(), DatabaseError> { if let Operator::Limit(op) = graph.operator(node_id) { if let Some(child_id) = graph.eldest_child_at(node_id) { if let Operator::Limit(child_op) = graph.operator(child_id) { @@ -126,7 +126,7 @@ impl MatchPattern for PushLimitThroughJoin { } impl NormalizationRule for PushLimitThroughJoin { - fn apply(&self, node_id: HepNodeId, graph: &mut HepGraph) -> Result<(), OptimizerError> { + fn apply(&self, node_id: HepNodeId, graph: &mut HepGraph) -> Result<(), DatabaseError> { if let Operator::Limit(op) = graph.operator(node_id) { if let Some(child_id) = graph.eldest_child_at(node_id) { let join_type = if let Operator::Join(op) = graph.operator(child_id) { @@ -163,7 +163,7 @@ impl MatchPattern for PushLimitIntoScan { } impl NormalizationRule for PushLimitIntoScan { - fn apply(&self, node_id: HepNodeId, graph: &mut HepGraph) -> Result<(), OptimizerError> { + fn apply(&self, node_id: HepNodeId, graph: &mut HepGraph) -> Result<(), DatabaseError> { if let Operator::Limit(limit_op) = graph.operator(node_id) { if let Some(child_index) = graph.eldest_child_at(node_id) { if let Operator::Scan(scan_op) = graph.operator(child_index) { @@ -184,7 +184,7 @@ impl NormalizationRule for PushLimitIntoScan { #[cfg(test)] mod tests { use crate::binder::test::select_sql_run; - use crate::db::DatabaseError; + use crate::errors::DatabaseError; use crate::optimizer::heuristic::batch::HepBatchStrategy; use crate::optimizer::heuristic::optimizer::HepOptimizer; use crate::optimizer::rule::normalization::NormalizationRuleImpl; diff --git 
a/src/optimizer/rule/normalization/pushdown_predicates.rs b/src/optimizer/rule/normalization/pushdown_predicates.rs index 27cd03d1..1ecafd26 100644 --- a/src/optimizer/rule/normalization/pushdown_predicates.rs +++ b/src/optimizer/rule/normalization/pushdown_predicates.rs @@ -1,10 +1,10 @@ use crate::catalog::ColumnRef; +use crate::errors::DatabaseError; use crate::expression::{BinaryOperator, ScalarExpression}; use crate::optimizer::core::pattern::Pattern; use crate::optimizer::core::pattern::PatternChildrenPredicate; use crate::optimizer::core::rule::{MatchPattern, NormalizationRule}; use crate::optimizer::heuristic::graph::{HepGraph, HepNodeId}; -use crate::optimizer::OptimizerError; use crate::planner::operator::filter::FilterOperator; use crate::planner::operator::join::JoinType; use crate::planner::operator::Operator; @@ -103,7 +103,7 @@ impl MatchPattern for PushPredicateThroughJoin { impl NormalizationRule for PushPredicateThroughJoin { // TODO: pushdown_predicates need to consider output columns - fn apply(&self, node_id: HepNodeId, graph: &mut HepGraph) -> Result<(), OptimizerError> { + fn apply(&self, node_id: HepNodeId, graph: &mut HepGraph) -> Result<(), DatabaseError> { let child_id = match graph.eldest_child_at(node_id) { Some(child_id) => child_id, None => return Ok(()), @@ -209,7 +209,7 @@ impl MatchPattern for PushPredicateIntoScan { } impl NormalizationRule for PushPredicateIntoScan { - fn apply(&self, node_id: HepNodeId, graph: &mut HepGraph) -> Result<(), OptimizerError> { + fn apply(&self, node_id: HepNodeId, graph: &mut HepGraph) -> Result<(), DatabaseError> { if let Operator::Filter(op) = graph.operator(node_id).clone() { if let Some(child_id) = graph.eldest_child_at(node_id) { if let Operator::Scan(child_op) = graph.operator_mut(child_id) { @@ -240,7 +240,7 @@ impl NormalizationRule for PushPredicateIntoScan { #[cfg(test)] mod tests { use crate::binder::test::select_sql_run; - use crate::db::DatabaseError; + use crate::errors::DatabaseError; 
use crate::expression::simplify::ConstantBinary::Scope; use crate::expression::{BinaryOperator, ScalarExpression}; use crate::optimizer::heuristic::batch::HepBatchStrategy; diff --git a/src/optimizer/rule/normalization/simplification.rs b/src/optimizer/rule/normalization/simplification.rs index e96c0d3f..3ee29649 100644 --- a/src/optimizer/rule/normalization/simplification.rs +++ b/src/optimizer/rule/normalization/simplification.rs @@ -1,7 +1,7 @@ +use crate::errors::DatabaseError; use crate::optimizer::core::pattern::{Pattern, PatternChildrenPredicate}; use crate::optimizer::core::rule::{MatchPattern, NormalizationRule}; use crate::optimizer::heuristic::graph::{HepGraph, HepNodeId}; -use crate::optimizer::OptimizerError; use crate::planner::operator::join::JoinCondition; use crate::planner::operator::Operator; use itertools::Itertools; @@ -28,7 +28,7 @@ lazy_static! { pub struct ConstantCalculation; impl ConstantCalculation { - fn _apply(node_id: HepNodeId, graph: &mut HepGraph) -> Result<(), OptimizerError> { + fn _apply(node_id: HepNodeId, graph: &mut HepGraph) -> Result<(), DatabaseError> { let operator = graph.operator_mut(node_id); match operator { @@ -83,7 +83,7 @@ impl MatchPattern for ConstantCalculation { } impl NormalizationRule for ConstantCalculation { - fn apply(&self, node_id: HepNodeId, graph: &mut HepGraph) -> Result<(), OptimizerError> { + fn apply(&self, node_id: HepNodeId, graph: &mut HepGraph) -> Result<(), DatabaseError> { Self::_apply(node_id, graph)?; // mark changed to skip this rule batch graph.version += 1; @@ -102,7 +102,7 @@ impl MatchPattern for SimplifyFilter { } impl NormalizationRule for SimplifyFilter { - fn apply(&self, node_id: HepNodeId, graph: &mut HepGraph) -> Result<(), OptimizerError> { + fn apply(&self, node_id: HepNodeId, graph: &mut HepGraph) -> Result<(), DatabaseError> { if let Operator::Filter(mut filter_op) = graph.operator(node_id).clone() { filter_op.predicate.simplify()?; 
filter_op.predicate.constant_calculation()?; @@ -118,7 +118,7 @@ impl NormalizationRule for SimplifyFilter { mod test { use crate::binder::test::select_sql_run; use crate::catalog::{ColumnCatalog, ColumnDesc, ColumnSummary}; - use crate::db::DatabaseError; + use crate::errors::DatabaseError; use crate::expression::simplify::ConstantBinary; use crate::expression::{BinaryOperator, ScalarExpression, UnaryOperator}; use crate::optimizer::heuristic::batch::HepBatchStrategy; diff --git a/src/storage/kip.rs b/src/storage/kip.rs index ade2cf5d..7c4ce09d 100644 --- a/src/storage/kip.rs +++ b/src/storage/kip.rs @@ -1,9 +1,10 @@ use crate::catalog::{ColumnCatalog, ColumnRef, TableCatalog, TableMeta, TableName}; +use crate::errors::DatabaseError; use crate::expression::simplify::ConstantBinary; use crate::optimizer::core::column_meta::{ColumnMeta, ColumnMetaLoader}; use crate::storage::table_codec::TableCodec; use crate::storage::{ - tuple_projection, Bounds, IndexIter, Iter, Projections, Storage, StorageError, Transaction, + tuple_projection, Bounds, IndexIter, Iter, Projections, Storage, Transaction, }; use crate::types::index::{Index, IndexMetaRef}; use crate::types::tuple::{Tuple, TupleId}; @@ -26,7 +27,7 @@ pub struct KipStorage { } impl KipStorage { - pub async fn new(path: impl Into + Send) -> Result { + pub async fn new(path: impl Into + Send) -> Result { let storage = storage::KipStorage::open_with_config(Config::new(path).enable_level_0_memorization()) .await?; @@ -42,7 +43,7 @@ impl KipStorage { impl Storage for KipStorage { type TransactionType = KipTransaction; - async fn transaction(&self) -> Result { + async fn transaction(&self) -> Result { let tx = self.inner.new_transaction(CheckType::Optimistic).await; Ok(KipTransaction { @@ -67,10 +68,10 @@ impl Transaction for KipTransaction { table_name: TableName, bounds: Bounds, projections: Projections, - ) -> Result, StorageError> { + ) -> Result, DatabaseError> { let all_columns = self .table(table_name.clone()) - 
.ok_or(StorageError::TableNotFound)? + .ok_or(DatabaseError::TableNotFound)? .all_columns(); let (min, max) = TableCodec::tuple_bound(&table_name); let iter = self.tx.iter(Bound::Included(&min), Bound::Included(&max))?; @@ -91,10 +92,10 @@ impl Transaction for KipTransaction { projections: Projections, index_meta: IndexMetaRef, binaries: Vec, - ) -> Result, StorageError> { + ) -> Result, DatabaseError> { let table = self .table(table_name.clone()) - .ok_or(StorageError::TableNotFound)?; + .ok_or(DatabaseError::TableNotFound)?; let offset = offset_option.unwrap_or(0); Ok(IndexIter { @@ -116,7 +117,7 @@ impl Transaction for KipTransaction { index: Index, tuple_ids: Vec, is_unique: bool, - ) -> Result<(), StorageError> { + ) -> Result<(), DatabaseError> { let (key, value) = TableCodec::encode_index(table_name, &index, &tuple_ids)?; if let Some(bytes) = self.tx.get(&key)? { @@ -124,7 +125,7 @@ impl Transaction for KipTransaction { let old_tuple_ids = TableCodec::decode_index(&bytes)?; if old_tuple_ids[0] != tuple_ids[0] { - return Err(StorageError::DuplicateUniqueValue); + return Err(DatabaseError::DuplicateUniqueValue); } else { return Ok(()); } @@ -138,7 +139,7 @@ impl Transaction for KipTransaction { Ok(()) } - fn del_index(&mut self, table_name: &str, index: &Index) -> Result<(), StorageError> { + fn del_index(&mut self, table_name: &str, index: &Index) -> Result<(), DatabaseError> { let key = TableCodec::encode_index_key(table_name, index)?; self.tx.remove(&key)?; @@ -151,18 +152,18 @@ impl Transaction for KipTransaction { table_name: &str, tuple: Tuple, is_overwrite: bool, - ) -> Result<(), StorageError> { + ) -> Result<(), DatabaseError> { let (key, value) = TableCodec::encode_tuple(table_name, &tuple)?; if !is_overwrite && self.tx.get(&key)?.is_some() { - return Err(StorageError::DuplicatePrimaryKey); + return Err(DatabaseError::DuplicatePrimaryKey); } self.tx.set(key, value); Ok(()) } - fn delete(&mut self, table_name: &str, tuple_id: TupleId) -> Result<(), 
StorageError> { + fn delete(&mut self, table_name: &str, tuple_id: TupleId) -> Result<(), DatabaseError> { let key = TableCodec::encode_tuple_key(table_name, &tuple_id)?; self.tx.remove(&key)?; @@ -174,10 +175,10 @@ impl Transaction for KipTransaction { table_name: &TableName, column: &ColumnCatalog, if_not_exists: bool, - ) -> Result { + ) -> Result { if let Some(mut catalog) = self.table(table_name.clone()).cloned() { if !column.nullable && column.default_value().is_none() { - return Err(StorageError::NeedNullAbleOrDefault); + return Err(DatabaseError::NeedNullAbleOrDefault); } for col in catalog.all_columns() { @@ -185,7 +186,7 @@ impl Transaction for KipTransaction { return if if_not_exists { Ok(col.id().unwrap()) } else { - Err(StorageError::DuplicateColumn) + Err(DatabaseError::DuplicateColumn) }; } } @@ -210,7 +211,7 @@ impl Transaction for KipTransaction { Ok(col_id) } else { - Err(StorageError::TableNotFound) + Err(DatabaseError::TableNotFound) } } @@ -219,7 +220,7 @@ impl Transaction for KipTransaction { table_name: &TableName, column_name: &str, if_exists: bool, - ) -> Result<(), StorageError> { + ) -> Result<(), DatabaseError> { if let Some(catalog) = self.table(table_name.clone()).cloned() { let column = catalog.get_column_by_name(column_name).unwrap(); @@ -245,7 +246,7 @@ impl Transaction for KipTransaction { Ok(()) } else { - Err(StorageError::TableNotFound) + Err(DatabaseError::TableNotFound) } } @@ -254,14 +255,14 @@ impl Transaction for KipTransaction { table_name: TableName, columns: Vec, if_not_exists: bool, - ) -> Result { + ) -> Result { let (table_key, value) = TableCodec::encode_root_table(&TableMeta::empty(table_name.clone()))?; if self.tx.get(&table_key)?.is_some() { if if_not_exists { return Ok(table_name); } - return Err(StorageError::TableExists); + return Err(DatabaseError::TableExists); } self.tx.set(table_key, value); @@ -278,12 +279,12 @@ impl Transaction for KipTransaction { Ok(table_name) } - fn drop_table(&mut self, table_name: 
&str, if_exists: bool) -> Result<(), StorageError> { + fn drop_table(&mut self, table_name: &str, if_exists: bool) -> Result<(), DatabaseError> { if self.table(Arc::new(table_name.to_string())).is_none() { if if_exists { return Ok(()); } else { - return Err(StorageError::TableNotFound); + return Err(DatabaseError::TableNotFound); } } self.drop_data(table_name)?; @@ -302,7 +303,7 @@ impl Transaction for KipTransaction { Ok(()) } - fn drop_data(&mut self, table_name: &str) -> Result<(), StorageError> { + fn drop_data(&mut self, table_name: &str) -> Result<(), DatabaseError> { let (tuple_min, tuple_max) = TableCodec::tuple_bound(table_name); Self::_drop_data(&mut self.tx, &tuple_min, &tuple_max)?; @@ -330,7 +331,7 @@ impl Transaction for KipTransaction { option } - fn table_metas(&self) -> Result, StorageError> { + fn table_metas(&self) -> Result, DatabaseError> { let mut metas = vec![]; let (min, max) = TableCodec::root_table_bound(); let mut iter = self.tx.iter(Bound::Included(&min), Bound::Included(&max))?; @@ -346,7 +347,7 @@ impl Transaction for KipTransaction { Ok(metas) } - fn save_table_meta(&mut self, table_meta: &TableMeta) -> Result<(), StorageError> { + fn save_table_meta(&mut self, table_meta: &TableMeta) -> Result<(), DatabaseError> { let _ = self.meta_cache.remove(&table_meta.table_name); let (key, value) = TableCodec::encode_root_table(table_meta)?; self.tx.set(key, value); @@ -354,7 +355,7 @@ impl Transaction for KipTransaction { Ok(()) } - fn column_meta_paths(&self, table_name: &str) -> Result, StorageError> { + fn column_meta_paths(&self, table_name: &str) -> Result, DatabaseError> { if let Some(bytes) = self .tx .get(&TableCodec::encode_root_table_key(table_name))? 
@@ -374,7 +375,7 @@ impl Transaction for KipTransaction { ColumnMetaLoader::new(self, &self.meta_cache) } - async fn commit(self) -> Result<(), StorageError> { + async fn commit(self) -> Result<(), DatabaseError> { self.tx.commit().await?; Ok(()) @@ -385,7 +386,7 @@ impl KipTransaction { fn table_collect( table_name: TableName, tx: &mvcc::Transaction, - ) -> Result<(Vec, Vec), StorageError> { + ) -> Result<(Vec, Vec), DatabaseError> { let (table_min, table_max) = TableCodec::table_bound(&table_name); let mut column_iter = tx.iter(Bound::Included(&table_min), Bound::Included(&table_max))?; @@ -406,7 +407,7 @@ impl KipTransaction { Ok((columns, index_metas)) } - fn _drop_data(tx: &mut mvcc::Transaction, min: &[u8], max: &[u8]) -> Result<(), StorageError> { + fn _drop_data(tx: &mut mvcc::Transaction, min: &[u8], max: &[u8]) -> Result<(), DatabaseError> { let mut iter = tx.iter(Bound::Included(min), Bound::Included(max))?; let mut data_keys = vec![]; @@ -427,7 +428,7 @@ impl KipTransaction { fn create_index_meta_for_table( tx: &mut mvcc::Transaction, table: &mut TableCatalog, - ) -> Result<(), StorageError> { + ) -> Result<(), DatabaseError> { let table_name = table.name.clone(); for col in table @@ -464,7 +465,7 @@ pub struct KipIter<'a> { } impl Iter for KipIter<'_> { - fn next_tuple(&mut self) -> Result, StorageError> { + fn next_tuple(&mut self) -> Result, DatabaseError> { while self.offset > 0 { let _ = self.iter.try_next()?; self.offset -= 1; @@ -495,11 +496,12 @@ impl Iter for KipIter<'_> { #[cfg(test)] mod test { use crate::catalog::{ColumnCatalog, ColumnDesc}; - use crate::db::{Database, DatabaseError}; + use crate::db::Database; + use crate::errors::DatabaseError; use crate::expression::simplify::ConstantBinary; use crate::expression::ScalarExpression; use crate::storage::kip::KipStorage; - use crate::storage::{IndexIter, Iter, Storage, StorageError, Transaction}; + use crate::storage::{IndexIter, Iter, Storage, Transaction}; use 
crate::types::index::IndexMeta; use crate::types::tuple::Tuple; use crate::types::value::DataValue; @@ -510,7 +512,7 @@ mod test { use tempfile::TempDir; #[tokio::test] - async fn test_in_kipdb_storage_works_with_data() -> Result<(), StorageError> { + async fn test_in_kipdb_storage_works_with_data() -> Result<(), DatabaseError> { let temp_dir = TempDir::new().expect("unable to create temporary working directory"); let storage = KipStorage::new(temp_dir.path()).await?; let mut transaction = storage.transaction().await?; diff --git a/src/storage/mod.rs b/src/storage/mod.rs index 72e76ca1..819e84de 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -1,19 +1,18 @@ pub mod kip; mod table_codec; -use crate::catalog::{CatalogError, ColumnCatalog, TableCatalog, TableMeta, TableName}; +use crate::catalog::{ColumnCatalog, TableCatalog, TableMeta, TableName}; +use crate::errors::DatabaseError; use crate::expression::simplify::ConstantBinary; use crate::expression::ScalarExpression; use crate::optimizer::core::column_meta::ColumnMetaLoader; use crate::storage::table_codec::TableCodec; -use crate::types::errors::TypeError; use crate::types::index::{Index, IndexMetaRef}; use crate::types::tuple::{Tuple, TupleId}; use crate::types::value::ValueRef; use crate::types::ColumnId; use kip_db::kernel::lsm::iterator::Iter as DBIter; use kip_db::kernel::lsm::mvcc; -use kip_db::KernelError; use std::collections::{Bound, VecDeque}; use std::mem; use std::ops::SubAssign; @@ -22,7 +21,7 @@ pub trait Storage: Sync + Send + Clone + 'static { type TransactionType: Transaction; #[allow(async_fn_in_trait)] - async fn transaction(&self) -> Result; + async fn transaction(&self) -> Result; } /// Optional bounds of the reader, of the form (offset, limit). 
@@ -40,7 +39,7 @@ pub trait Transaction: Sync + Send + 'static { table_name: TableName, bounds: Bounds, projection: Projections, - ) -> Result, StorageError>; + ) -> Result, DatabaseError>; fn read_by_index( &self, @@ -49,7 +48,7 @@ pub trait Transaction: Sync + Send + 'static { projection: Projections, index_meta: IndexMetaRef, binaries: Vec, - ) -> Result, StorageError>; + ) -> Result, DatabaseError>; fn add_index( &mut self, @@ -57,52 +56,52 @@ pub trait Transaction: Sync + Send + 'static { index: Index, tuple_ids: Vec, is_unique: bool, - ) -> Result<(), StorageError>; + ) -> Result<(), DatabaseError>; - fn del_index(&mut self, table_name: &str, index: &Index) -> Result<(), StorageError>; + fn del_index(&mut self, table_name: &str, index: &Index) -> Result<(), DatabaseError>; fn append( &mut self, table_name: &str, tuple: Tuple, is_overwrite: bool, - ) -> Result<(), StorageError>; + ) -> Result<(), DatabaseError>; - fn delete(&mut self, table_name: &str, tuple_id: TupleId) -> Result<(), StorageError>; + fn delete(&mut self, table_name: &str, tuple_id: TupleId) -> Result<(), DatabaseError>; fn add_column( &mut self, table_name: &TableName, column: &ColumnCatalog, if_not_exists: bool, - ) -> Result; + ) -> Result; fn drop_column( &mut self, table_name: &TableName, column: &str, if_exists: bool, - ) -> Result<(), StorageError>; + ) -> Result<(), DatabaseError>; fn create_table( &mut self, table_name: TableName, columns: Vec, if_not_exists: bool, - ) -> Result; + ) -> Result; - fn drop_table(&mut self, table_name: &str, if_exists: bool) -> Result<(), StorageError>; - fn drop_data(&mut self, table_name: &str) -> Result<(), StorageError>; + fn drop_table(&mut self, table_name: &str, if_exists: bool) -> Result<(), DatabaseError>; + fn drop_data(&mut self, table_name: &str) -> Result<(), DatabaseError>; fn table(&self, table_name: TableName) -> Option<&TableCatalog>; - fn table_metas(&self) -> Result, StorageError>; - fn save_table_meta(&mut self, table_meta: 
&TableMeta) -> Result<(), StorageError>; - fn column_meta_paths(&self, table_name: &str) -> Result, StorageError>; + fn table_metas(&self) -> Result, DatabaseError>; + fn save_table_meta(&mut self, table_meta: &TableMeta) -> Result<(), DatabaseError>; + fn column_meta_paths(&self, table_name: &str) -> Result, DatabaseError>; fn meta_loader(&self) -> ColumnMetaLoader where Self: Sized; #[allow(async_fn_in_trait)] - async fn commit(self) -> Result<(), StorageError>; + async fn commit(self) -> Result<(), DatabaseError>; } enum IndexValue { @@ -137,7 +136,7 @@ impl IndexIter<'_> { } } - fn val_to_key(&self, val: ValueRef) -> Result, TypeError> { + fn val_to_key(&self, val: ValueRef) -> Result, DatabaseError> { if self.index_meta.is_unique { let index = Index::new(self.index_meta.id, vec![val]); @@ -147,7 +146,7 @@ impl IndexIter<'_> { } } - fn get_tuple_by_id(&mut self, tuple_id: &TupleId) -> Result, StorageError> { + fn get_tuple_by_id(&mut self, tuple_id: &TupleId) -> Result, DatabaseError> { let key = TableCodec::encode_tuple_key(&self.table.name, &tuple_id)?; self.tx @@ -166,7 +165,7 @@ impl IndexIter<'_> { } impl Iter for IndexIter<'_> { - fn next_tuple(&mut self) -> Result, StorageError> { + fn next_tuple(&mut self) -> Result, DatabaseError> { // 1. 
check limit if matches!(self.limit, Some(0)) || self.is_empty() { self.scope_iter = None; @@ -230,7 +229,7 @@ impl Iter for IndexIter<'_> { let table_name = &self.table.name; let index_meta = &self.index_meta; - let bound_encode = |bound: Bound| -> Result<_, StorageError> { + let bound_encode = |bound: Bound| -> Result<_, DatabaseError> { match bound { Bound::Included(val) => Ok(Bound::Included(self.val_to_key(val)?)), Bound::Excluded(val) => Ok(Bound::Excluded(self.val_to_key(val)?)), @@ -285,14 +284,14 @@ impl Iter for IndexIter<'_> { } pub trait Iter: Sync + Send { - fn next_tuple(&mut self) -> Result, StorageError>; + fn next_tuple(&mut self) -> Result, DatabaseError>; } pub(crate) fn tuple_projection( limit: &mut Option, projections: &Projections, tuple: Tuple, -) -> Result { +) -> Result { let projection_len = projections.len(); let mut columns = Vec::with_capacity(projection_len); let mut values = Vec::with_capacity(projection_len); @@ -312,39 +311,3 @@ pub(crate) fn tuple_projection( values, }) } - -#[derive(thiserror::Error, Debug)] -pub enum StorageError { - #[error("catalog error")] - CatalogError(#[from] CatalogError), - - #[error("kipdb error")] - KipDBError(KernelError), - - #[error("type error")] - TypeError(#[from] TypeError), - - #[error("The same primary key data already exists")] - DuplicatePrimaryKey, - - #[error("The column has been declared unique and the value already exists")] - DuplicateUniqueValue, - - #[error("The table not found")] - TableNotFound, - - #[error("The some column already exists")] - DuplicateColumn, - - #[error("Add column must be nullable or specify a default value")] - NeedNullAbleOrDefault, - - #[error("The table already exists")] - TableExists, -} - -impl From for StorageError { - fn from(value: KernelError) -> Self { - StorageError::KipDBError(value) - } -} diff --git a/src/storage/table_codec.rs b/src/storage/table_codec.rs index c4ce78e7..32769819 100644 --- a/src/storage/table_codec.rs +++ 
b/src/storage/table_codec.rs @@ -1,5 +1,5 @@ use crate::catalog::{ColumnCatalog, ColumnRef, TableMeta}; -use crate::types::errors::TypeError; +use crate::errors::DatabaseError; use crate::types::index::{Index, IndexId, IndexMeta}; use crate::types::tuple::{Tuple, TupleId}; use crate::types::LogicalType; @@ -137,14 +137,17 @@ impl TableCodec { /// Key: {TableName}{TUPLE_TAG}{BOUND_MIN_TAG}{RowID}(Sorted) /// Value: Tuple - pub fn encode_tuple(table_name: &str, tuple: &Tuple) -> Result<(Bytes, Bytes), TypeError> { - let tuple_id = tuple.id.clone().ok_or(TypeError::PrimaryKeyNotFound)?; + pub fn encode_tuple(table_name: &str, tuple: &Tuple) -> Result<(Bytes, Bytes), DatabaseError> { + let tuple_id = tuple.id.clone().ok_or(DatabaseError::PrimaryKeyNotFound)?; let key = Self::encode_tuple_key(table_name, &tuple_id)?; Ok((Bytes::from(key), Bytes::from(tuple.serialize_to()))) } - pub fn encode_tuple_key(table_name: &str, tuple_id: &TupleId) -> Result, TypeError> { + pub fn encode_tuple_key( + table_name: &str, + tuple_id: &TupleId, + ) -> Result, DatabaseError> { let mut key_prefix = Self::key_prefix(CodecType::Tuple, table_name); key_prefix.push(BOUND_MIN_TAG); @@ -160,7 +163,7 @@ impl TableCodec { | LogicalType::UBigint | LogicalType::Varchar(_) ) { - return Err(TypeError::InvalidType); + return Err(DatabaseError::InvalidType); } tuple_id.memcomparable_encode(&mut key_prefix)?; @@ -176,7 +179,7 @@ impl TableCodec { pub fn encode_index_meta( table_name: &str, index_meta: &IndexMeta, - ) -> Result<(Bytes, Bytes), TypeError> { + ) -> Result<(Bytes, Bytes), DatabaseError> { let mut key_prefix = Self::key_prefix(CodecType::IndexMeta, table_name); key_prefix.push(BOUND_MIN_TAG); key_prefix.append(&mut index_meta.id.to_be_bytes().to_vec()); @@ -187,7 +190,7 @@ impl TableCodec { )) } - pub fn decode_index_meta(bytes: &[u8]) -> Result { + pub fn decode_index_meta(bytes: &[u8]) -> Result { Ok(bincode::deserialize(bytes)?) 
} @@ -205,7 +208,7 @@ impl TableCodec { name: &str, index: &Index, tuple_ids: &[TupleId], - ) -> Result<(Bytes, Bytes), TypeError> { + ) -> Result<(Bytes, Bytes), DatabaseError> { let key = TableCodec::encode_index_key(name, index)?; Ok(( @@ -214,7 +217,7 @@ impl TableCodec { )) } - pub fn encode_index_key(name: &str, index: &Index) -> Result, TypeError> { + pub fn encode_index_key(name: &str, index: &Index) -> Result, DatabaseError> { let mut key_prefix = Self::key_prefix(CodecType::Index, name); key_prefix.push(BOUND_MIN_TAG); key_prefix.append(&mut index.id.to_be_bytes().to_vec()); @@ -228,7 +231,7 @@ impl TableCodec { Ok(key_prefix) } - pub fn decode_index(bytes: &[u8]) -> Result, TypeError> { + pub fn decode_index(bytes: &[u8]) -> Result, DatabaseError> { Ok(bincode::deserialize(bytes)?) } @@ -239,7 +242,7 @@ impl TableCodec { pub fn encode_column( table_name: &str, col: &ColumnCatalog, - ) -> Result<(Bytes, Bytes), TypeError> { + ) -> Result<(Bytes, Bytes), DatabaseError> { let bytes = bincode::serialize(col)?; let mut key_prefix = Self::key_prefix(CodecType::Column, table_name); @@ -249,13 +252,13 @@ impl TableCodec { Ok((Bytes::from(key_prefix), Bytes::from(bytes))) } - pub fn decode_column(bytes: &[u8]) -> Result { + pub fn decode_column(bytes: &[u8]) -> Result { Ok(bincode::deserialize::(bytes)?) } /// Key: Root{BOUND_MIN_TAG}{TableName} /// Value: TableName - pub fn encode_root_table(meta: &TableMeta) -> Result<(Bytes, Bytes), TypeError> { + pub fn encode_root_table(meta: &TableMeta) -> Result<(Bytes, Bytes), DatabaseError> { let key = Self::encode_root_table_key(&meta.table_name); Ok((Bytes::from(key), Bytes::from(bincode::serialize(meta)?))) @@ -265,7 +268,7 @@ impl TableCodec { Self::key_prefix(CodecType::Root, table_name) } - pub fn decode_root_table(bytes: &[u8]) -> Result { + pub fn decode_root_table(bytes: &[u8]) -> Result { Ok(bincode::deserialize(bytes)?) 
} } @@ -273,8 +276,8 @@ impl TableCodec { #[cfg(test)] mod tests { use crate::catalog::{ColumnCatalog, ColumnDesc, TableCatalog, TableMeta}; + use crate::errors::DatabaseError; use crate::storage::table_codec::TableCodec; - use crate::types::errors::TypeError; use crate::types::index::{Index, IndexMeta}; use crate::types::tuple::Tuple; use crate::types::value::DataValue; @@ -305,7 +308,7 @@ mod tests { } #[test] - fn test_table_codec_tuple() -> Result<(), TypeError> { + fn test_table_codec_tuple() -> Result<(), DatabaseError> { let table_catalog = build_table_codec(); let tuple = Tuple { @@ -342,7 +345,7 @@ mod tests { } #[test] - fn test_table_codec_index_meta() -> Result<(), TypeError> { + fn test_table_codec_index_meta() -> Result<(), DatabaseError> { let index_meta = IndexMeta { id: 0, column_ids: vec![0], @@ -358,7 +361,7 @@ mod tests { } #[test] - fn test_table_codec_index() -> Result<(), TypeError> { + fn test_table_codec_index() -> Result<(), DatabaseError> { let table_catalog = build_table_codec(); let index = Index { diff --git a/src/types/errors.rs b/src/types/errors.rs deleted file mode 100644 index 55dbb2b7..00000000 --- a/src/types/errors.rs +++ /dev/null @@ -1,78 +0,0 @@ -use chrono::ParseError; -use std::num::{ParseFloatError, ParseIntError, TryFromIntError}; -use std::str::ParseBoolError; -use std::string::FromUtf8Error; - -#[derive(thiserror::Error, Debug)] -pub enum TypeError { - #[error("invalid type")] - InvalidType, - #[error("must contain PrimaryKey!")] - PrimaryKeyNotFound, - #[error("not implemented sqlparser datatype: {0}")] - NotImplementedSqlparserDataType(String), - #[error("internal error: {0}")] - InternalError(String), - #[error("cast fail")] - CastFail, - #[error("too long")] - TooLong, - #[error("cannot be Null")] - NotNull, - #[error("try from int")] - TryFromInt( - #[source] - #[from] - TryFromIntError, - ), - #[error("parser int")] - ParseInt( - #[source] - #[from] - ParseIntError, - ), - #[error("parser bool")] - ParseBool( - 
#[source] - #[from] - ParseBoolError, - ), - #[error("parser float")] - ParseFloat( - #[source] - #[from] - ParseFloatError, - ), - #[error("parser date")] - ParseDate( - #[source] - #[from] - ParseError, - ), - #[error("bindcode")] - Bincode( - #[source] - #[from] - Box, - ), - #[error("try from decimal")] - TryFromDecimal( - #[source] - #[from] - rust_decimal::Error, - ), - #[error("from utf8")] - FromUtf8Error( - #[source] - #[from] - FromUtf8Error, - ), - #[error("{0} and {1} do not match")] - MisMatch(String, String), - #[error("io")] - IO( - #[source] - #[from] - std::io::Error, - ), -} diff --git a/src/types/mod.rs b/src/types/mod.rs index b5ed965c..e259b454 100644 --- a/src/types/mod.rs +++ b/src/types/mod.rs @@ -1,4 +1,3 @@ -pub mod errors; pub mod index; pub mod tuple; pub mod tuple_builder; @@ -9,11 +8,10 @@ use rust_decimal::Decimal; use serde::{Deserialize, Serialize}; use std::any::TypeId; +use crate::errors::DatabaseError; use sqlparser::ast::ExactNumberInfo; use strum_macros::AsRefStr; -use crate::types::errors::TypeError; - pub type ColumnId = u32; /// Sqlrs type conversion: @@ -160,7 +158,7 @@ impl LogicalType { pub fn max_logical_type( left: &LogicalType, right: &LogicalType, - ) -> Result { + ) -> Result { if left == right { return Ok(*left); } @@ -193,16 +191,13 @@ impl LogicalType { ) { return Ok(LogicalType::DateTime); } - Err(TypeError::InternalError(format!( - "can not compare two types: {:?} and {:?}", - left, right - ))) + Err(DatabaseError::Incomparable(*left, *right)) } fn combine_numeric_types( left: &LogicalType, right: &LogicalType, - ) -> Result { + ) -> Result { if left == right { return Ok(*left); } @@ -228,10 +223,7 @@ impl LogicalType { (LogicalType::Integer, _) | (_, LogicalType::UInteger) => Ok(LogicalType::Bigint), (LogicalType::Smallint, _) | (_, LogicalType::USmallint) => Ok(LogicalType::Integer), (LogicalType::Tinyint, _) | (_, LogicalType::UTinyint) => Ok(LogicalType::Smallint), - _ => 
Err(TypeError::InternalError(format!( - "can not combine these numeric types {:?} and {:?}", - left, right - ))), + _ => Err(DatabaseError::Incomparable(*left, *right)), } } @@ -303,7 +295,7 @@ impl LogicalType { /// sqlparser datatype to logical type impl TryFrom for LogicalType { - type Error = TypeError; + type Error = DatabaseError; fn try_from(value: sqlparser::ast::DataType) -> Result { match value { @@ -332,7 +324,7 @@ impl TryFrom for LogicalType { Ok(Self::Decimal(Some(p as u8), Some(s as u8))) } }, - other => Err(TypeError::NotImplementedSqlparserDataType( + other => Err(DatabaseError::NotImplementedSqlparserDataType( other.to_string(), )), } diff --git a/src/types/tuple_builder.rs b/src/types/tuple_builder.rs index c3cd5368..600406a3 100644 --- a/src/types/tuple_builder.rs +++ b/src/types/tuple_builder.rs @@ -1,5 +1,5 @@ use crate::catalog::{ColumnCatalog, ColumnRef}; -use crate::types::errors::TypeError; +use crate::errors::DatabaseError; use crate::types::tuple::Tuple; use crate::types::value::{DataValue, ValueRef}; use std::sync::Arc; @@ -13,7 +13,7 @@ impl TupleBuilder { TupleBuilder { columns } } - pub fn build_result(header: String, message: String) -> Result { + pub fn build_result(header: String, message: String) -> Result { let columns: Vec = vec![Arc::new(ColumnCatalog::new_dummy(header))]; let values: Vec = vec![Arc::new(DataValue::Utf8(Some(message)))]; @@ -27,7 +27,7 @@ impl TupleBuilder { pub fn build_with_row<'a>( &self, row: impl IntoIterator, - ) -> Result { + ) -> Result { let mut values = Vec::with_capacity(self.columns.len()); let mut primary_key = None; @@ -42,7 +42,7 @@ impl TupleBuilder { values.push(data_value); } if values.len() != self.columns.len() { - return Err(TypeError::MisMatch( + return Err(DatabaseError::MisMatch( "types".to_string(), "values".to_string(), )); diff --git a/src/types/value.rs b/src/types/value.rs index fca6b734..34dbdfad 100644 --- a/src/types/value.rs +++ b/src/types/value.rs @@ -10,7 +10,7 @@ use 
std::str::FromStr; use std::sync::Arc; use std::{cmp, fmt, mem}; -use crate::types::errors::TypeError; +use crate::errors::DatabaseError; use ordered_float::OrderedFloat; use rust_decimal::prelude::{FromPrimitive, ToPrimitive}; use serde::{Deserialize, Serialize}; @@ -229,7 +229,7 @@ macro_rules! varchar_cast { let string_value = format!("{}", v); if let Some(len) = $len { if string_value.len() > *len as usize { - return Err(TypeError::TooLong); + return Err(DatabaseError::TooLong); } } Ok(DataValue::Utf8(Some(string_value))) @@ -255,7 +255,7 @@ impl DataValue { } } - pub(crate) fn check_len(&self, logic_type: &LogicalType) -> Result<(), TypeError> { + pub(crate) fn check_len(&self, logic_type: &LogicalType) -> Result<(), DatabaseError> { let is_over_len = match (logic_type, self) { (LogicalType::Varchar(Some(len)), DataValue::Utf8(Some(val))) => { val.len() > *len as usize @@ -263,12 +263,12 @@ impl DataValue { (LogicalType::Decimal(full_len, scale_len), DataValue::Decimal(Some(val))) => { if let Some(len) = full_len { if val.mantissa().ilog10() + 1 > *len as u32 { - return Err(TypeError::TooLong); + return Err(DatabaseError::TooLong); } } if let Some(len) = scale_len { if val.scale() > *len as u32 { - return Err(TypeError::TooLong); + return Err(DatabaseError::TooLong); } } false @@ -277,7 +277,7 @@ impl DataValue { }; if is_over_len { - return Err(TypeError::TooLong); + return Err(DatabaseError::TooLong); } Ok(()) @@ -506,7 +506,7 @@ impl DataValue { } } - pub fn memcomparable_encode(&self, b: &mut Vec) -> Result<(), TypeError> { + pub fn memcomparable_encode(&self, b: &mut Vec) -> Result<(), DatabaseError> { match self { DataValue::Int8(Some(v)) => encode_u!(b, *v as u8 ^ 0x80_u8), DataValue::Int16(Some(v)) => encode_u!(b, *v as u16 ^ 0x8000_u16), @@ -548,7 +548,7 @@ impl DataValue { DataValue::Decimal(Some(_v)) => todo!(), value => { if !value.is_null() { - return Err(TypeError::InvalidType); + return Err(DatabaseError::InvalidType); } } } @@ -556,10 +556,10 
@@ impl DataValue { Ok(()) } - pub fn cast(self, to: &LogicalType) -> Result { + pub fn cast(self, to: &LogicalType) -> Result { match self { DataValue::Null => match to { - LogicalType::Invalid => Err(TypeError::CastFail), + LogicalType::Invalid => Err(DatabaseError::CastFail), LogicalType::SqlNull => Ok(DataValue::Null), LogicalType::Boolean => Ok(DataValue::Boolean(None)), LogicalType::Tinyint => Ok(DataValue::Int8(None)), @@ -591,7 +591,7 @@ impl DataValue { LogicalType::Float => Ok(DataValue::Float32(value.map(|v| v.into()))), LogicalType::Double => Ok(DataValue::Float64(value.map(|v| v.into()))), LogicalType::Varchar(len) => varchar_cast!(value, len), - _ => Err(TypeError::CastFail), + _ => Err(DatabaseError::CastFail), }, DataValue::Float32(value) => match to { LogicalType::SqlNull => Ok(DataValue::Null), @@ -601,14 +601,15 @@ impl DataValue { LogicalType::Decimal(_, option) => Ok(DataValue::Decimal( value .map(|v| { - let mut decimal = Decimal::from_f32(v).ok_or(TypeError::CastFail)?; + let mut decimal = + Decimal::from_f32(v).ok_or(DatabaseError::CastFail)?; Self::decimal_round_f(option, &mut decimal); - Ok::(decimal) + Ok::(decimal) }) .transpose()?, )), - _ => Err(TypeError::CastFail), + _ => Err(DatabaseError::CastFail), }, DataValue::Float64(value) => match to { LogicalType::SqlNull => Ok(DataValue::Null), @@ -618,14 +619,15 @@ impl DataValue { LogicalType::Decimal(_, option) => Ok(DataValue::Decimal( value .map(|v| { - let mut decimal = Decimal::from_f64(v).ok_or(TypeError::CastFail)?; + let mut decimal = + Decimal::from_f64(v).ok_or(DatabaseError::CastFail)?; Self::decimal_round_f(option, &mut decimal); - Ok::(decimal) + Ok::(decimal) }) .transpose()?, )), - _ => Err(TypeError::CastFail), + _ => Err(DatabaseError::CastFail), }, DataValue::Int8(value) => match to { LogicalType::SqlNull => Ok(DataValue::Null), @@ -652,7 +654,7 @@ impl DataValue { decimal }))), - _ => Err(TypeError::CastFail), + _ => Err(DatabaseError::CastFail), }, 
DataValue::Int16(value) => match to { LogicalType::SqlNull => Ok(DataValue::Null), @@ -678,7 +680,7 @@ impl DataValue { decimal }))), - _ => Err(TypeError::CastFail), + _ => Err(DatabaseError::CastFail), }, DataValue::Int32(value) => match to { LogicalType::SqlNull => Ok(DataValue::Null), @@ -703,7 +705,7 @@ impl DataValue { decimal }))), - _ => Err(TypeError::CastFail), + _ => Err(DatabaseError::CastFail), }, DataValue::Int64(value) => match to { LogicalType::SqlNull => Ok(DataValue::Null), @@ -727,7 +729,7 @@ impl DataValue { decimal }))), - _ => Err(TypeError::CastFail), + _ => Err(DatabaseError::CastFail), }, DataValue::UInt8(value) => match to { LogicalType::SqlNull => Ok(DataValue::Null), @@ -747,7 +749,7 @@ impl DataValue { decimal }))), - _ => Err(TypeError::CastFail), + _ => Err(DatabaseError::CastFail), }, DataValue::UInt16(value) => match to { LogicalType::SqlNull => Ok(DataValue::Null), @@ -765,7 +767,7 @@ impl DataValue { decimal }))), - _ => Err(TypeError::CastFail), + _ => Err(DatabaseError::CastFail), }, DataValue::UInt32(value) => match to { LogicalType::SqlNull => Ok(DataValue::Null), @@ -781,7 +783,7 @@ impl DataValue { decimal }))), - _ => Err(TypeError::CastFail), + _ => Err(DatabaseError::CastFail), }, DataValue::UInt64(value) => match to { LogicalType::SqlNull => Ok(DataValue::Null), @@ -795,10 +797,10 @@ impl DataValue { decimal }))), - _ => Err(TypeError::CastFail), + _ => Err(DatabaseError::CastFail), }, DataValue::Utf8(value) => match to { - LogicalType::Invalid => Err(TypeError::CastFail), + LogicalType::Invalid => Err(DatabaseError::CastFail), LogicalType::SqlNull => Ok(DataValue::Null), LogicalType::Boolean => Ok(DataValue::Boolean( value.map(|v| bool::from_str(&v)).transpose()?, @@ -875,7 +877,7 @@ impl DataValue { Ok(DataValue::Date64(option)) } - _ => Err(TypeError::CastFail), + _ => Err(DatabaseError::CastFail), }, DataValue::Date64(value) => match to { LogicalType::SqlNull => Ok(DataValue::Null), @@ -889,7 +891,7 @@ impl DataValue 
{ Ok(DataValue::Date32(option)) } LogicalType::DateTime => Ok(DataValue::Date64(value)), - _ => Err(TypeError::CastFail), + _ => Err(DatabaseError::CastFail), }, DataValue::Decimal(value) => match to { LogicalType::SqlNull => Ok(DataValue::Null), @@ -897,7 +899,7 @@ impl DataValue { LogicalType::Double => Ok(DataValue::Float64(value.and_then(|v| v.to_f64()))), LogicalType::Decimal(_, _) => Ok(DataValue::Decimal(value)), LogicalType::Varchar(len) => varchar_cast!(value, len), - _ => Err(TypeError::CastFail), + _ => Err(DatabaseError::CastFail), }, } } @@ -1071,11 +1073,11 @@ impl fmt::Debug for DataValue { #[cfg(test)] mod test { - use crate::types::errors::TypeError; + use crate::errors::DatabaseError; use crate::types::value::DataValue; #[test] - fn test_mem_comparable_int() -> Result<(), TypeError> { + fn test_mem_comparable_int() -> Result<(), DatabaseError> { let mut key_i8_1 = Vec::new(); let mut key_i8_2 = Vec::new(); let mut key_i8_3 = Vec::new(); @@ -1132,7 +1134,7 @@ mod test { } #[test] - fn test_mem_comparable_float() -> Result<(), TypeError> { + fn test_mem_comparable_float() -> Result<(), DatabaseError> { let mut key_f32_1 = Vec::new(); let mut key_f32_2 = Vec::new(); let mut key_f32_3 = Vec::new(); diff --git a/tests/sqllogictest/src/lib.rs b/tests/sqllogictest/src/lib.rs index 9f13a515..52d23b6a 100644 --- a/tests/sqllogictest/src/lib.rs +++ b/tests/sqllogictest/src/lib.rs @@ -1,4 +1,5 @@ -use fnck_sql::db::{Database, DatabaseError}; +use fnck_sql::db::Database; +use fnck_sql::errors::DatabaseError; use fnck_sql::storage::kip::KipStorage; use sqllogictest::{AsyncDB, DBOutput, DefaultColumnType}; use std::time::Instant;