From d1b1429a68684ef551a9ed7bf482e9d86a1677a3 Mon Sep 17 00:00:00 2001 From: liuxy0551 Date: Mon, 21 Oct 2024 20:41:34 +0800 Subject: [PATCH] test: generic sql --- .../contextCollect/entityCollector.test.ts | 450 ++++++++++++++ .../sql/contextCollect/fixtures/common.sql | 31 + test/parser/sql/errorListener.test.ts | 67 ++ test/parser/sql/errorStrategy.test.ts | 64 ++ test/parser/sql/lexer.test.ts | 17 + test/parser/sql/listener.test.ts | 63 ++ .../suggestion/fixtures/multipleStatement.sql | 21 + .../fixtures/suggestionWithEntity.sql | 11 + .../suggestion/fixtures/syntaxSuggestion.sql | 55 ++ .../suggestion/fixtures/tokenSuggestion.sql | 18 + .../sql/suggestion/multipleStatement.test.ts | 69 +++ .../suggestion/suggestionWithEntity.test.ts | 156 +++++ .../sql/suggestion/syntaxSuggestion.test.ts | 496 +++++++++++++++ .../sql/suggestion/tokenSuggestion.test.ts | 196 ++++++ test/parser/sql/syntax/all.test.ts | 72 +++ test/parser/sql/syntax/fixtures/alter.sql | 73 +++ test/parser/sql/syntax/fixtures/create.sql | 282 +++++++++ test/parser/sql/syntax/fixtures/drop.sql | 49 ++ test/parser/sql/syntax/fixtures/insert.sql | 80 +++ test/parser/sql/syntax/fixtures/select.sql | 573 ++++++++++++++++++ test/parser/sql/syntax/fixtures/show.sql | 64 ++ test/parser/sql/syntax/fixtures/truncate.sql | 5 + test/parser/sql/syntax/fixtures/update.sql | 7 + test/parser/sql/syntax/fixtures/use.sql | 4 + test/parser/sql/validateInvalidSql.test.ts | 16 + test/parser/sql/visitor.test.ts | 33 + 26 files changed, 2972 insertions(+) create mode 100644 test/parser/sql/contextCollect/entityCollector.test.ts create mode 100644 test/parser/sql/contextCollect/fixtures/common.sql create mode 100644 test/parser/sql/errorListener.test.ts create mode 100644 test/parser/sql/errorStrategy.test.ts create mode 100644 test/parser/sql/lexer.test.ts create mode 100644 test/parser/sql/listener.test.ts create mode 100644 test/parser/sql/suggestion/fixtures/multipleStatement.sql create mode 100644 
test/parser/sql/suggestion/fixtures/suggestionWithEntity.sql create mode 100644 test/parser/sql/suggestion/fixtures/syntaxSuggestion.sql create mode 100644 test/parser/sql/suggestion/fixtures/tokenSuggestion.sql create mode 100644 test/parser/sql/suggestion/multipleStatement.test.ts create mode 100644 test/parser/sql/suggestion/suggestionWithEntity.test.ts create mode 100644 test/parser/sql/suggestion/syntaxSuggestion.test.ts create mode 100644 test/parser/sql/suggestion/tokenSuggestion.test.ts create mode 100644 test/parser/sql/syntax/all.test.ts create mode 100644 test/parser/sql/syntax/fixtures/alter.sql create mode 100644 test/parser/sql/syntax/fixtures/create.sql create mode 100644 test/parser/sql/syntax/fixtures/drop.sql create mode 100644 test/parser/sql/syntax/fixtures/insert.sql create mode 100644 test/parser/sql/syntax/fixtures/select.sql create mode 100644 test/parser/sql/syntax/fixtures/show.sql create mode 100644 test/parser/sql/syntax/fixtures/truncate.sql create mode 100644 test/parser/sql/syntax/fixtures/update.sql create mode 100644 test/parser/sql/syntax/fixtures/use.sql create mode 100644 test/parser/sql/validateInvalidSql.test.ts create mode 100644 test/parser/sql/visitor.test.ts diff --git a/test/parser/sql/contextCollect/entityCollector.test.ts b/test/parser/sql/contextCollect/entityCollector.test.ts new file mode 100644 index 00000000..9ca11d6a --- /dev/null +++ b/test/parser/sql/contextCollect/entityCollector.test.ts @@ -0,0 +1,450 @@ +import { ParseTreeListener } from 'antlr4ng'; +import fs from 'fs'; +import path from 'path'; +import { SqlParserListener } from 'src/lib/sql/SqlParserListener'; +import { + AttrName, + isCommonEntityContext, + isFuncEntityContext, + StmtContextType, +} from 'src/parser/common/entityCollector'; +import { EntityContextType } from 'src/parser/common/types'; +import { SqlEntityCollector, Sql, SqlSplitListener } from 'src/parser/sql'; + +const commonSql = fs.readFileSync(path.join(__dirname, 'fixtures', 
'common.sql'), 'utf-8'); + +describe('Sql entity collector tests', () => { + const sql = new Sql(); + const parseTree = sql.parse(commonSql); + const splitListener = new SqlSplitListener(); + sql.listen(splitListener as SqlParserListener, parseTree); + + test('validate common sql', () => { + expect(sql.validate(commonSql).length).toBe(0); + }); + + test('split results', () => { + expect(splitListener.statementsContext.length).toBe(12); + }); + + test('create table like', () => { + const columnCreateTableContext = splitListener.statementsContext[0]; + + const collectListener = new SqlEntityCollector(commonSql); + sql.listen(collectListener as ParseTreeListener, columnCreateTableContext); + + const allEntities = collectListener.getEntities(); + + expect(allEntities.length).toBe(2); + + const tableCreateEntity = allEntities[0]; + + expect(tableCreateEntity.entityContextType).toBe(EntityContextType.TABLE_CREATE); + expect(tableCreateEntity.text).toBe('new_tb1'); + expect(tableCreateEntity.position).toEqual({ + startIndex: 27, + endIndex: 33, + line: 1, + startColumn: 28, + endColumn: 35, + }); + + expect(tableCreateEntity.belongStmt.stmtContextType).toBe( + StmtContextType.CREATE_TABLE_STMT + ); + expect(tableCreateEntity.belongStmt.position).toEqual({ + startIndex: 0, + endIndex: 50, + startLine: 1, + endLine: 1, + startColumn: 1, + endColumn: 52, + }); + if (isCommonEntityContext(tableCreateEntity)) { + expect(tableCreateEntity.relatedEntities.length).toBe(1); + + const beLikedEntity = allEntities[1]; + + expect(tableCreateEntity.relatedEntities[0]).toBe(beLikedEntity); + expect(beLikedEntity.text).toBe('like_old_tb'); + expect(beLikedEntity.entityContextType).toBe(EntityContextType.TABLE); + expect(beLikedEntity.belongStmt).toBe(tableCreateEntity.belongStmt); + } + }); + + test('create hive format table', () => { + const columnCreateTableContext = splitListener.statementsContext[1]; + + const collectListener = new SqlEntityCollector(commonSql); + 
sql.listen(collectListener as ParseTreeListener, columnCreateTableContext); + + const allEntities = collectListener.getEntities(); + + expect(allEntities.length).toBe(1); + + const tableCreateEntity = allEntities[0]; + + expect(tableCreateEntity.entityContextType).toBe(EntityContextType.TABLE_CREATE); + expect(tableCreateEntity.text).toBe('new_tb2'); + expect(tableCreateEntity.position).toEqual({ + startIndex: 67, + endIndex: 73, + line: 3, + startColumn: 14, + endColumn: 21, + }); + expect(tableCreateEntity[AttrName.comment]).toEqual({ + text: "'this is new_tb2 comment'", + startIndex: 283, + endIndex: 307, + line: 9, + startColumn: 13, + endColumn: 38, + }); + + expect(tableCreateEntity.belongStmt.stmtContextType).toBe( + StmtContextType.CREATE_TABLE_STMT + ); + expect(tableCreateEntity.belongStmt.position).toEqual({ + startIndex: 54, + endIndex: 307, + startLine: 3, + endLine: 9, + startColumn: 1, + endColumn: 38, + }); + if (isCommonEntityContext(tableCreateEntity)) { + expect(tableCreateEntity.relatedEntities).toBeNull(); + expect(tableCreateEntity.columns.length).toBe(2); + + tableCreateEntity.columns.forEach((columEntity) => { + expect(columEntity.entityContextType).toBe(EntityContextType.COLUMN_CREATE); + expect(columEntity.belongStmt).toBe(tableCreateEntity.belongStmt); + expect(columEntity.text).toBe( + commonSql.slice( + columEntity.position.startIndex, + columEntity.position.endIndex + 1 + ) + ); + }); + expect(tableCreateEntity.columns[0][AttrName.comment]).toEqual({ + text: "'this is new col1'", + startIndex: 97, + endIndex: 114, + line: 3, + startColumn: 44, + endColumn: 62, + }); + expect(tableCreateEntity.columns[0][AttrName.colType]).toEqual({ + text: 'INT', + startIndex: 85, + endIndex: 87, + line: 3, + startColumn: 32, + endColumn: 35, + }); + expect(tableCreateEntity.columns[1][AttrName.colType]).toEqual({ + text: 'STRING', + startIndex: 126, + endIndex: 131, + line: 3, + startColumn: 73, + endColumn: 79, + }); + } + }); + + test('create data 
source table', () => { + const testingContext = splitListener.statementsContext[2]; + + const collectListener = new SqlEntityCollector(commonSql); + sql.listen(collectListener as ParseTreeListener, testingContext); + + const allEntities = collectListener.getEntities(); + + expect(allEntities.length).toBe(2); + + const tableCreateEntity = allEntities[0]; + const originTableEntity = allEntities[1]; + + expect(tableCreateEntity.entityContextType).toBe(EntityContextType.TABLE_CREATE); + expect(tableCreateEntity.text).toBe('student_copy'); + expect(tableCreateEntity.belongStmt.stmtContextType).toBe( + StmtContextType.CREATE_TABLE_STMT + ); + if (isCommonEntityContext(tableCreateEntity)) { + expect(tableCreateEntity.columns).toBeUndefined(); + expect(tableCreateEntity.relatedEntities.length).toBe(1); + expect(tableCreateEntity.relatedEntities[0]).toBe(originTableEntity); + } + expect(originTableEntity.entityContextType).toBe(EntityContextType.TABLE); + expect(originTableEntity.text).toBe('student'); + expect(originTableEntity.belongStmt.rootStmt).toBe(tableCreateEntity.belongStmt); + }); + + test('create view', () => { + const testingContext = splitListener.statementsContext[3]; + + const collectListener = new SqlEntityCollector(commonSql); + sql.listen(collectListener as ParseTreeListener, testingContext); + + const allEntities = collectListener.getEntities(); + + expect(allEntities.length).toBe(2); + + const viewEntity = allEntities[0]; + const tableEntity = allEntities[1]; + + expect(viewEntity.entityContextType).toBe(EntityContextType.VIEW_CREATE); + expect(viewEntity.belongStmt.stmtContextType).toBe(StmtContextType.CREATE_VIEW_STMT); + expect(viewEntity.text).toBe('new_view1'); + expect(viewEntity[AttrName.comment]).toEqual({ + text: "'View for experienced employees'", + startIndex: 455, + endIndex: 486, + line: 14, + startColumn: 9, + endColumn: 41, + }); + if (isCommonEntityContext(viewEntity)) { + expect(viewEntity.columns.length).toBe(2); + 
viewEntity.columns.forEach((columEntity) => { + expect(columEntity.entityContextType).toBe(EntityContextType.COLUMN_CREATE); + expect(columEntity.belongStmt).toBe(viewEntity.belongStmt); + expect(columEntity.text).toBe( + commonSql.slice( + columEntity.position.startIndex, + columEntity.position.endIndex + 1 + ) + ); + }); + expect(viewEntity.columns[0][AttrName.comment]).toEqual({ + text: "'Unique identification number'", + startIndex: 408, + endIndex: 437, + line: 13, + startColumn: 35, + endColumn: 65, + }); + } + expect(tableEntity.entityContextType).toBe(EntityContextType.TABLE); + expect(tableEntity.belongStmt.stmtContextType).toBe(StmtContextType.SELECT_STMT); + expect(tableEntity.belongStmt.rootStmt).toBe(viewEntity.belongStmt); + expect(tableEntity.text).toBe('old_tb_1'); + }); + + test('select from table', () => { + const testingContext = splitListener.statementsContext[4]; + + const collectListener = new SqlEntityCollector(commonSql); + sql.listen(collectListener as ParseTreeListener, testingContext); + + const allEntities = collectListener.getEntities(); + + expect(allEntities.length).toBe(2); + + const tableEntity1 = allEntities[0]; + const tableEntity2 = allEntities[1]; + + expect(tableEntity1.entityContextType).toBe(EntityContextType.TABLE); + expect(tableEntity1.belongStmt.stmtContextType).toBe(StmtContextType.SELECT_STMT); + expect(tableEntity1.text).toBe('employee'); + expect(tableEntity1[AttrName.alias]).toEqual({ + text: 'em', + startIndex: 602, + endIndex: 603, + line: 17, + startColumn: 55, + endColumn: 57, + }); + + expect(tableEntity2.entityContextType).toBe(EntityContextType.TABLE); + expect(tableEntity2.belongStmt.stmtContextType).toBe(StmtContextType.SELECT_STMT); + expect(tableEntity2.text).toBe('department'); + expect(tableEntity2[AttrName.alias]).toEqual({ + text: 'dept', + startIndex: 630, + endIndex: 633, + line: 17, + startColumn: 83, + endColumn: 87, + }); + }); + + test('insert into table values', () => { + const testingContext = 
splitListener.statementsContext[5]; + + const collectListener = new SqlEntityCollector(commonSql); + sql.listen(collectListener as ParseTreeListener, testingContext); + + const allEntities = collectListener.getEntities(); + + expect(allEntities.length).toBe(1); + + const tableEntity = allEntities[0]; + + expect(tableEntity.entityContextType).toBe(EntityContextType.TABLE); + expect(tableEntity.belongStmt.stmtContextType).toBe(StmtContextType.INSERT_STMT); + expect(tableEntity.text).toBe('insert_tb'); + }); + + test('insert overwrite table', () => { + const testingContext = splitListener.statementsContext[6]; + + const collectListener = new SqlEntityCollector(commonSql); + sql.listen(collectListener as ParseTreeListener, testingContext); + + const allEntities = collectListener.getEntities(); + + expect(allEntities.length).toBe(2); + + const targetTableEntity = allEntities[0]; + const sourceTableEntity = allEntities[1]; + + expect(targetTableEntity.entityContextType).toBe(EntityContextType.TABLE); + expect(targetTableEntity.belongStmt.stmtContextType).toBe(StmtContextType.INSERT_STMT); + expect(targetTableEntity.text).toBe('target_tb'); + + expect(sourceTableEntity.entityContextType).toBe(EntityContextType.TABLE); + expect(sourceTableEntity.belongStmt.stmtContextType).toBe(StmtContextType.SELECT_STMT); + expect(sourceTableEntity.belongStmt.rootStmt).toBe(targetTableEntity.belongStmt); + expect(sourceTableEntity.text).toBe('source_tb'); + }); + + test('insert overwrite dir', () => { + const testingContext = splitListener.statementsContext[7]; + + const collectListener = new SqlEntityCollector(commonSql); + sql.listen(collectListener as ParseTreeListener, testingContext); + + const allEntities = collectListener.getEntities(); + expect(allEntities.length).toBe(1); + + const sourceTableEntity = allEntities[0]; + + expect(sourceTableEntity.entityContextType).toBe(EntityContextType.TABLE); + 
expect(sourceTableEntity.belongStmt.stmtContextType).toBe(StmtContextType.SELECT_STMT); + expect(sourceTableEntity.text).toBe('from_tb'); + }); + + test('create database', () => { + const testingContext = splitListener.statementsContext[8]; + + const collectListener = new SqlEntityCollector(commonSql); + sql.listen(collectListener as ParseTreeListener, testingContext); + + const allEntities = collectListener.getEntities(); + expect(allEntities.length).toBe(1); + + const sourceTableEntity = allEntities[0]; + + expect(sourceTableEntity.entityContextType).toBe(EntityContextType.DATABASE_CREATE); + expect(sourceTableEntity.belongStmt.stmtContextType).toBe( + StmtContextType.CREATE_DATABASE_STMT + ); + expect(sourceTableEntity.text).toBe('customer_db'); + expect(sourceTableEntity[AttrName.comment]).toEqual({ + text: "'this is database comment'", + startIndex: 928, + endIndex: 953, + line: 25, + startColumn: 51, + endColumn: 77, + }); + }); + + test('use namespace', () => { + const testingContext = splitListener.statementsContext[9]; + + const collectListener = new SqlEntityCollector(commonSql); + sql.listen(collectListener as ParseTreeListener, testingContext); + + const allEntities = collectListener.getEntities(); + expect(allEntities.length).toBe(1); + + const sourceTableEntity = allEntities[0]; + + expect(sourceTableEntity.entityContextType).toBe(EntityContextType.DATABASE); + expect(sourceTableEntity.belongStmt.stmtContextType).toBe(StmtContextType.COMMON_STMT); + expect(sourceTableEntity.text).toBe('ns1'); + }); + + test('create function', () => { + const functionContext = splitListener.statementsContext[10]; + + const collectListener = new SqlEntityCollector(commonSql); + sql.listen(collectListener as ParseTreeListener, functionContext); + + const allEntities = collectListener.getEntities(); + + expect(allEntities.length).toBe(1); + + const functionEntity = allEntities[0]; + + expect(functionEntity.entityContextType).toBe(EntityContextType.FUNCTION_CREATE); + 
expect(functionEntity.text).toBe('simple_udf'); + expect(functionEntity.position).toEqual({ + endColumn: 38, + endIndex: 1013, + line: 29, + startColumn: 28, + startIndex: 1004, + }); + + expect(functionEntity.belongStmt.stmtContextType).toBe( + StmtContextType.CREATE_FUNCTION_STMT + ); + expect(functionEntity.belongStmt.position).toEqual({ + endColumn: 54, + endIndex: 1029, + endLine: 29, + startColumn: 1, + startIndex: 977, + startLine: 29, + }); + + if (isFuncEntityContext(functionEntity)) { + expect(functionEntity.arguments).toBeNull(); + expect(functionEntity.relatedEntities).toBeNull(); + } + }); + + test('create xxx function', () => { + const functionContext = splitListener.statementsContext[11]; + + const collectListener = new SqlEntityCollector(commonSql); + sql.listen(collectListener as ParseTreeListener, functionContext); + + const allEntities = collectListener.getEntities(); + + expect(allEntities.length).toBe(1); + + const functionEntity = allEntities[0]; + + expect(functionEntity.entityContextType).toBe(EntityContextType.FUNCTION_CREATE); + expect(functionEntity.text).toBe('simple_udf'); + expect(functionEntity.position).toEqual({ + endColumn: 27, + endIndex: 1058, + line: 31, + startColumn: 17, + startIndex: 1049, + }); + + expect(functionEntity.belongStmt.stmtContextType).toBe( + StmtContextType.CREATE_FUNCTION_STMT + ); + expect(functionEntity.belongStmt.position).toEqual({ + endColumn: 43, + endIndex: 1074, + endLine: 31, + startColumn: 1, + startIndex: 1033, + startLine: 31, + }); + if (isFuncEntityContext(functionEntity)) { + expect(functionEntity.arguments).toBeNull(); + expect(functionEntity.relatedEntities).toBeNull(); + } + }); +}); diff --git a/test/parser/sql/contextCollect/fixtures/common.sql b/test/parser/sql/contextCollect/fixtures/common.sql new file mode 100644 index 00000000..e3bbd97b --- /dev/null +++ b/test/parser/sql/contextCollect/fixtures/common.sql @@ -0,0 +1,31 @@ +CREATE TABLE IF NOT EXISTS new_tb1 like like_old_tb; + +CREATE 
TABLE new_tb2 (new_col1 INT COMMENT 'this is new col1', new_col2 STRING) + PARTITIONED BY (YEAR STRING) + CLUSTERED BY (new_col1, NAME) + SORTED BY (new_col1 ASC) + INTO 3 BUCKETS + STORED AS PARQUET + COMMENT 'this is new_tb2 comment'; + +CREATE TABLE student_copy USING CSV AS SELECT * FROM student; + +CREATE VIEW new_view1 (ID COMMENT 'Unique identification number', Name) +COMMENT 'View for experienced employees' +AS SELECT id, name FROM old_tb_1 WHERE working_years > 5; + +SELECT id, name, em.deptno, deptname FROM employee AS em CROSS JOIN department AS dept; + +INSERT INTO insert_tb (address, name, student_id) VALUES ('Hangzhou, China', 'Kent Yao', 11215016); + +INSERT OVERWRITE target_tb TABLE source_tb; + +INSERT OVERWRITE DIRECTORY '/path/to/output/directory' SELECT * FROM from_tb WHERE condition; + +CREATE DATABASE IF NOT EXISTS customer_db COMMENT 'this is database comment'; + +USE NAMESPACE ns1; + +CREATE OR REPLACE FUNCTION simple_udf AS 'SimpleUdfR'; + +CREATE FUNCTION simple_udf AS 'SimpleUdfR'; \ No newline at end of file diff --git a/test/parser/sql/errorListener.test.ts b/test/parser/sql/errorListener.test.ts new file mode 100644 index 00000000..60862059 --- /dev/null +++ b/test/parser/sql/errorListener.test.ts @@ -0,0 +1,67 @@ +import { Sql } from 'src/parser/sql'; + +const randomText = `dhsdansdnkla ndjnsla ndnalks`; +const sql1 = `ALTER VIEW`; +const sql2 = `SELECT * FROM `; +const sql3 = `DROP SCHEMA aaa aaa`; + +describe('Sql validate invalid sql and test msg', () => { + const sql = new Sql(); + + test('validate random text', () => { + const errors = sql.validate(randomText); + expect(errors.length).toBe(1); + expect(errors[0].message).toBe( + `'dhsdansdnkla' is not valid at this position, expecting a keyword` + ); + }); + + test('validate unComplete sql1', () => { + const errors = sql.validate(sql1); + expect(errors.length).toBe(1); + expect(errors[0].message).toBe('Statement is incomplete'); + }); + + test('validate unComplete sql2', () => { 
+ const errors = sql.validate(sql2); + expect(errors.length).toBe(1); + expect(errors[0].message).toBe( + 'Statement is incomplete, expecting an existing table or an existing view or an existing function or a keyword' + ); + }); + + test('validate unComplete sql3', () => { + const errors = sql.validate(sql3); + expect(errors.length).toBe(1); + expect(errors[0].message).toBe( + `'aaa' is not valid at this position, expecting an existing namespace or a keyword` + ); + }); + + test('validate random text cn', () => { + sql.locale = 'zh_CN'; + const errors = sql.validate(randomText); + expect(errors.length).toBe(1); + expect(errors[0].message).toBe(`'dhsdansdnkla' 在此位置无效,期望一个关键字`); + }); + + test('validate unComplete sql1 cn', () => { + const errors = sql.validate(sql1); + expect(errors.length).toBe(1); + expect(errors[0].message).toBe('语句不完整'); + }); + + test('validate unComplete sql2 cn', () => { + const errors = sql.validate(sql2); + expect(errors.length).toBe(1); + expect(errors[0].message).toBe( + '语句不完整,期望一个存在的table或者一个存在的view或者一个存在的function或者一个关键字' + ); + }); + + test('validate unComplete sql3 cn', () => { + const errors = sql.validate(sql3); + expect(errors.length).toBe(1); + expect(errors[0].message).toBe(`'aaa' 在此位置无效,期望一个存在的namespace或者一个关键字`); + }); +}); diff --git a/test/parser/sql/errorStrategy.test.ts b/test/parser/sql/errorStrategy.test.ts new file mode 100644 index 00000000..7ee8f851 --- /dev/null +++ b/test/parser/sql/errorStrategy.test.ts @@ -0,0 +1,64 @@ +import { Sql, SqlSplitListener } from 'src/parser/sql'; +import { SqlParserListener } from 'src/lib/sql/SqlParserListener'; + +const validSQL1 = `INSERT INTO country_page_view +VALUES ('Chinese', 'mumiao', 18), + ('Amercian', 'georage', 22);`; +const validSQL2 = 'SELECT * FROM tb;'; +const inValidSQL = 'CREATE TABLE;'; + +describe('Sql ErrorStrategy test', () => { + const sql = new Sql(); + + // TODO: handle unexpected case + // test('begin inValid', () => { + // const sqlText = [inValidSQL, 
validSQL1, validSQL2].join('\n'); + // // parse with empty errorListener + // const parseTree = sql.parse(sqlText, () => {}); + // const splitListener = new SqlSplitListener(); + // sql.listen(splitListener as SqlParserListener, parseTree); + + // const statementCount = splitListener.statementsContext.length; + // splitListener.statementsContext.map((item, index) => { + // if (index !== statementCount - 1 && index !== statementCount - 2) { + // expect(item.exception).not.toBe(null); + // } else { + // expect(item.exception).toBe(null); + // } + // }); + // }); + + // TODO: handle unexpected case + // test('middle inValid', () => { + // const sqlText = [validSQL1, inValidSQL, validSQL2].join('\n'); + // // parse with empty errorListener + // const parseTree = sql.parse(sqlText, () => {}); + // const splitListener = new SqlSplitListener(); + // sql.listen(splitListener as SqlParserListener, parseTree); + + // const statementCount = splitListener.statementsContext.length; + // splitListener.statementsContext.map((item, index) => { + // if (index !== statementCount - 1 && index !== 0) { + // expect(item.exception).not.toBe(null); + // } else { + // expect(item.exception).toBe(null); + // } + // }); + // }); + + test('end inValid', () => { + const sqlText = [validSQL1, validSQL2, inValidSQL].join('\n'); + // parse with empty errorListener + const parseTree = sql.parse(sqlText, () => {}); + const splitListener = new SqlSplitListener(); + sql.listen(splitListener as SqlParserListener, parseTree); + + splitListener.statementsContext.map((item, index) => { + if (index !== 0 && index !== 1) { + expect(item.exception).not.toBe(null); + } else { + expect(item.exception).toBe(null); + } + }); + }); +}); diff --git a/test/parser/sql/lexer.test.ts b/test/parser/sql/lexer.test.ts new file mode 100644 index 00000000..fc566a83 --- /dev/null +++ b/test/parser/sql/lexer.test.ts @@ -0,0 +1,17 @@ +import { Sql } from 'src/parser/sql'; + +describe('Sql Lexer tests', () => { + const sql = 
new Sql(); + + test('select id,name from user1;', () => { + const sqlText = `select id,name from user1;`; + const tokens = sql.getAllTokens(sqlText); + expect(tokens.length).toBe(10); + }); + + test('SELECT * FROM t WHERE x = 1 AND y = 2;', () => { + const sqlText = `SELECT * FROM t WHERE x = 1 AND y = 2;`; + const tokens = sql.getAllTokens(sqlText); + expect(tokens.length).toBe(24); + }); +}); diff --git a/test/parser/sql/listener.test.ts b/test/parser/sql/listener.test.ts new file mode 100644 index 00000000..2da46474 --- /dev/null +++ b/test/parser/sql/listener.test.ts @@ -0,0 +1,63 @@ +import { Sql } from 'src/parser/sql'; +import { SqlParserListener } from 'src/lib/sql/SqlParserListener'; + +describe('Sql Listener Tests', () => { + const expectTableName = 'user1'; + const sqlText = `select id,name,sex from ${expectTableName};`; + const sql = new Sql(); + + const parseTree = sql.parse(sqlText); + + test('Listener exitTableName', () => { + class MyListener extends SqlParserListener { + result = ''; + exitTableName = (ctx): void => { + this.result = ctx.getText().toLowerCase(); + }; + } + const listener = new MyListener(); + + sql.listen(listener, parseTree); + expect(listener.result).toBe(expectTableName); + }); + + test('Split sql listener', async () => { + const singleStatementArr = [ + `SELECT /*+ REPARTITION(zip_code) */ name, age, zip_code FROM person SORT BY name ASC, age DESC;`, + + `INSERT INTO students FROM applicants SELECT name, address, student_id WHERE qualified = true;`, + + `CREATE TABLE student_bucket + USING parquet + CLUSTERED BY (id) INTO 4 buckets ( + WITH tmpTable AS ( + SELECT * FROM student WHERE id > 100 + ) + SELECT * FROM tmpTable + );`, + ]; + + const sqlText = singleStatementArr.join('\n'); + const sqlSlices = sql.splitSQLByStatement(sqlText); + + expect(sqlSlices).not.toBeNull(); + + // check text in result + expect(sqlSlices.map((item) => item.text)).toEqual(singleStatementArr); + + // check startIndex and endIndex in result + 
sqlSlices.forEach((slice, index) => { + expect(sqlText.slice(slice.startIndex, slice.endIndex + 1)).toBe( + singleStatementArr[index] + ); + }); + + // check lineNumber in result + expect(sqlSlices[0].startLine).toBe(1); + expect(sqlSlices[0].endLine).toBe(1); + expect(sqlSlices[1].startLine).toBe(2); + expect(sqlSlices[1].endLine).toBe(2); + expect(sqlSlices[2].startLine).toBe(3); + expect(sqlSlices[2].endLine).toBe(10); + }); +}); diff --git a/test/parser/sql/suggestion/fixtures/multipleStatement.sql b/test/parser/sql/suggestion/fixtures/multipleStatement.sql new file mode 100644 index 00000000..b794f302 --- /dev/null +++ b/test/parser/sql/suggestion/fixtures/multipleStatement.sql @@ -0,0 +1,21 @@ +CREATE TABLE VALUES -- unfinished + +CREATE TABLE student (id INT, name STRING, age INT) STORED AS ORC; + +CREATE SCHEMA customer_db WITH DBPROPERTIES (ID=001, Name='John'); + +ALTER TABLE StudentInfo ADD COLUMNS (LastName string, DOB timestamp); + +SELECT * FROM db. ; -- unfinished + +INSERT INTO weather (date, city, temp_hi, temp_lo) VALUES ('1994-11-29', 'Hayward', 54, 37); + +DESC EXTENDED students name; + +INSERT INTO weather (date, city, temp_hi, temp_lo) VALUES ('1994-11-29', 'Hayward', 54, 37); -- unfinished + +DROP TABLE IF EXISTS employable; + +DROP TEMPORARY FUNCTION test_avg; + +INSERT INTO products (product_no, name, price) SELECT * FROM db. 
; -- unfinished \ No newline at end of file diff --git a/test/parser/sql/suggestion/fixtures/suggestionWithEntity.sql b/test/parser/sql/suggestion/fixtures/suggestionWithEntity.sql new file mode 100644 index 00000000..6bbbf123 --- /dev/null +++ b/test/parser/sql/suggestion/fixtures/suggestionWithEntity.sql @@ -0,0 +1,11 @@ +SELECT FROM my_db.tb; + +SELECT name, calculate_age(birthdate) AS age, FROM students; + +INSERT INTO insert_tb SELECT FROM from_tb; + +INSERT INTO insert_tb SELECT id, age, FROM from_tb; + +CREATE TABLE sorted_census_data AS SELECT FROM unsorted_census_data; + +CREATE TABLE sorted_census_data AS SELECT id, age, FROM unsorted_census_data; \ No newline at end of file diff --git a/test/parser/sql/suggestion/fixtures/syntaxSuggestion.sql b/test/parser/sql/suggestion/fixtures/syntaxSuggestion.sql new file mode 100644 index 00000000..93bcad60 --- /dev/null +++ b/test/parser/sql/suggestion/fixtures/syntaxSuggestion.sql @@ -0,0 +1,55 @@ +INSERT INTO db.tb ; + +SELECT * FROM db.; + +CREATE TABLE db. 
VALUES; + +DROP TABLE IF EXISTS db.a; + +CREATE OR REPLACE VIEW db.v; + +DROP VIEW db.v ; + +CREATE FUNCTION fn1; + +SELECT name, calculate_age(birthday) AS age FROM students; + +CREATE DATABASE db; + +DROP SCHEMA IF EXISTS sch; + +ANALYZE TABLE students COMPUTE STATISTICS FOR COLUMNS name, co ; + +ALTER TABLE StudentInfo ADD COLUMNS (LastName string, ); + +ALTER TABLE StudentInfo RENAME COLUMN ; + +ALTER TABLE StudentInfo RENAME COLUMN name TO t; + +ALTER TABLE StudentInfo DROP COLUMNS (LastName, ); + +ALTER TABLE StudentInfo CHANGE FirstName; + +INSERT INTO students ( ); + +INSERT INTO students ( id, n ); + +SELECT ; + +SELECT id, n; + +SELECT FROM tbl; + +SELECT id, n FROM tbl; + +SELECT id, n FROM tbl GROUP BY ; + +SELECT id, n FROM tbl ORDER BY name, i ; + +SELECT id FROM tb1 GROUP BY ROLLUP( ); + +OPTIMIZE db.tb; + +OPTIMIZE db.tb ZORDER BY ; + +OPTIMIZE db.tb ZORDER BY name, i; diff --git a/test/parser/sql/suggestion/fixtures/tokenSuggestion.sql b/test/parser/sql/suggestion/fixtures/tokenSuggestion.sql new file mode 100644 index 00000000..64741647 --- /dev/null +++ b/test/parser/sql/suggestion/fixtures/tokenSuggestion.sql @@ -0,0 +1,18 @@ +ALTER +; +CREATE +; +DELETE +; +DESCRIBE +; +DROP +; +INSERT +; +LOAD +; +SHOW +; +EXPORT +; diff --git a/test/parser/sql/suggestion/multipleStatement.test.ts b/test/parser/sql/suggestion/multipleStatement.test.ts new file mode 100644 index 00000000..83e9de75 --- /dev/null +++ b/test/parser/sql/suggestion/multipleStatement.test.ts @@ -0,0 +1,69 @@ +import fs from 'fs'; +import path from 'path'; +import { Sql } from 'src/parser/sql'; +import { CaretPosition, EntityContextType } from 'src/parser/common/types'; + +const syntaxSql = fs.readFileSync( + path.join(__dirname, 'fixtures', 'multipleStatement.sql'), + 'utf-8' +); + +describe('Sql Multiple Statements Syntax Suggestion', () => { + const sql = new Sql(); + + test('Create table ', () => { + const pos: CaretPosition = { + lineNumber: 1, + column: 14, + }; + const syntaxes 
= sql.getSuggestionAtCaretPosition(syntaxSql, pos)?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.TABLE_CREATE + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual([]); + }); + + test('Select from table', () => { + const pos: CaretPosition = { + lineNumber: 9, + column: 18, + }; + const syntaxes = sql.getSuggestionAtCaretPosition(syntaxSql, pos)?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.TABLE + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual(['db', '.']); + }); + + test('Insert into table ', () => { + const pos: CaretPosition = { + lineNumber: 15, + column: 13, + }; + const syntaxes = sql.getSuggestionAtCaretPosition(syntaxSql, pos)?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.TABLE + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual([]); + }); + + test('Insert into select from table ', () => { + const pos: CaretPosition = { + lineNumber: 21, + column: 65, + }; + const syntaxes = sql.getSuggestionAtCaretPosition(syntaxSql, pos)?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.TABLE + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual(['db', '.']); + }); +}); diff --git a/test/parser/sql/suggestion/suggestionWithEntity.test.ts b/test/parser/sql/suggestion/suggestionWithEntity.test.ts new file mode 100644 index 00000000..ae1cac49 --- /dev/null +++ b/test/parser/sql/suggestion/suggestionWithEntity.test.ts @@ -0,0 +1,156 @@ +import fs from 'fs'; +import path from 'path'; +import { Sql } from 'src/parser/sql'; +import { CaretPosition, EntityContextType } from 'src/parser/common/types'; +import { 
commentOtherLine } from 'test/helper'; + +const syntaxSql = fs.readFileSync( + path.join(__dirname, 'fixtures', 'suggestionWithEntity.sql'), + 'utf-8' +); + +describe('Sql Syntax Suggestion with collect entity', () => { + const sql = new Sql(); + + test('select with no column', () => { + const pos: CaretPosition = { + lineNumber: 1, + column: 8, + }; + const sqlText = commentOtherLine(syntaxSql, pos.lineNumber); + + const syntaxes = sql.getSuggestionAtCaretPosition(sqlText, pos)?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.COLUMN + ); + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual([]); + + const entities = sql.getAllEntities(sqlText, pos); + expect(entities.length).toBe(1); + expect(entities[0].text).toBe('my_db.tb'); + expect(entities[0].entityContextType).toBe(EntityContextType.TABLE); + expect(entities[0].belongStmt.isContainCaret).toBeTruthy(); + }); + + test('select with columns with trailing comma', () => { + const pos: CaretPosition = { + lineNumber: 3, + column: 47, + }; + const sqlText = commentOtherLine(syntaxSql, pos.lineNumber); + + const syntaxes = sql.getSuggestionAtCaretPosition(sqlText, pos)?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.COLUMN + ); + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual([]); + + const entities = sql.getAllEntities(sqlText, pos); + expect(entities.length).toBe(1); + expect(entities[0].text).toBe('students'); + expect(entities[0].entityContextType).toBe(EntityContextType.TABLE); + expect(entities[0].belongStmt.isContainCaret).toBeTruthy(); + }); + + test('insert into table as select with no column', () => { + const pos: CaretPosition = { + lineNumber: 5, + column: 30, + }; + const sqlText = commentOtherLine(syntaxSql, pos.lineNumber); + + const syntaxes = 
sql.getSuggestionAtCaretPosition(sqlText, pos)?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.COLUMN + ); + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual([]); + + const entities = sql.getAllEntities(sqlText, pos); + expect(entities.length).toBe(2); + expect(entities[0].text).toBe('insert_tb'); + expect(entities[0].entityContextType).toBe(EntityContextType.TABLE); + expect(entities[0].belongStmt.isContainCaret).toBeTruthy(); + + expect(entities[1].text).toBe('from_tb'); + expect(entities[1].entityContextType).toBe(EntityContextType.TABLE); + expect(entities[1].belongStmt.isContainCaret).toBeTruthy(); + }); + + test('insert into table as select with trailing comma', () => { + const pos: CaretPosition = { + lineNumber: 7, + column: 39, + }; + const sqlText = commentOtherLine(syntaxSql, pos.lineNumber); + + const syntaxes = sql.getSuggestionAtCaretPosition(sqlText, pos)?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.COLUMN + ); + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual([]); + + const entities = sql.getAllEntities(sqlText, pos); + expect(entities.length).toBe(2); + expect(entities[0].text).toBe('insert_tb'); + expect(entities[0].entityContextType).toBe(EntityContextType.TABLE); + expect(entities[0].belongStmt.isContainCaret).toBeTruthy(); + + expect(entities[1].text).toBe('from_tb'); + expect(entities[1].entityContextType).toBe(EntityContextType.TABLE); + expect(entities[1].belongStmt.isContainCaret).toBeTruthy(); + }); + + test('create table as select with no column', () => { + const pos: CaretPosition = { + lineNumber: 9, + column: 43, + }; + const sqlText = commentOtherLine(syntaxSql, pos.lineNumber); + + const syntaxes = sql.getSuggestionAtCaretPosition(sqlText, pos)?.syntax; + const suggestion = syntaxes?.find( + (syn) => 
syn.syntaxContextType === EntityContextType.COLUMN + ); + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual([]); + + const entities = sql.getAllEntities(sqlText, pos); + expect(entities.length).toBe(2); + expect(entities[0].text).toBe('sorted_census_data'); + expect(entities[0].entityContextType).toBe(EntityContextType.TABLE_CREATE); + expect(entities[0].belongStmt.isContainCaret).toBeTruthy(); + + expect(entities[1].text).toBe('unsorted_census_data'); + expect(entities[1].entityContextType).toBe(EntityContextType.TABLE); + expect(entities[1].belongStmt.isContainCaret).toBeTruthy(); + }); + + test('create table as select with trailing comma', () => { + const pos: CaretPosition = { + lineNumber: 11, + column: 52, + }; + const sqlText = commentOtherLine(syntaxSql, pos.lineNumber); + + const syntaxes = sql.getSuggestionAtCaretPosition(sqlText, pos)?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.COLUMN + ); + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual([]); + + const entities = sql.getAllEntities(sqlText, pos); + expect(entities.length).toBe(2); + expect(entities[0].text).toBe('sorted_census_data'); + expect(entities[0].entityContextType).toBe(EntityContextType.TABLE_CREATE); + expect(entities[0].belongStmt.isContainCaret).toBeTruthy(); + + expect(entities[1].text).toBe('unsorted_census_data'); + expect(entities[1].entityContextType).toBe(EntityContextType.TABLE); + expect(entities[1].belongStmt.isContainCaret).toBeTruthy(); + }); +}); diff --git a/test/parser/sql/suggestion/syntaxSuggestion.test.ts b/test/parser/sql/suggestion/syntaxSuggestion.test.ts new file mode 100644 index 00000000..7ca8f252 --- /dev/null +++ b/test/parser/sql/suggestion/syntaxSuggestion.test.ts @@ -0,0 +1,496 @@ +import fs from 'fs'; +import path from 'path'; +import { Sql } from 'src/parser/sql'; +import { CaretPosition, 
EntityContextType } from 'src/parser/common/types'; +import { commentOtherLine } from 'test/helper'; + +const syntaxSql = fs.readFileSync( + path.join(__dirname, 'fixtures', 'syntaxSuggestion.sql'), + 'utf-8' +); + +describe('Sql Syntax Suggestion', () => { + const sql = new Sql(); + + test('Validate Syntax SQL', () => { + expect(sql.validate(syntaxSql).length).not.toBe(0); + expect(sql.validate(syntaxSql).length).not.toBe(0); + expect(sql.validate(syntaxSql).length).not.toBe(0); + }); + + test('Insert table ', () => { + const pos: CaretPosition = { + lineNumber: 1, + column: 18, + }; + const syntaxes = sql.getSuggestionAtCaretPosition( + commentOtherLine(syntaxSql, pos.lineNumber), + pos + )?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.TABLE + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual(['db', '.', 'tb']); + }); + + test('Select table ', () => { + const pos: CaretPosition = { + lineNumber: 3, + column: 18, + }; + const syntaxes = sql.getSuggestionAtCaretPosition( + commentOtherLine(syntaxSql, pos.lineNumber), + pos + )?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.TABLE + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual(['db', '.']); + }); + + test('Create table ', () => { + const pos: CaretPosition = { + lineNumber: 5, + column: 17, + }; + const syntaxes = sql.getSuggestionAtCaretPosition( + commentOtherLine(syntaxSql, pos.lineNumber), + pos + )?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.TABLE_CREATE + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual(['db', '.']); + }); + + test('DROP table ', () => { + const pos: CaretPosition = { + lineNumber: 7, + column: 26, + }; + const syntaxes = 
sql.getSuggestionAtCaretPosition( + commentOtherLine(syntaxSql, pos.lineNumber), + pos + )?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.TABLE + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual(['db', '.', 'a']); + }); + + test('Create view ', () => { + const pos: CaretPosition = { + lineNumber: 9, + column: 28, + }; + const syntaxes = sql.getSuggestionAtCaretPosition( + commentOtherLine(syntaxSql, pos.lineNumber), + pos + )?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.VIEW_CREATE + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual(['db', '.', 'v']); + }); + + test('Drop view ', () => { + const pos: CaretPosition = { + lineNumber: 11, + column: 15, + }; + const syntaxes = sql.getSuggestionAtCaretPosition( + commentOtherLine(syntaxSql, pos.lineNumber), + pos + )?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.VIEW + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual(['db', '.', 'v']); + }); + + test('Create function ', () => { + const pos: CaretPosition = { + lineNumber: 13, + column: 20, + }; + const syntaxes = sql.getSuggestionAtCaretPosition( + commentOtherLine(syntaxSql, pos.lineNumber), + pos + )?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.FUNCTION_CREATE + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual(['fn1']); + }); + + test('Use function', () => { + const pos: CaretPosition = { + lineNumber: 15, + column: 27, + }; + const syntaxes = sql.getSuggestionAtCaretPosition( + commentOtherLine(syntaxSql, pos.lineNumber), + pos + )?.syntax; + const suggestion = syntaxes?.find( + (syn) => 
syn.syntaxContextType === EntityContextType.FUNCTION + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual(['calculate_age']); + }); + + test('Create database', () => { + const pos: CaretPosition = { + lineNumber: 17, + column: 19, + }; + const syntaxes = sql.getSuggestionAtCaretPosition( + commentOtherLine(syntaxSql, pos.lineNumber), + pos + )?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.DATABASE_CREATE + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual(['db']); + }); + + test('Drop database', () => { + const pos: CaretPosition = { + lineNumber: 19, + column: 26, + }; + const syntaxes = sql.getSuggestionAtCaretPosition( + commentOtherLine(syntaxSql, pos.lineNumber), + pos + )?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.DATABASE + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual(['sch']); + }); + + test('ANALYZE table for columns', () => { + const pos: CaretPosition = { + lineNumber: 21, + column: 63, + }; + const syntaxes = sql.getSuggestionAtCaretPosition( + commentOtherLine(syntaxSql, pos.lineNumber), + pos + )?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.COLUMN + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual(['co']); + }); + + test('Alter table add columns', () => { + const pos: CaretPosition = { + lineNumber: 23, + column: 55, + }; + const syntaxes = sql.getSuggestionAtCaretPosition( + commentOtherLine(syntaxSql, pos.lineNumber), + pos + )?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.COLUMN_CREATE + ); + + expect(suggestion).not.toBeUndefined(); + 
expect(suggestion?.wordRanges.map((token) => token.text)).toEqual([]); + }); + + test('Alter table rename columns', () => { + const pos: CaretPosition = { + lineNumber: 25, + column: 39, + }; + const syntaxes = sql.getSuggestionAtCaretPosition( + commentOtherLine(syntaxSql, pos.lineNumber), + pos + )?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.COLUMN + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual([]); + }); + + test('Alter table rename columns to', () => { + const pos: CaretPosition = { + lineNumber: 27, + column: 48, + }; + const syntaxes = sql.getSuggestionAtCaretPosition( + commentOtherLine(syntaxSql, pos.lineNumber), + pos + )?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.COLUMN_CREATE + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual(['t']); + }); + + test('Alter table drop columns', () => { + const pos: CaretPosition = { + lineNumber: 29, + column: 49, + }; + const syntaxes = sql.getSuggestionAtCaretPosition( + commentOtherLine(syntaxSql, pos.lineNumber), + pos + )?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.COLUMN + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual([]); + }); + + test('Alter table change columns', () => { + const pos: CaretPosition = { + lineNumber: 31, + column: 41, + }; + const syntaxes = sql.getSuggestionAtCaretPosition( + commentOtherLine(syntaxSql, pos.lineNumber), + pos + )?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.COLUMN + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual(['FirstName']); + }); + + test('Insert into table spec columns', () => { + 
const pos: CaretPosition = { + lineNumber: 33, + column: 24, + }; + const syntaxes = sql.getSuggestionAtCaretPosition( + commentOtherLine(syntaxSql, pos.lineNumber), + pos + )?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.COLUMN + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual([]); + }); + + test('Insert into table spec columns2', () => { + const pos: CaretPosition = { + lineNumber: 35, + column: 29, + }; + const syntaxes = sql.getSuggestionAtCaretPosition( + commentOtherLine(syntaxSql, pos.lineNumber), + pos + )?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.COLUMN + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual(['n']); + }); + + test('Select columns case empty', () => { + const pos: CaretPosition = { + lineNumber: 37, + column: 8, + }; + const syntaxes = sql.getSuggestionAtCaretPosition( + commentOtherLine(syntaxSql, pos.lineNumber), + pos + )?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.COLUMN + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual([]); + }); + + test('Select columns case seq', () => { + const pos: CaretPosition = { + lineNumber: 39, + column: 13, + }; + const syntaxes = sql.getSuggestionAtCaretPosition( + commentOtherLine(syntaxSql, pos.lineNumber), + pos + )?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.COLUMN + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual(['n']); + }); + + test('Select columns from table case empty', () => { + const pos: CaretPosition = { + lineNumber: 41, + column: 8, + }; + const syntaxes = sql.getSuggestionAtCaretPosition( + 
commentOtherLine(syntaxSql, pos.lineNumber), + pos + )?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.COLUMN + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual([]); + }); + + test('Select columns from table case seq', () => { + const pos: CaretPosition = { + lineNumber: 43, + column: 13, + }; + const syntaxes = sql.getSuggestionAtCaretPosition( + commentOtherLine(syntaxSql, pos.lineNumber), + pos + )?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.COLUMN + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual(['n']); + }); + + test('Select group by', () => { + const pos: CaretPosition = { + lineNumber: 45, + column: 32, + }; + const syntaxes = sql.getSuggestionAtCaretPosition( + commentOtherLine(syntaxSql, pos.lineNumber), + pos + )?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.COLUMN + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual([]); + }); + + test('Select group by', () => { + const pos: CaretPosition = { + lineNumber: 47, + column: 39, + }; + const syntaxes = sql.getSuggestionAtCaretPosition( + commentOtherLine(syntaxSql, pos.lineNumber), + pos + )?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.COLUMN + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual(['i']); + }); + + test('Select group by rollup', () => { + const pos: CaretPosition = { + lineNumber: 49, + column: 37, + }; + const syntaxes = sql.getSuggestionAtCaretPosition( + commentOtherLine(syntaxSql, pos.lineNumber), + pos + )?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.COLUMN + ); 
+ + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual([]); + }); + + test('Optimize table', () => { + const pos: CaretPosition = { + lineNumber: 51, + column: 15, + }; + const syntaxes = sql.getSuggestionAtCaretPosition( + commentOtherLine(syntaxSql, pos.lineNumber), + pos + )?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.TABLE + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual(['db', '.', 'tb']); + }); + + test('Optimize table zorder by empty', () => { + const pos: CaretPosition = { + lineNumber: 53, + column: 26, + }; + const syntaxes = sql.getSuggestionAtCaretPosition( + commentOtherLine(syntaxSql, pos.lineNumber), + pos + )?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.COLUMN + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual([]); + }); + + test('Optimize table zorder by columns', () => { + const pos: CaretPosition = { + lineNumber: 55, + column: 33, + }; + const syntaxes = sql.getSuggestionAtCaretPosition( + commentOtherLine(syntaxSql, pos.lineNumber), + pos + )?.syntax; + const suggestion = syntaxes?.find( + (syn) => syn.syntaxContextType === EntityContextType.COLUMN + ); + + expect(suggestion).not.toBeUndefined(); + expect(suggestion?.wordRanges.map((token) => token.text)).toEqual(['i']); + }); +}); diff --git a/test/parser/sql/suggestion/tokenSuggestion.test.ts b/test/parser/sql/suggestion/tokenSuggestion.test.ts new file mode 100644 index 00000000..afea67ea --- /dev/null +++ b/test/parser/sql/suggestion/tokenSuggestion.test.ts @@ -0,0 +1,196 @@ +import fs from 'fs'; +import path from 'path'; +import { Sql } from 'src/parser/sql'; +import { CaretPosition } from 'src/parser/common/types'; +import { commentOtherLine } from 'test/helper'; + +const tokenSql = 
fs.readFileSync(path.join(__dirname, 'fixtures', 'tokenSuggestion.sql'), 'utf-8'); + +describe('Sql Token Suggestion', () => { + const sql = new Sql(); + + test('After ALTER', () => { + const pos: CaretPosition = { + lineNumber: 1, + column: 7, + }; + const suggestion = sql.getSuggestionAtCaretPosition( + commentOtherLine(tokenSql, pos.lineNumber), + pos + )?.keywords; + + expect(suggestion).toMatchUnorderedArray([ + 'TABLE', + 'INDEX', + 'VIEW', + 'DATABASE', + 'NAMESPACE', + 'SCHEMA', + ]); + }); + + test('After CREATE', () => { + const pos: CaretPosition = { + lineNumber: 3, + column: 8, + }; + const suggestion = sql.getSuggestionAtCaretPosition( + commentOtherLine(tokenSql, pos.lineNumber), + pos + )?.keywords; + + expect(suggestion).toMatchUnorderedArray([ + 'TEMPORARY', + 'INDEX', + 'ROLE', + 'FUNCTION', + 'OR', + 'GLOBAL', + 'VIEW', + 'TABLE', + 'EXTERNAL', + 'DATABASE', + 'NAMESPACE', + 'SCHEMA', + ]); + }); + + test('After DELETE', () => { + const pos: CaretPosition = { + lineNumber: 5, + column: 8, + }; + const suggestion = sql.getSuggestionAtCaretPosition( + commentOtherLine(tokenSql, pos.lineNumber), + pos + )?.keywords; + + expect(suggestion).toMatchUnorderedArray(['FROM']); + }); + + test('After DESCRIBE', () => { + const pos: CaretPosition = { + lineNumber: 7, + column: 10, + }; + const suggestion = sql.getSuggestionAtCaretPosition( + commentOtherLine(tokenSql, pos.lineNumber), + pos + )?.keywords; + + expect(suggestion).toMatchUnorderedArray([ + 'WITH', + 'SELECT', + 'MAP', + 'REDUCE', + 'FROM', + 'TABLE', + 'VALUES', + 'QUERY', + 'EXTENDED', + 'FORMATTED', + 'DATABASE', + 'FUNCTION', + ]); + }); + + test('After DROP', () => { + const pos: CaretPosition = { + lineNumber: 9, + column: 6, + }; + const suggestion = sql.getSuggestionAtCaretPosition( + commentOtherLine(tokenSql, pos.lineNumber), + pos + )?.keywords; + + expect(suggestion).toMatchUnorderedArray([ + 'TEMPORARY', + 'INDEX', + 'ROLE', + 'FUNCTION', + 'VIEW', + 'TABLE', + 'DATABASE', + 
'NAMESPACE', + 'SCHEMA', + ]); + }); + + test('After INSERT', () => { + const pos: CaretPosition = { + lineNumber: 11, + column: 8, + }; + const suggestion = sql.getSuggestionAtCaretPosition( + commentOtherLine(tokenSql, pos.lineNumber), + pos + )?.keywords; + + expect(suggestion).toMatchUnorderedArray(['OVERWRITE', 'INTO']); + }); + + test('After LOAD', () => { + const pos: CaretPosition = { + lineNumber: 13, + column: 6, + }; + const suggestion = sql.getSuggestionAtCaretPosition( + commentOtherLine(tokenSql, pos.lineNumber), + pos + )?.keywords; + + expect(suggestion).toMatchUnorderedArray(['DATA']); + }); + + test('After SHOW', () => { + const pos: CaretPosition = { + lineNumber: 15, + column: 6, + }; + const suggestion = sql.getSuggestionAtCaretPosition( + commentOtherLine(tokenSql, pos.lineNumber), + pos + )?.keywords; + + expect(suggestion).toMatchUnorderedArray([ + 'LOCKS', + 'INDEXES', + 'TRANSACTIONS', + 'CREATE', + 'COMPACTIONS', + 'CURRENT', + 'ROLES', + 'PRINCIPALS', + 'ROLE', + 'GRANT', + 'CATALOGS', + 'FUNCTIONS', + 'ALL', + 'SYSTEM', + 'USER', + 'PARTITIONS', + 'VIEWS', + 'COLUMNS', + 'TBLPROPERTIES', + 'TABLE', + 'TABLES', + 'DATABASES', + 'NAMESPACES', + 'SCHEMAS', + ]); + }); + + test('After EXPORT', () => { + const pos: CaretPosition = { + lineNumber: 17, + column: 8, + }; + const suggestion = sql.getSuggestionAtCaretPosition( + commentOtherLine(tokenSql, pos.lineNumber), + pos + )?.keywords; + + expect(suggestion).toMatchUnorderedArray(['TABLE']); + }); +}); diff --git a/test/parser/sql/syntax/all.test.ts b/test/parser/sql/syntax/all.test.ts new file mode 100644 index 00000000..07a4fe21 --- /dev/null +++ b/test/parser/sql/syntax/all.test.ts @@ -0,0 +1,72 @@ +import { Sql } from 'src/parser/sql'; +import { readSQL } from 'test/helper'; + +const features = { + alter: readSQL(__dirname, 'alter.sql'), + create: readSQL(__dirname, 'create.sql'), + drop: readSQL(__dirname, 'drop.sql'), + insert: readSQL(__dirname, 'insert.sql'), + select: 
readSQL(__dirname, 'select.sql'), + show: readSQL(__dirname, 'show.sql'), + truncate: readSQL(__dirname, 'truncate.sql'), + update: readSQL(__dirname, 'update.sql'), + use: readSQL(__dirname, 'use.sql'), +}; + +describe('Generic sql Syntax Tests', () => { + const flink = new Sql(); + + features.alter.forEach((itemSql) => { + it(itemSql, () => { + expect(flink.validate(itemSql).length).toBe(0); + }); + }); + + features.create.forEach((itemSql) => { + it(itemSql, () => { + expect(flink.validate(itemSql).length).toBe(0); + }); + }); + + features.drop.forEach((itemSql) => { + it(itemSql, () => { + expect(flink.validate(itemSql).length).toBe(0); + }); + }); + + features.insert.forEach((itemSql) => { + it(itemSql, () => { + expect(flink.validate(itemSql).length).toBe(0); + }); + }); + + features.select.forEach((itemSql) => { + it(itemSql, () => { + expect(flink.validate(itemSql).length).toBe(0); + }); + }); + + features.show.forEach((itemSql) => { + it(itemSql, () => { + expect(flink.validate(itemSql).length).toBe(0); + }); + }); + + features.truncate.forEach((itemSql) => { + it(itemSql, () => { + expect(flink.validate(itemSql).length).toBe(0); + }); + }); + + features.update.forEach((itemSql) => { + it(itemSql, () => { + expect(flink.validate(itemSql).length).toBe(0); + }); + }); + + features.use.forEach((itemSql) => { + it(itemSql, () => { + expect(flink.validate(itemSql).length).toBe(0); + }); + }); +}); diff --git a/test/parser/sql/syntax/fixtures/alter.sql b/test/parser/sql/syntax/fixtures/alter.sql new file mode 100644 index 00000000..dd5c21c3 --- /dev/null +++ b/test/parser/sql/syntax/fixtures/alter.sql @@ -0,0 +1,73 @@ +-- Syntax ALTER PROPERTIES +-- ALTER { DATABASE | SCHEMA | NAMESPACE } database_name SET { DBPROPERTIES | PROPERTIES } ( property_name = property_value [ , ... 
] ) + +ALTER DATABASE inventory SET DBPROPERTIES ('Edited-by' = 'John', 'Edit-date' = '01/01/2001'); +ALTER DATABASE inventory SET PROPERTIES ('Edited-by' = 'John', 'Edit-date' = '01/01/2001'); + + +-- Syntax ALTER LOCATION +-- ALTER { DATABASE | SCHEMA | NAMESPACE } database_name SET LOCATION 'new_location' + +ALTER DATABASE inventory SET LOCATION 'file:/temp/spark-warehouse/new_inventory.db'; +ALTER SCHEMA inventory SET LOCATION 'file:/temp/spark-warehouse/new_inventory.db'; +ALTER NAMESPACE inventory SET LOCATION 'file:/temp/spark-warehouse/new_inventory.db'; + + +-- ============================================ + + +-- Syntax RENAME +ALTER TABLE Student RENAME TO StudentInfo; +ALTER TABLE default.StudentInfo PARTITION (age='10') RENAME TO PARTITION (age='15'); +ALTER TABLE default.StudentInfo PARTITION (age=10) RENAME TO PARTITION (age=12.323); + +-- Syntax ADD COLUMNS +ALTER TABLE StudentInfo ADD COLUMNS (LastName string, DOB timestamp); + +-- Syntax DROP COLUMNS +ALTER TABLE StudentInfo DROP columns (LastName, DOB); + +-- Syntax RENAME COLUMN +ALTER TABLE StudentInfo RENAME COLUMN name TO FirstName; + +-- Syntax ALTER OR CHANGE COLUMN +ALTER TABLE StudentInfo ALTER COLUMN FirstName COMMENT "new comment"; +ALTER TABLE StudentInfo CHANGE COLUMN FirstName COMMENT "new comment"; +ALTER TABLE StudentInfo ALTER FirstName COMMENT "new comment"; +ALTER TABLE StudentInfo CHANGE FirstName COMMENT "new comment"; + +-- Syntax REPLACE COLUMNS +ALTER TABLE StudentInfo REPLACE COLUMNS (name string, ID int COMMENT 'new comment'); +ALTER TABLE StudentInfo REPLACE COLUMNS name string, ID int COMMENT 'new comment'; + +-- Syntax ADD PARTITION +ALTER TABLE StudentInfo ADD IF NOT EXISTS PARTITION (age=18); +ALTER TABLE StudentInfo ADD PARTITION (age=18); +-- Adding multiple partitions to the table +ALTER TABLE StudentInfo ADD IF NOT EXISTS PARTITION (age=18) PARTITION (age=20); +ALTER TABLE StudentInfo ADD PARTITION (age=18) PARTITION (age=20); + +-- Syntax DROP PARTITION +ALTER 
TABLE StudentInfo DROP IF EXISTS PARTITION (age=18); + +-- Syntax SET SERDE +ALTER TABLE test_tab SET SERDE 'org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe'; +ALTER TABLE dbx.tab1 SET SERDE 'org.apache.hadoop' WITH SERDEPROPERTIES ('k' = 'v', 'kay' = 'vee') + +-- Syntax SET LOCATION And SET FILE FORMAT +ALTER TABLE loc_orc SET FILEFORMAT orc; +ALTER TABLE p1 partition (month=2, day=2) SET FILEFORMAT parquet; +ALTER TABLE dbx.tab1 PARTITION (a='1', b='2') SET LOCATION '/path/to/part/ways' +ALTER TABLE dbx.tab1 SET LOCATION '/path/to/part/ways' + +-- Syntax RECOVER PARTITIONS +ALTER TABLE dbx.tab1 RECOVER PARTITIONS; + + +-- ============================================ + + +-- Syntax RENAME View +ALTER VIEW tempdb1.v1 RENAME TO tempdb1.v2; +-- Syntax ALTER View AS SELECT +ALTER VIEW tempdb1.v2 AS SELECT * FROM tempdb1.v1; diff --git a/test/parser/sql/syntax/fixtures/create.sql b/test/parser/sql/syntax/fixtures/create.sql new file mode 100644 index 00000000..ac631cd0 --- /dev/null +++ b/test/parser/sql/syntax/fixtures/create.sql @@ -0,0 +1,282 @@ +-- Syntax +-- CREATE { DATABASE | SCHEMA } [ IF NOT EXISTS ] database_name [ COMMENT database_comment ] [ LOCATION database_directory ] [ WITH DBPROPERTIES ( property_name = property_value [ , ... 
] ) ] + +CREATE DATABASE IF NOT EXISTS customer_db; +CREATE DATABASE customer_db; + +CREATE SCHEMA IF NOT EXISTS customer_db; +CREATE SCHEMA customer_db; + +CREATE DATABASE IF NOT EXISTS customer_db COMMENT 'This is customer database' LOCATION '/user' WITH DBPROPERTIES (ID=001, Name='John'); +CREATE DATABASE IF NOT EXISTS customer_db LOCATION '/user' WITH DBPROPERTIES (ID=001, Name='John'); +CREATE DATABASE IF NOT EXISTS customer_db WITH DBPROPERTIES (ID=001, Name='John'); +CREATE DATABASE customer_db COMMENT 'This is customer database' LOCATION '/user' WITH DBPROPERTIES (ID=001, Name='John'); +CREATE DATABASE customer_db LOCATION '/user' WITH DBPROPERTIES (ID=001, Name='John'); +CREATE DATABASE customer_db WITH DBPROPERTIES (ID=001, Name='John'); + +CREATE SCHEMA IF NOT EXISTS customer_db COMMENT 'This is customer database' LOCATION '/user' WITH DBPROPERTIES (ID=001, Name='John'); +CREATE SCHEMA IF NOT EXISTS customer_db LOCATION '/user' WITH DBPROPERTIES (ID=001, Name='John'); +CREATE SCHEMA IF NOT EXISTS customer_db WITH DBPROPERTIES (ID=001, Name='John'); +CREATE SCHEMA customer_db COMMENT 'This is customer database' LOCATION '/user' WITH DBPROPERTIES (ID=001, Name='John'); +CREATE SCHEMA customer_db LOCATION '/user' WITH DBPROPERTIES (ID=001, Name='John'); +CREATE SCHEMA customer_db WITH DBPROPERTIES (ID=001, Name='John'); + + +-- ============================================ + + +-- Syntax +-- CREATE [ OR REPLACE ] [ TEMPORARY ] FUNCTION [ IF NOT EXISTS ] function_name AS class_name [ resource_locations ] + +CREATE OR REPLACE TEMPORARY FUNCTION IF NOT EXISTS simple_udf AS 'SimpleUdfR' USING JAR '/tmp/SimpleUdfR.jar'; +CREATE OR REPLACE TEMPORARY FUNCTION IF NOT EXISTS simple_udf AS 'SimpleUdfR'; + +CREATE OR REPLACE FUNCTION IF NOT EXISTS simple_udf AS 'SimpleUdfR'; +CREATE TEMPORARY FUNCTION IF NOT EXISTS simple_udf AS 'SimpleUdfR'; +CREATE FUNCTION IF NOT EXISTS simple_udf AS 'SimpleUdfR'; + +CREATE OR REPLACE FUNCTION simple_udf AS 'SimpleUdfR'; +CREATE 
TEMPORARY FUNCTION simple_udf AS 'SimpleUdfR'; +CREATE FUNCTION simple_udf AS 'SimpleUdfR'; + +CREATE FUNCTION simple_udf AS 'SimpleUdf' USING JAR '/tmp/SimpleUdf.jar'; + +CREATE TEMPORARY FUNCTION simple_temp_udf AS 'SimpleUdf' USING JAR '/tmp/SimpleUdf.jar'; + + +-- ============================================ + + +-- Syntax +-- CREATE TABLE [ IF NOT EXISTS ] table_identifier +-- [ ( col_name1 col_type1 [ COMMENT col_comment1 ], ... ) ] +-- USING data_source +-- [ OPTIONS ( key1=val1, key2=val2, ... ) ] +-- [ PARTITIONED BY ( col_name1, col_name2, ... ) ] +-- [ CLUSTERED BY ( col_name3, col_name4, ... ) +-- [ SORTED BY ( col_name [ ASC | DESC ], ... ) ] +-- INTO num_buckets BUCKETS ] +-- [ LOCATION path ] +-- [ COMMENT table_comment ] +-- [ TBLPROPERTIES ( key1=val1, key2=val2, ... ) ] +-- [ AS select_statement ] + + +--Use data source +CREATE TABLE student (id INT, name STRING, age INT) USING CSV; +CREATE TABLE IF NOT EXISTS student (id INT, name STRING, age INT) USING CSV; + +--Use data from another table +CREATE TABLE student_copy USING CSV AS SELECT * FROM student; + +--Omit the USING clause, which uses the default data source (parquet by default) +CREATE TABLE student (id INT, name STRING, age INT); + +--Use parquet data source with parquet storage options +--The columns 'id' and 'name' enable the bloom filter during writing parquet file, +--column 'age' does not enable +CREATE TABLE student_parquet(id INT, name STRING, age INT) USING PARQUET + OPTIONS ( + 'parquet.bloom.filter.enabled'='true', + 'parquet.bloom.filter.enabled#age'='false' + ); + +--Specify table comment and properties +CREATE TABLE student (id INT, name STRING, age INT) USING CSV + LOCATION 'file:/temp/spark-warehouse/new_inventory.db' + COMMENT 'this is a comment' + TBLPROPERTIES ('foo'='bar'); + +--Specify table comment and properties with different clauses order +CREATE TABLE student (id INT, name STRING, age INT) USING CSV + TBLPROPERTIES ('foo'='bar') + COMMENT 'this is a comment'; + 
+--Create partitioned and bucketed table +CREATE TABLE student (id INT, name STRING, age INT) + USING CSV + PARTITIONED BY (age) + CLUSTERED BY (Id) + SORTED BY (Id ASC) INTO 4 buckets; + +--Create partitioned and bucketed table through CTAS +CREATE TABLE student_partition_bucket + USING parquet + PARTITIONED BY (age) + CLUSTERED BY (id) INTO 4 buckets + AS SELECT * FROM student; + +--Create bucketed table through CTAS and CTE +CREATE TABLE student_bucket + USING parquet + CLUSTERED BY (id) INTO 4 buckets ( + WITH tmpTable AS ( + SELECT * FROM student WHERE id > 100 + ) + SELECT * FROM tmpTable +); + +-- dtstack SparkSQL/HiveSQL lifecycle +CREATE TABLE IF NOT EXISTS t1 ( + id INT COMMENT '索引', + name STRING COMMENT '姓名', + age SMALLINT COMMENT '年龄' +) COMMENT "t1表" lifecycle 29; + + +-- ============================================ + + +-- Syntax +-- CREATE [ EXTERNAL ] TABLE [ IF NOT EXISTS ] table_identifier +-- [ ( col_name1[:] col_type1 [ COMMENT col_comment1 ], ... ) ] +-- [ COMMENT table_comment ] +-- [ PARTITIONED BY ( col_name2[:] col_type2 [ COMMENT col_comment2 ], ... ) +-- | ( col_name1, col_name2, ... ) ] +-- [ CLUSTERED BY ( col_name1, col_name2, ...) +-- [ SORTED BY ( col_name1 [ ASC | DESC ], col_name2 [ ASC | DESC ], ... ) ] +-- INTO num_buckets BUCKETS ] +-- [ ROW FORMAT row_format ] +-- [ STORED AS file_format ] +-- [ LOCATION path ] +-- [ TBLPROPERTIES ( key1=val1, key2=val2, ... 
) ] +-- [ AS select_statement ] + +--Use hive format +CREATE TABLE student (id INT, name STRING, age INT) STORED AS ORC; + +--Use data from another table +CREATE TABLE student_copy STORED AS ORC + AS SELECT * FROM student; + +--Specify table comment and properties +CREATE TABLE student (id INT, name STRING, age INT) + COMMENT 'this is a comment' + STORED AS ORC + TBLPROPERTIES ('foo'='bar'); + +--Specify table comment and properties with different clauses order +CREATE TABLE student (id INT, name STRING, age INT) + STORED AS ORC + TBLPROPERTIES ('foo'='bar') + COMMENT 'this is a comment'; + +--Create partitioned table +CREATE TABLE student (id INT, name STRING) + PARTITIONED BY (age INT) + STORED AS ORC; + +--Create partitioned table with different clauses order +CREATE TABLE student (id INT, name STRING) + STORED AS ORC + PARTITIONED BY (age INT); + +--Use Row Format and file format +CREATE TABLE IF NOT EXISTS student (id INT, name STRING) + ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' + STORED AS TEXTFILE; + +--Use complex datatype +CREATE EXTERNAL TABLE family( + name STRING, + friends ARRAY, + children MAP, + address STRUCT + ) + ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' ESCAPED BY '\\' + COLLECTION ITEMS TERMINATED BY '_' + MAP KEYS TERMINATED BY ':' + LINES TERMINATED BY '\n' + NULL DEFINED AS 'foonull' + STORED AS TEXTFILE + LOCATION '/tmp/family/'; + +--Use predefined custom SerDe +CREATE TABLE avroExample + ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe' + STORED AS INPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat' + OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat' + TBLPROPERTIES ('avro.schema.literal'='{ "namespace": "org.apache.hive", + "name": "first_schema", + "type": "record", + "fields": [ + { "name":"string1", "type":"string" }, + { "name":"string2", "type":"string" } + ] }'); + +--Use personalized custom SerDe(we may need to `ADD JAR xxx.jar` first to ensure we can find the 
serde_class, +--or you may run into `CLASSNOTFOUND` exception) +ADD JAR /tmp/hive_serde_example.jar; + +CREATE EXTERNAL TABLE family (id INT, name STRING) + ROW FORMAT SERDE 'com.ly.spark.serde.SerDeExample' + STORED AS INPUTFORMAT 'com.ly.spark.example.serde.io.SerDeExampleInputFormat' + OUTPUTFORMAT 'com.ly.spark.example.serde.io.SerDeExampleOutputFormat' + LOCATION '/tmp/family/'; + +--Use `CLUSTERED BY` clause to create bucket table without `SORTED BY` +CREATE TABLE clustered_by_test1 (ID INT, AGE STRING) + CLUSTERED BY (ID) + INTO 4 BUCKETS + STORED AS ORC + +--Use `CLUSTERED BY` clause to create bucket table with `SORTED BY` +CREATE TABLE clustered_by_test2 (ID INT, NAME STRING) + PARTITIONED BY (YEAR STRING) + CLUSTERED BY (ID, NAME) + SORTED BY (ID ASC) + INTO 3 BUCKETS + STORED AS PARQUET + + +-- ============================================ + + +-- Syntax +-- CREATE TABLE [IF NOT EXISTS] table_identifier LIKE source_table_identifier +-- USING data_source +-- [ ROW FORMAT row_format ] +-- [ STORED AS file_format ] +-- [ TBLPROPERTIES ( key1=val1, key2=val2, ... 
) ] +-- [ LOCATION path ] + +-- Create table using an existing table +CREATE TABLE Student_Duple like Student; +CREATE TABLE IF NOT EXISTS Student_Duple like Student; + +-- Create table like using a data source +CREATE TABLE Student_Duple like Student USING CSV; + +-- Table is created as external table at the location specified +CREATE TABLE Student_Duple like Student location '/root1/home'; + +-- Create table like using a rowformat +CREATE TABLE Student_Duple like Student + ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' + STORED AS TEXTFILE + TBLPROPERTIES ('owner'='xxxx'); + + +-- ============================================ + + +-- dtstack SparkSQL/HiveSQL lifecycle +CREATE TABLE Student_Duple like Student + ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' + STORED AS TEXTFILE + TBLPROPERTIES ('owner'='xxxx') lifecycle 29; + + +-- ============================================ + + +-- Syntax +-- CREATE [ OR REPLACE ] [ [ GLOBAL ] TEMPORARY ] VIEW [ IF NOT EXISTS ] view_identifier create_view_clauses AS query + +CREATE OR REPLACE VIEW experienced_employee (ID COMMENT 'Unique identification number', Name) COMMENT 'View for experienced employees' AS SELECT id, name FROM all_employee WHERE working_years > 5; +CREATE VIEW experienced_employee (ID COMMENT 'Unique identification number', Name) COMMENT 'View for experienced employees' AS SELECT id, name FROM all_employee WHERE working_years > 5; + +CREATE OR REPLACE GLOBAL TEMPORARY VIEW IF NOT EXISTS subscribed_movies AS SELECT mo.member_id, mb.full_name, mo.movie_title FROM movies AS mo INNER JOIN members AS mb ON mo.member_id = mb.id; +CREATE OR REPLACE GLOBAL TEMPORARY VIEW subscribed_movies AS SELECT mo.member_id, mb.full_name, mo.movie_title FROM movies AS mo INNER JOIN members AS mb ON mo.member_id = mb.id; +CREATE GLOBAL TEMPORARY VIEW IF NOT EXISTS subscribed_movies AS SELECT mo.member_id, mb.full_name, mo.movie_title FROM movies AS mo INNER JOIN members AS mb ON mo.member_id = mb.id; +CREATE GLOBAL TEMPORARY VIEW 
subscribed_movies AS SELECT mo.member_id, mb.full_name, mo.movie_title FROM movies AS mo INNER JOIN members AS mb ON mo.member_id = mb.id; diff --git a/test/parser/sql/syntax/fixtures/drop.sql b/test/parser/sql/syntax/fixtures/drop.sql new file mode 100644 index 00000000..d17bea74 --- /dev/null +++ b/test/parser/sql/syntax/fixtures/drop.sql @@ -0,0 +1,49 @@ +-- Syntax +-- DROP { DATABASE | SCHEMA } [ IF EXISTS ] dbname [ RESTRICT | CASCADE ] + +CREATE DATABASE inventory_db COMMENT 'This database is used to maintain Inventory'; +CREATE SCHEMA inventory_db COMMENT 'This database is used to maintain Inventory'; + +DROP DATABASE inventory_db CASCADE; +DROP SCHEMA inventory_db CASCADE; + +DROP DATABASE IF EXISTS inventory_db CASCADE; +DROP SCHEMA IF EXISTS inventory_db CASCADE; +DROP DATABASE inventory_db RESTRICT; +DROP SCHEMA inventory_db RESTRICT; + + +-- ============================================ + + +-- Syntax +-- DROP [ TEMPORARY ] FUNCTION [ IF EXISTS ] function_name + +DROP FUNCTION test_avg; +DROP TEMPORARY FUNCTION test_avg; +DROP TEMPORARY FUNCTION IF EXISTS test_avg; +DROP TEMPORARY FUNCTION test_avg; + + +-- ============================================ + + +-- Syntax +-- DROP TABLE [ IF EXISTS ] table_identifier [ PURGE ] + +DROP TABLE userDB.employable; +DROP TABLE IF EXISTS employable; +DROP TABLE employable; +DROP TABLE IF EXISTS employable PURGE; +DROP TABLE employable PURGE; + + +-- ============================================ + + +-- Syntax +-- DROP VIEW [ IF EXISTS ] view_identifier + +DROP VIEW employeeView; +DROP VIEW userDB.employeeView; +DROP VIEW IF EXISTS employeeView; diff --git a/test/parser/sql/syntax/fixtures/insert.sql b/test/parser/sql/syntax/fixtures/insert.sql new file mode 100644 index 00000000..30cb3140 --- /dev/null +++ b/test/parser/sql/syntax/fixtures/insert.sql @@ -0,0 +1,80 @@ +-- Syntax +-- INSERT INTO [ TABLE ] table_identifier REPLACE WHERE boolean_expression query + +-- Single Row Insert Using a VALUES Clause +INSERT INTO 
students VALUES ('Amy Smith', '123 Park Ave, San Jose', 111, 12.34, '-=--@#!$%%'); + +-- Multi-Row Insert Using a VALUES Clause +INSERT INTO students VALUES ('Bob Brown', '456 Taylor St, Cupelation', 222222), ('Cathy Johnson', '789 Race Ave, Pale Alto', 333333); + +-- Insert Using a SELECT Statement +INSERT INTO students PARTITION (student_id = 444444) SELECT name, address FROM persons WHERE name = "Dora Williams"; + +-- Insert Using a REPLACE WHERE Statement +INSERT INTO persons REPLACE WHERE ssn = 123456789 SELECT * FROM persons2; + +-- Insert Using a TABLE Statement +INSERT INTO students TABLE visiting_students; + +-- Insert Using a FROM Statement +INSERT INTO students FROM applicants SELECT name, address, student_id WHERE qualified = true; + +-- Insert Using a Typed Date Literal for a Partition Column Value +INSERT INTO students PARTITION (birthday = date'2019-01-02') VALUES ('Amy Smith', '123 Park Ave, San Jose'); + +-- Insert with a column list +INSERT INTO students (address, name, student_id) VALUES ('Hangzhou, China', 'Kent Yao', 11215016); + +-- Insert with both a partition spec and a column list +INSERT INTO students PARTITION (student_id = 11215017) (address, name) VALUES ('Hangzhou, China', 'Kent Yao Jr.'); + + +-- ============================================ + + +-- Syntax +-- INSERT OVERWRITE [ LOCAL ] DIRECTORY [ directory_path ] +-- { spark_format | hive_format } +-- { VALUES ( { value | NULL } [ , ... ] ) [ , ( ... ) ] | query } +-- USING file_format [ OPTIONS ( key = val [ , ... 
] ) ] +-- [ ROW FORMAT row_format ] [ STORED AS hive_serde ] + +INSERT OVERWRITE DIRECTORY '/path/to/output/directory' SELECT * FROM your_table WHERE condition; +INSERT OVERWRITE DIRECTORY '/tmp/destination' + USING parquet + OPTIONS (col1 1, col2 2, col3 'test') + SELECT * FROM test_table; +INSERT OVERWRITE DIRECTORY + USING parquet + OPTIONS ('path' '/tmp/destination', col1 1, col2 2, col3 'test') + SELECT * FROM test_table; +INSERT OVERWRITE LOCAL DIRECTORY '/tmp/destination' + USING parquet + OPTIONS (col1 1, col2 2, col3 'test') + SELECT * FROM test_table; +INSERT OVERWRITE LOCAL DIRECTORY + USING parquet + OPTIONS ('path' '/tmp/destination', col1 1, col2 2, col3 'test') + SELECT * FROM test_table; + + +-- ============================================ + + +-- Syntax +-- INSERT [ INTO | OVERWRITE ] [ TABLE ] table_identifier [ partition_spec ] [ ( column_list ) ] { VALUES ( { value | NULL } [ , ... ] ) [ , ( ... ) ] | query } + +-- Insert Using a VALUES Clause +INSERT OVERWRITE students VALUES ('Ashur Hill', '456 Erica Ct, Cupelation', 111111), ('Brian Reed', '723 Kern Ave, Pale Alto', 222222); +-- Insert Using a SELECT Statement +INSERT OVERWRITE students PARTITION (student_id = 222222) SELECT name, address FROM persons WHERE name = "Dora Williams"; +-- Insert Using a TABLE Statement +INSERT OVERWRITE students TABLE visiting_students; +-- Insert Using a FROM Statement +INSERT OVERWRITE students FROM applicants SELECT name, address, student_id WHERE qualified = true; +-- Insert Using a Typed Date Literal for a Partition Column Value +INSERT OVERWRITE students PARTITION (birthday = date'2019-01-02') VALUES('Jason Wang', '908 Bird St, Saratoga'); +-- Insert with a column list +INSERT OVERWRITE students (address, name, student_id) VALUES ('Hangzhou, China', 'Kent Yao', 11215016); +-- Insert with both a partition spec and a column list +INSERT OVERWRITE students PARTITION (student_id = 11215016) (address, name) VALUES ('Hangzhou, China', 'Kent Yao Jr.'); diff --git 
a/test/parser/sql/syntax/fixtures/select.sql b/test/parser/sql/syntax/fixtures/select.sql new file mode 100644 index 00000000..cd4f9d92 --- /dev/null +++ b/test/parser/sql/syntax/fixtures/select.sql @@ -0,0 +1,573 @@ +SELECT * FROM table_name WHERE NOT (age > 30); +SELECT * FROM table_name WHERE ! (age > 30); +SELECT * FROM table_name WHERE name RLIKE 'M+'; +SELECT * FROM table_name WHERE name REGEXP 'M+'; + + +-- ============================================ + + +-- aggregate_function(input1 [, input2, ...]) FILTER (WHERE boolean_expression) +-- { PERCENTILE_CONT | PERCENTILE_DISC }(percentile) WITHIN GROUP (ORDER BY { order_by_expression [ ASC | DESC ] [ NULLS { FIRST | LAST } ] [ , ... ] }) FILTER (WHERE boolean_expression) + +CREATE OR REPLACE TEMPORARY VIEW basic_pays AS SELECT * FROM VALUES +('Diane Murphy','Accounting',8435), +('Mary Patterson','Accounting',9998), +('Barry Jones','SCM',10586) +AS basic_pays(employee_name, department, salary); + +SELECT * FROM basic_pays; +SELECT + department, + percentile_cont(0.25) WITHIN GROUP (ORDER BY salary) AS pc1, + percentile_cont(0.25) WITHIN GROUP (ORDER BY salary) FILTER (WHERE employee_name LIKE '%Bo%') AS pc2, + percentile_cont(0.25) WITHIN GROUP (ORDER BY salary DESC) AS pc3, + percentile_cont(0.25) WITHIN GROUP (ORDER BY salary DESC) FILTER (WHERE employee_name LIKE '%Bo%') AS pc4, + percentile_disc(0.25) WITHIN GROUP (ORDER BY salary) AS pd1, + percentile_disc(0.25) WITHIN GROUP (ORDER BY salary) FILTER (WHERE employee_name LIKE '%Bo%') AS pd2, + percentile_disc(0.25) WITHIN GROUP (ORDER BY salary DESC) AS pd3, + percentile_disc(0.25) WITHIN GROUP (ORDER BY salary DESC) FILTER (WHERE employee_name LIKE '%Bo%') AS pd4 +FROM basic_pays +GROUP BY department +ORDER BY department; + + +-- ============================================ + + +-- CASE [ expression ] { WHEN boolean_expression THEN then_expression } [ ... 
] +-- [ ELSE else_expression ] +-- END +SELECT id, CASE WHEN id > 200 THEN 'bigger' ELSE 'small' END FROM person; +SELECT id, CASE id WHEN 100 then 'bigger' WHEN id > 300 THEN '300' ELSE 'small' END FROM person; +SELECT * FROM person WHERE CASE 1 = 1 WHEN 100 THEN 'big' WHEN 200 THEN 'bigger' WHEN 300 THEN 'biggest' ELSE 'small' END = 'small'; + + +-- ============================================ + + +-- WITH common_table_expression [ , ... ] +-- expression_name [ ( column_name [ , ... ] ) ] [ AS ] ( query ) + +-- CTE with multiple column aliases +WITH t(x, y) AS (SELECT 1, 2) +SELECT * FROM t WHERE x = 1 AND y = 2; +-- CTE in CTE definition +WITH t AS (WITH t2 AS (SELECT 1) SELECT * FROM t2) SELECT * FROM t; +-- CTE in subquery expression +SELECT (WITH t AS (SELECT 1) SELECT * FROM t); +-- CTE in CREATE VIEW statement +CREATE VIEW v AS WITH t(a, b, c, d) AS (SELECT 1, 2, 3, 4) SELECT * FROM t; +SELECT * FROM v; + +-- If name conflict is detected in nested CTE, then AnalysisException is thrown by default. +-- SET spark.sql.legacy.ctePrecedencePolicy = CORRECTED (which is recommended), +-- inner CTE definitions take precedence over outer definitions. +SET spark.sql.legacy.ctePrecedencePolicy = CORRECTED; +WITH + t AS (SELECT 1), + t2 AS ( + WITH t AS (SELECT 2) + SELECT * FROM t + ) +SELECT * FROM t2; + + +-- ============================================ + + +-- CLUSTER BY { expression [ , ... ] } +SELECT name, age FROM person; +SELECT age, name FROM person CLUSTER BY age; + + +-- ============================================ + + +-- DISTRIBUTE BY { expression [ , ... 
] } +SELECT age, name FROM person DISTRIBUTE BY age; + + +-- ============================================ + + +-- EXPLAIN [ EXTENDED | CODEGEN | COST | FORMATTED ] statement + +-- Default Output +EXPLAIN select k, sum(v) from values (1, 2), (1, 3) t(k, v) group by k; +-- Using Extended +EXPLAIN EXTENDED select k, sum(v) from values (1, 2), (1, 3) t(k, v) group by k; + +-- https://github.com/DTStack/dt-sql-parser/issues/131 +SELECT count(1) FROM tableName; +-- https://github.com/DTStack/dt-sql-parser/issues/131 +DELETE FROM tableName WHERE (SELECT count(1) FROM tableName WHERE pt = '20230601') > 0 AND pt = '20230601'; + + +-- ============================================ + + +-- file_format.`file_path` +SELECT * FROM parquet.`examples/src/main/resources/users.parquet`; +SELECT * FROM orc.`examples/src/main/resources/users.orc`; +SELECT * FROM json.`examples/src/main/resources/people.json`; + + +-- ============================================ + + +-- GROUP BY group_expression [ , group_expression [ , ... ] ] [ WITH { ROLLUP | CUBE } ] +-- GROUP BY { group_expression | { ROLLUP | CUBE | GROUPING SETS } (grouping_set [ , ...]) } [ , ... ] +-- aggregate_name ( [ DISTINCT ] expression [ , ... ] ) [ FILTER ( WHERE boolean_expression ) ] + +-- Sum of quantity per dealership. Group by `id`. +SELECT id, sum(quantity) FROM dealer GROUP BY id ORDER BY id; +-- Use column position in GROUP by clause. +SELECT id, sum(quantity) FROM dealer GROUP BY 1 ORDER BY 1; +-- Multiple aggregations. +-- 1. Sum of quantity per dealership. +-- 2. Max quantity per dealership. +SELECT id, sum(quantity) AS sum, max(quantity) AS max FROM dealer GROUP BY id ORDER BY id; +-- Count the number of distinct dealer cities per car_model. +SELECT car_model, count(DISTINCT city) AS count FROM dealer GROUP BY car_model; +-- Sum of only 'Honda Civic' and 'Honda CRV' quantities per dealership. 
+SELECT id, sum(quantity) FILTER ( + WHERE car_model IN ('Honda Civic', 'Honda CRV') + ) AS `sum(quantity)` FROM dealer + GROUP BY id ORDER BY id; + +-- Aggregations using multiple sets of grouping columns in a single statement. +-- Following performs aggregations based on four sets of grouping columns. +-- 1. city, car_model +-- 2. city +-- 3. car_model +-- 4. Empty grouping set. Returns quantities for all city and car models. +SELECT city, car_model, sum(quantity) AS sum FROM dealer + GROUP BY GROUPING SETS ((city, car_model), (city), (car_model), ()) + ORDER BY city; + +-- Group by processing with `ROLLUP` clause. +-- Equivalent GROUP BY GROUPING SETS ((city, car_model), (city), ()) +SELECT city, car_model, sum(quantity) AS sum FROM dealer + GROUP BY city, car_model WITH ROLLUP + ORDER BY city, car_model; + +-- Group by processing with `CUBE` clause. +-- Equivalent GROUP BY GROUPING SETS ((city, car_model), (city), (car_model), ()) +SELECT city, car_model, sum(quantity) AS sum FROM dealer + GROUP BY city, car_model WITH CUBE + ORDER BY city, car_model; + +--Select the first row in column age +SELECT FIRST(age) FROM person; + +--Get the first row in column `age` ignore nulls,last row in column `id` and sum of column `id`. +SELECT FIRST(age IGNORE NULLS), LAST(id), SUM(id) FROM person; + + +-- ============================================ + + +/*+ hint [ , ... 
] */ +SELECT /*+ COALESCE(3) */ * FROM t; +SELECT /*+ REPARTITION(3) */ * FROM t; +SELECT /*+ REPARTITION(c) */ * FROM t; +SELECT /*+ REPARTITION(3, c) */ * FROM t; +SELECT /*+ REPARTITION_BY_RANGE(c) */ * FROM t; +SELECT /*+ REPARTITION_BY_RANGE(3, c) */ * FROM t; +SELECT /*+ REBALANCE */ * FROM t; +SELECT /*+ REBALANCE(3) */ * FROM t; +SELECT /*+ REBALANCE(c) */ * FROM t; +SELECT /*+ REBALANCE(3, c) */ * FROM t; + +-- multiple partitioning hints +EXPLAIN EXTENDED SELECT /*+ REPARTITION(100), COALESCE(500), REPARTITION_BY_RANGE(3, c) */ * FROM t; + +-- Join Hints for broadcast join +SELECT /*+ BROADCAST(t1) */ * FROM t1 INNER JOIN t2 ON t1.key = t2.key; +SELECT /*+ BROADCASTJOIN (t1) */ * FROM t1 left JOIN t2 ON t1.key = t2.key; +SELECT /*+ MAPJOIN(t2) */ * FROM t1 right JOIN t2 ON t1.key = t2.key; + +-- Join Hints for shuffle sort merge join +SELECT /*+ SHUFFLE_MERGE(t1) */ * FROM t1 INNER JOIN t2 ON t1.key = t2.key; +SELECT /*+ MERGEJOIN(t2) */ * FROM t1 INNER JOIN t2 ON t1.key = t2.key; +SELECT /*+ MERGE(t1) */ * FROM t1 INNER JOIN t2 ON t1.key = t2.key; + +-- Join Hints for shuffle hash join +SELECT /*+ SHUFFLE_HASH(t1) */ * FROM t1 INNER JOIN t2 ON t1.key = t2.key; + +-- Join Hints for shuffle-and-replicate nested loop join +SELECT /*+ SHUFFLE_REPLICATE_NL(t1) */ * FROM t1 INNER JOIN t2 ON t1.key = t2.key; + +SELECT /*+ BROADCAST(t1), MERGE(t1, t2) */ * FROM t1 INNER JOIN t2 ON t1.key = t2.key; + + +-- ============================================ + + +-- HAVING boolean_expression + +-- `HAVING` clause referring to column in `GROUP BY`. +SELECT city, sum(quantity) AS sum FROM dealer GROUP BY city HAVING city = 'Fremont'; +-- `HAVING` clause referring to aggregate function. +SELECT city, sum(quantity) AS sum FROM dealer GROUP BY city HAVING sum(quantity) > 15; +-- `HAVING` clause referring to aggregate function by its alias. 
+SELECT city, sum(quantity) AS sum FROM dealer GROUP BY city HAVING sum > 15; +-- `HAVING` clause referring to a different aggregate function than what is present in +-- `SELECT` list. +SELECT city, sum(quantity) AS sum FROM dealer GROUP BY city HAVING max(quantity) > 15; +-- `HAVING` clause referring to constant expression. +SELECT city, sum(quantity) AS sum FROM dealer GROUP BY city HAVING 1 > 0 ORDER BY city; +-- `HAVING` clause without a `GROUP BY` clause. +SELECT sum(quantity) AS sum FROM dealer HAVING sum(quantity) > 10; + + +-- ============================================ + + +-- VALUES ( expression [ , ... ] ) [ table_alias ] + +-- single row, without a table alias +SELECT * FROM VALUES ("one", 1); +-- three rows with a table alias +SELECT * FROM VALUES ("one", 1), ("two", 2), ("three", null) AS data(a, b); +-- complex types with a table alias +SELECT * FROM VALUES ("one", array(0, 1)), ("two", array(2, 3)) AS data(a, b); + + +-- ============================================ + + +-- relation { [ join_type ] JOIN [ LATERAL ] relation [ join_criteria ] | NATURAL join_type JOIN [ LATERAL ] relation } + +-- Use employee and department tables to demonstrate different type of joins. +SELECT * FROM employee; +SELECT * FROM department; +-- Use employee and department tables to demonstrate inner join. +SELECT id, name, employee.deptno, deptname FROM employee INNER JOIN department ON employee.deptno = department.deptno; +-- Use employee and department tables to demonstrate left join. +SELECT id, name, employee.deptno, deptname FROM employee LEFT JOIN department ON employee.deptno = department.deptno; +-- Use employee and department tables to demonstrate right join. +SELECT id, name, employee.deptno, deptname FROM employee RIGHT JOIN department ON employee.deptno = department.deptno; +-- Use employee and department tables to demonstrate full join. 
+SELECT id, name, employee.deptno, deptname FROM employee FULL JOIN department ON employee.deptno = department.deptno; +-- Use employee and department tables to demonstrate cross join. +SELECT id, name, employee.deptno, deptname FROM employee CROSS JOIN department; +-- Use employee and department tables to demonstrate semi join. +SELECT * FROM employee SEMI JOIN department ON employee.deptno = department.deptno; +-- Use employee and department tables to demonstrate anti join. +SELECT * FROM employee ANTI JOIN department ON employee.deptno = department.deptno; + + +-- ============================================ + + +-- [ LATERAL ] primary_relation [ join_relation ] +SELECT * FROM t1, + LATERAL (SELECT * FROM t2 WHERE t1.c1 = t2.c1); +SELECT a, b, c FROM t1, + LATERAL (SELECT c1 + c2 AS a), + LATERAL (SELECT c1 - c2 AS b), + LATERAL (SELECT a * b AS c); + + +-- ============================================ + + +-- LATERAL VIEW [ OUTER ] generator_function ( expression [ , ... ] ) [ table_alias ] AS column_alias [ , ... ] + +SELECT * FROM person + LATERAL VIEW EXPLODE(ARRAY(30, 60)) tableName AS c_age + LATERAL VIEW EXPLODE(ARRAY(40, 80)) AS d_age; +SELECT c_age, COUNT(1) FROM person + LATERAL VIEW EXPLODE(ARRAY(30, 60)) AS c_age + LATERAL VIEW EXPLODE(ARRAY(40, 80)) AS d_age +GROUP BY c_age; +SELECT * FROM person LATERAL VIEW EXPLODE(ARRAY()) tableName AS c_age; +SELECT * FROM person LATERAL VIEW OUTER EXPLODE(ARRAY()) tableName AS c_age; + + +-- ============================================ + + +-- [ NOT ] { LIKE search_pattern [ ESCAPE esc_char ] | [ RLIKE | REGEXP ] regex_pattern } +-- [ NOT ] { LIKE quantifiers ( search_pattern [ , ... 
]) } +SELECT * FROM person WHERE name LIKE 'M%'; +SELECT * FROM person WHERE name LIKE 'M_ry'; +SELECT * FROM person WHERE name NOT LIKE 'M_ry'; +SELECT * FROM person WHERE name RLIKE 'M+'; +SELECT * FROM person WHERE name REGEXP 'M+'; +SELECT * FROM person WHERE name LIKE '%\_%'; +SELECT * FROM person WHERE name LIKE '%$_%' ESCAPE '$'; +SELECT * FROM person WHERE name LIKE ALL ('%an%', '%an'); +SELECT * FROM person WHERE name LIKE ANY ('%an%', '%an'); +SELECT * FROM person WHERE name LIKE SOME ('%an%', '%an'); +SELECT * FROM person WHERE name NOT LIKE ALL ('%an%', '%an'); +SELECT * FROM person WHERE name NOT LIKE ANY ('%an%', '%an'); +SELECT * FROM person WHERE name NOT LIKE SOME ('%an%', '%an'); + + +-- ============================================ + + +-- LIMIT { ALL | integer_expression } +-- Select the first two rows. +SELECT name, age FROM person ORDER BY name LIMIT 2; +-- Specifying ALL option on LIMIT returns all the rows. +SELECT name, age FROM person ORDER BY name LIMIT ALL; +-- A function expression as an input to LIMIT. +SELECT name, age FROM person ORDER BY name LIMIT length('SPARK'); +-- A non-foldable expression as an input to LIMIT is not allowed. +SELECT name, age FROM person ORDER BY name LIMIT length(name); + + +-- ============================================ + + +-- OFFSET integer_expression + +-- Skip the first two rows. +SELECT name, age FROM person ORDER BY name OFFSET 2; +-- Skip the first two rows and returns the next three rows. +SELECT name, age FROM person ORDER BY name LIMIT 3 OFFSET 2; +-- A function expression as an input to OFFSET. +SELECT name, age FROM person ORDER BY name OFFSET length('SPARK'); +-- A non-foldable expression as an input to OFFSET is not allowed. +SELECT name, age FROM person ORDER BY name OFFSET length(name); + + +-- ============================================ + + +-- ORDER BY { expression [ sort_direction | nulls_sort_order ] [ , ... ] } +-- Sort rows by age. 
By default rows are sorted in ascending manner with NULL FIRST. +SELECT name, age FROM person ORDER BY age; +-- Sort rows in ascending manner keeping null values to be last. +SELECT name, age FROM person ORDER BY age NULLS LAST; +-- Sort rows by age in descending manner, which defaults to NULL LAST. +SELECT name, age FROM person ORDER BY age DESC; +-- Sort rows in ascending manner keeping null values to be first. +SELECT name, age FROM person ORDER BY age DESC NULLS FIRST; +-- Sort rows based on more than one column with each column having different +-- sort direction. +SELECT * FROM person ORDER BY name ASC, age DESC; + + +-- ============================================ + + +-- PIVOT ( { aggregate_expression [ AS aggregate_expression_alias ] } [ , ... ] +-- FOR column_list IN ( expression_list ) ) +SELECT * FROM person PIVOT (SUM(age) AS a, AVG(class) AS c FOR name IN ('John' AS john, 'Mike' AS mike)); +SELECT * FROM person PIVOT (SUM(age) AS a, AVG(class) AS c FOR (name, age) IN (('John', 30) AS c1, ('Mike', 40) AS c2)); + + +-- ============================================ + + +-- [ ( ] relation [ ) ] EXCEPT | MINUS [ ALL | DISTINCT ] [ ( ] relation [ ) ] +-- Use number1 and number2 tables to demonstrate set operators in this page. 
+SELECT * FROM number1; +SELECT * FROM number2; +SELECT c FROM number1 EXCEPT SELECT c FROM number2; +SELECT c FROM number1 MINUS SELECT c FROM number2; +SELECT c FROM number1 EXCEPT ALL (SELECT c FROM number2); +SELECT c FROM number1 MINUS ALL (SELECT c FROM number2); + +-- [ ( ] relation [ ) ] INTERSECT [ ALL | DISTINCT ] [ ( ] relation [ ) ] +(SELECT c FROM number1) INTERSECT (SELECT c FROM number2); +(SELECT c FROM number1) INTERSECT DISTINCT (SELECT c FROM number2); +(SELECT c FROM number1) INTERSECT ALL (SELECT c FROM number2); + +-- [ ( ] relation [ ) ] UNION [ ALL | DISTINCT ] [ ( ] relation [ ) ] +(SELECT c FROM number1) UNION (SELECT c FROM number2); +(SELECT c FROM number1) UNION DISTINCT (SELECT c FROM number2); +SELECT c FROM number1 UNION ALL (SELECT c FROM number2); + + +-- ============================================ + + +-- SORT BY { expression [ sort_direction | nulls_sort_order ] [ , ... ] } + +-- Sort rows by `name` within each partition in ascending manner +SELECT /*+ REPARTITION(zip_code) */ name, age, zip_code FROM person SORT BY name; +-- Sort rows within each partition using column position. +SELECT /*+ REPARTITION(zip_code) */ name, age, zip_code FROM person SORT BY 1; +-- Sort rows within partition in ascending manner keeping null values to be last. +SELECT /*+ REPARTITION(zip_code) */ age, name, zip_code FROM person SORT BY age NULLS LAST; +-- Sort rows by age within each partition in descending manner, which defaults to NULL LAST. +SELECT /*+ REPARTITION(zip_code) */ age, name, zip_code FROM person SORT BY age DESC; +-- Sort rows by age within each partition in descending manner keeping null values to be first. +SELECT /*+ REPARTITION(zip_code) */ age, name, zip_code FROM person SORT BY age DESC NULLS FIRST; +-- Sort rows within each partition based on more than one column with each column having +-- different sort direction. 
+SELECT /*+ REPARTITION(zip_code) */ name, age, zip_code FROM person SORT BY name ASC, age DESC; + + +-- ============================================ + + +-- TABLESAMPLE ({ integer_expression | decimal_expression } PERCENT) +-- | TABLESAMPLE ( integer_expression ROWS ) +-- | TABLESAMPLE ( BUCKET integer_expression OUT OF integer_expression ) + +SELECT * FROM test TABLESAMPLE (50 PERCENT); +SELECT * FROM test TABLESAMPLE (5 ROWS); +SELECT * FROM test TABLESAMPLE (BUCKET 4 OUT OF 10); + + +-- ============================================ + + +-- SELECT TRANSFORM ( expression [ , ... ] ) +-- [ ROW FORMAT row_format ] +-- [ RECORDWRITER record_writer_class ] +-- USING command_or_script [ AS ( [ col_name [ col_type ] ] [ , ... ] ) ] +-- [ ROW FORMAT row_format ] +-- [ RECORDREADER record_reader_class ] + +-- With specified output without data type +SELECT TRANSFORM(zip_code, name, age) + USING 'cat' AS (a, b, c) +FROM person +WHERE zip_code > 94511; + +-- With specified output with data type +SELECT TRANSFORM(zip_code, name, age) + USING 'cat' AS (a STRING, b STRING, c STRING) +FROM person +WHERE zip_code > 94511; + +-- Using ROW FORMAT DELIMITED +SELECT TRANSFORM(name, age) + ROW FORMAT DELIMITED + FIELDS TERMINATED BY ',' + LINES TERMINATED BY '\n' + NULL DEFINED AS 'NULL' + USING 'cat' AS (name_age string) + ROW FORMAT DELIMITED + FIELDS TERMINATED BY '@' + LINES TERMINATED BY '\n' + NULL DEFINED AS 'NULL' +FROM person; + +-- Using Hive Serde +SELECT TRANSFORM(zip_code, name, age) + ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' + WITH SERDEPROPERTIES ( + 'field.delim' = '\t' + ) + USING 'cat' AS (a STRING, b STRING, c STRING) + ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' + WITH SERDEPROPERTIES ( + 'field.delim' = '\t' + ) +FROM person +WHERE zip_code > 94511; + +-- Schema-less mode +SELECT TRANSFORM(zip_code, name, age) + USING 'cat' +FROM person +WHERE zip_code > 94500; + + +-- 
============================================ + + +-- range call with end +SELECT * FROM range(6 + cos(3)); +-- range call with start and end +SELECT * FROM range(5, 10); +-- range call with numPartitions +SELECT * FROM range(0, 10, 2, 200); +-- range call with a table alias +SELECT * FROM range(5, 8) AS test; +SELECT explode(array(10, 20)); +SELECT inline(array(struct(1, 'a'), struct(2, 'b'))); +SELECT posexplode(array(10,20)); +SELECT stack(2, 1, 2, 3); +SELECT json_tuple('{"a":1, "b":2}', 'a', 'b'); +SELECT parse_url('http://spark.apache.org/path?query=1', 'HOST'); +SELECT * FROM test LATERAL VIEW explode (ARRAY(3,4)) AS c2; + + +-- ============================================ + + +-- UNPIVOT [ { INCLUDE | EXCLUDE } NULLS ] ( +-- { single_value_column_unpivot | multi_value_column_unpivot } +-- ) [[AS] alias] + +-- single_value_column_unpivot: +-- values_column +-- FOR name_column +-- IN (unpivot_column [[AS] alias] [, ...]) + +-- multi_value_column_unpivot: +-- (values_column [, ...]) +-- FOR name_column +-- IN ((unpivot_column [, ...]) [[AS] alias] [, ...]) + +-- column names are used as unpivot columns +SELECT * FROM sales_quarterly + UNPIVOT ( + sales FOR quarter IN (q1, q2, q3, q4) + ); + +-- NULL values are excluded by default, they can be included +-- unpivot columns can be alias +-- unpivot result can be referenced via its alias +SELECT up.* FROM sales_quarterly + UNPIVOT INCLUDE NULLS ( + sales FOR quarter IN (q1 AS Q1, q2 AS Q2, q3 AS Q3, q4 AS Q4) + ) AS up; + +-- multiple value columns can be unpivoted per row +SELECT * FROM sales_quarterly + UNPIVOT EXCLUDE NULLS ( + (first_quarter, second_quarter) + FOR half_of_the_year IN ( + (q1, q2) AS H1, + (q3, q4) AS H2 + ) + ); + + +-- ============================================ + + +-- WHERE boolean_expression + +-- Comparison operator in `WHERE` clause. +SELECT * FROM person WHERE id > 200 ORDER BY id; +-- Comparison and logical operators in `WHERE` clause. 
+SELECT * FROM person WHERE id = 200 OR id = 300 ORDER BY id; +-- IS NULL expression in `WHERE` clause. +SELECT * FROM person WHERE id > 300 OR age IS NULL ORDER BY id; +-- Function expression in `WHERE` clause. +SELECT * FROM person WHERE length(name) > 3 ORDER BY id; +-- `BETWEEN` expression in `WHERE` clause. +SELECT * FROM person WHERE id BETWEEN 200 AND 300 ORDER BY id; +-- Scalar Subquery in `WHERE` clause. +SELECT * FROM person WHERE age > (SELECT avg(age) FROM person); +-- Correlated Subquery in `WHERE` clause. +SELECT * FROM person AS parent WHERE EXISTS (SELECT 1 FROM person AS child WHERE parent.id = child.id AND child.age IS NULL); + + +-- ============================================ + + +-- window_function [ nulls_option ] OVER +-- ( [ { PARTITION | DISTRIBUTE } BY partition_col_name = partition_col_val ( [ , ... ] ) ] +-- { ORDER | SORT } BY expression [ ASC | DESC ] [ NULLS { FIRST | LAST } ] [ , ... ] +-- [ window_frame ] ) + +SELECT * FROM employees; +SELECT name, dept, salary, RANK() OVER (PARTITION BY dept ORDER BY salary) AS rank FROM employees; +SELECT name, dept, salary, DENSE_RANK() OVER (PARTITION BY dept ORDER BY salary ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS dense_rank FROM employees; +SELECT name, dept, age, CUME_DIST() OVER (PARTITION BY dept ORDER BY age RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW) AS cume_dist FROM employees; +SELECT name, dept, salary, MIN(salary) OVER (PARTITION BY dept ORDER BY salary) AS min FROM employees; +SELECT name, salary, LAG(salary) OVER (PARTITION BY dept ORDER BY salary) AS lag, LEAD(salary, 1, 0) OVER (PARTITION BY dept ORDER BY salary) AS lead FROM employees; +SELECT id, v, LEAD(v, 0) IGNORE NULLS OVER w lead, LAG(v, 0) IGNORE NULLS OVER w lag, NTH_VALUE(v, 2) IGNORE NULLS OVER w nth_value, FIRST_VALUE(v) IGNORE NULLS OVER w first_value, LAST_VALUE(v) IGNORE NULLS OVER w last_value FROM test_ignore_null WINDOW w AS (ORDER BY id) ORDER BY id; diff --git 
a/test/parser/sql/syntax/fixtures/show.sql b/test/parser/sql/syntax/fixtures/show.sql new file mode 100644 index 00000000..209877f8 --- /dev/null +++ b/test/parser/sql/syntax/fixtures/show.sql @@ -0,0 +1,64 @@ +-- SHOW COLUMNS table_identifier [ database ] +SHOW COLUMNS IN customer; +SHOW COLUMNS IN salesdb.customer; +SHOW COLUMNS IN customer IN salesdb; + + +-- SHOW CREATE TABLE table_identifier [ AS SERDE ] +SHOW CREATE TABLE test; +SHOW CREATE TABLE test AS SERDE; + + +-- SHOW { DATABASES | SCHEMAS } [ LIKE regex_pattern ] +SHOW DATABASES; +SHOW DATABASES LIKE 'pay*'; +SHOW SCHEMAS; + + +-- SHOW [ function_kind ] FUNCTIONS [ { FROM | IN } database_name ] [ LIKE regex_pattern ] +SHOW FUNCTIONS trim; +SHOW SYSTEM FUNCTIONS concat; +SHOW SYSTEM FUNCTIONS FROM salesdb LIKE 'max'; +SHOW FUNCTIONS LIKE 't*'; +SHOW FUNCTIONS LIKE 'yea*|windo*'; +SHOW FUNCTIONS LIKE 't[a-z][a-z][a-z]'; + + +-- SHOW PARTITIONS table_identifier [ partition_spec ] +SHOW PARTITIONS customer; +SHOW PARTITIONS salesdb.customer; +SHOW PARTITIONS customer PARTITION (state = 'CA', city = 'Fremont'); +SHOW PARTITIONS customer PARTITION (state = 'CA'); +SHOW PARTITIONS customer PARTITION (city = 'San Jose'); + + +-- SHOW TABLE EXTENDED [ { IN | FROM } database_name ] LIKE regex_pattern +-- [ partition_spec ] +SHOW TABLE EXTENDED LIKE 'employee'; +SHOW TABLE EXTENDED LIKE 'employe*'; +SHOW TABLE EXTENDED IN default LIKE 'employee' PARTITION (grade=1); +SHOW TABLE EXTENDED IN default LIKE 'empl*' PARTITION (grade=1); + + +-- SHOW TABLES [ { FROM | IN } database_name ] [ LIKE regex_pattern ] +SHOW TABLES; +SHOW TABLES FROM userdb; +SHOW TABLES IN userdb; +SHOW TABLES FROM default LIKE 'sam*'; +SHOW TABLES LIKE 'sam*|suj'; + + +-- SHOW TBLPROPERTIES table_identifier +-- [ ( unquoted_property_key | property_key_as_string_literal ) ] +SHOW TBLPROPERTIES customer; +SHOW TBLPROPERTIES salesdb.customer; +SHOW TBLPROPERTIES customer (created.by.user); +SHOW TBLPROPERTIES customer ('created.date'); + + +-- 
SHOW VIEWS [ { FROM | IN } database_name ] [ LIKE regex_pattern ] +SHOW VIEWS; +SHOW VIEWS FROM userdb; +SHOW VIEWS IN global_temp; +SHOW VIEWS FROM default LIKE 'sam*'; +SHOW VIEWS LIKE 'sam|suj|temp*'; diff --git a/test/parser/sql/syntax/fixtures/truncate.sql b/test/parser/sql/syntax/fixtures/truncate.sql new file mode 100644 index 00000000..2b68fdac --- /dev/null +++ b/test/parser/sql/syntax/fixtures/truncate.sql @@ -0,0 +1,5 @@ +-- Syntax +-- TRUNCATE TABLE table_identifier [ partition_spec ] + +TRUNCATE TABLE Student partition(age=10); +TRUNCATE TABLE Student; diff --git a/test/parser/sql/syntax/fixtures/update.sql b/test/parser/sql/syntax/fixtures/update.sql new file mode 100644 index 00000000..c13dbddd --- /dev/null +++ b/test/parser/sql/syntax/fixtures/update.sql @@ -0,0 +1,7 @@ +UPDATE t1 SET col1 = col1 + 1; +UPDATE t1 SET col1 = col1 + 1, col2 = col1; +UPDATE t SET id = id + 1; +UPDATE items SET retail = retail * 0.9 WHERE id IN (SELECT id FROM items WHERE retail / wholesale >= 1.3 AND quantity > 100); +UPDATE LOW_PRIORITY t1 SET col1 = col1 + 1 WHERE age = 12; +UPDATE IGNORE t1 SET col1 = col1 + 1 WHERE age = 12; +UPDATE t1 SET col1 = col1 + 1 WHERE age = 12; diff --git a/test/parser/sql/syntax/fixtures/use.sql b/test/parser/sql/syntax/fixtures/use.sql new file mode 100644 index 00000000..7e284a3d --- /dev/null +++ b/test/parser/sql/syntax/fixtures/use.sql @@ -0,0 +1,4 @@ +-- Syntax +-- USE database_name + +USE userDB; diff --git a/test/parser/sql/validateInvalidSql.test.ts b/test/parser/sql/validateInvalidSql.test.ts new file mode 100644 index 00000000..7828e085 --- /dev/null +++ b/test/parser/sql/validateInvalidSql.test.ts @@ -0,0 +1,16 @@ +import { Sql } from 'src/parser/sql'; + +const randomText = `dhsdansdnkla ndjnsla ndnalks`; +const unCompleteSQL = `CREATE TABLE`; + +describe('Sql validate invalid sql', () => { + const sql = new Sql(); + + test('validate random text', () => { + expect(sql.validate(randomText).length).not.toBe(0); + }); + + 
test('validate incomplete sql', () => { + expect(sql.validate(unCompleteSQL).length).not.toBe(0); + }); +}); diff --git a/test/parser/sql/visitor.test.ts b/test/parser/sql/visitor.test.ts new file mode 100644 index 00000000..b6382f99 --- /dev/null +++ b/test/parser/sql/visitor.test.ts @@ -0,0 +1,33 @@ +import { Sql } from 'src/parser/sql'; +import { SqlParserVisitor } from 'src/lib/sql/SqlParserVisitor'; + +describe('Sql Visitor Tests', () => { + const expectTableName = 'user1'; + const sqlText = `select id,name,sex from ${expectTableName};`; + const sql = new Sql(); + + const parseTree = sql.parse(sqlText, (error) => { + console.error('Parse error:', error); + }); + + test('Visitor visitTableName', () => { + class MyVisitor extends SqlParserVisitor { + defaultResult(): string { + return ''; + } + aggregateResult(aggregate: string, nextResult: string): string { + return aggregate + nextResult; + } + visitProgram = (ctx) => { + return this.visitChildren(ctx); + }; + visitTableName = (ctx) => { + return ctx.getText().toLowerCase(); + }; + } + const visitor = new MyVisitor(); + const result = visitor.visit(parseTree); + + expect(result).toBe(expectTableName); + }); +});