From a627861e8af210cdb185232167b73a474d556edc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabr=C3=ADzio=20de=20Royes=20Mello?= Date: Tue, 3 Sep 2024 20:31:20 -0300 Subject: [PATCH] PG17: Regression Tests Added missing template output tests and also converted others to template due to output changes. Also fixed some small pieces of code to properly run in all current supported Postgres versions (14, 15, 16 and 17) --- .github/gh_matrix_builder.py | 3 +- src/utils.c | 3 + .../{agg_bookends.out => agg_bookends-14.out} | 0 test/expected/agg_bookends-15.out | 2507 ++++++++++++ test/expected/agg_bookends-16.out | 2507 ++++++++++++ test/expected/agg_bookends-17.out | 2458 ++++++++++++ test/expected/alter.out | 10 +- .../{drop_owned.out => drop_owned-14.out} | 0 test/expected/drop_owned-15.out | 194 + test/expected/drop_owned-16.out | 194 + test/expected/drop_owned-17.out | 194 + ...hypertable.out => grant_hypertable-14.out} | 0 test/expected/grant_hypertable-15.out | 394 ++ test/expected/grant_hypertable-16.out | 394 ++ test/expected/grant_hypertable-17.out | 394 ++ test/expected/insert-14.out | 11 +- test/expected/insert-15.out | 11 +- test/expected/insert-16.out | 11 +- test/expected/insert-17.out | 19 +- ...ll_exclusion.out => null_exclusion-14.out} | 0 test/expected/null_exclusion-15.out | 126 + test/expected/null_exclusion-16.out | 126 + test/expected/null_exclusion-17.out | 118 + test/expected/plan_expand_hypertable-17.out | 20 +- test/expected/rowsecurity-17.out | 274 +- test/sql/.gitignore | 4 + test/sql/CMakeLists.txt | 8 +- .../{agg_bookends.sql => agg_bookends.sql.in} | 0 test/sql/alter.sql | 4 +- .../sql/{drop_owned.sql => drop_owned.sql.in} | 0 ...hypertable.sql => grant_hypertable.sql.in} | 0 test/sql/insert.sql.in | 2 + ...ll_exclusion.sql => null_exclusion.sql.in} | 0 test/temp_schedule.backup | 27 + tsl/src/compression/compression_storage.c | 25 +- tsl/test/expected/cagg_ddl-17.out | 70 +- .../expected/cagg_migrate_function-17.out | 8 +- ...ermissions.out => cagg_permissions-14.out} | 0 tsl/test/expected/cagg_permissions-15.out | 321 ++ tsl/test/expected/cagg_permissions-16.out | 321 ++ tsl/test/expected/cagg_permissions-17.out | 321 ++ tsl/test/expected/cagg_repair-17.out | 12 +- tsl/test/expected/cagg_usage-17.out | 57 +- tsl/test/expected/chunk_column_stats-17.out | 544 +++ tsl/test/expected/chunk_utils_internal.out | 119 +- tsl/test/expected/compression.out | 3 +- tsl/test/expected/compression_errors-17.out | 7 - ...ons.out => compression_permissions-14.out} | 0 .../expected/compression_permissions-15.out | 374 ++ .../expected/compression_permissions-16.out | 374 ++ .../expected/compression_permissions-17.out | 374 ++ ...e.out => compression_update_delete-14.out} | 0 .../expected/compression_update_delete-15.out | 3405 +++++++++++++++++ .../expected/compression_update_delete-16.out | 3405 +++++++++++++++++ .../expected/compression_update_delete-17.out | 3405 +++++++++++++++++ .../merge_append_partially_compressed-17.out | 25 + tsl/test/expected/plan_skip_scan-17.out | 56 +- .../expected/transparent_decompression-17.out | 27 +- tsl/test/sql/CMakeLists.txt | 39 +- ...ermissions.sql => cagg_permissions.sql.in} | 0 tsl/test/sql/chunk_utils_internal.sql | 22 +- tsl/test/sql/compression.sql | 3 +- ...ons.sql => compression_permissions.sql.in} | 0 ...e.sql => compression_update_delete.sql.in} | 0 .../chunk_utils_internal_orderedappend.sql | 10 +- tsl/test/t/002_logrepl_decomp_marker.pl | 2 - 66 files changed, 22933 insertions(+), 409 deletions(-) rename test/expected/{agg_bookends.out => agg_bookends-14.out} (100%) create mode 100644 test/expected/agg_bookends-15.out create mode 100644 test/expected/agg_bookends-16.out create mode 100644 test/expected/agg_bookends-17.out rename test/expected/{drop_owned.out => drop_owned-14.out} (100%) create mode 100644 test/expected/drop_owned-15.out create mode 100644 test/expected/drop_owned-16.out create mode 100644 test/expected/drop_owned-17.out rename test/expected/{grant_hypertable.out => grant_hypertable-14.out} (100%) create mode 100644 test/expected/grant_hypertable-15.out create mode 100644 test/expected/grant_hypertable-16.out create mode 100644 test/expected/grant_hypertable-17.out rename test/expected/{null_exclusion.out => null_exclusion-14.out} (100%) create mode 100644 test/expected/null_exclusion-15.out create mode 100644 test/expected/null_exclusion-16.out create mode 100644 test/expected/null_exclusion-17.out rename test/sql/{agg_bookends.sql => agg_bookends.sql.in} (100%) rename test/sql/{drop_owned.sql => drop_owned.sql.in} (100%) rename test/sql/{grant_hypertable.sql => grant_hypertable.sql.in} (100%) rename test/sql/{null_exclusion.sql => null_exclusion.sql.in} (100%) create mode 100644 test/temp_schedule.backup rename tsl/test/expected/{cagg_permissions.out => cagg_permissions-14.out} (100%) create mode 100644 tsl/test/expected/cagg_permissions-15.out create mode 100644 tsl/test/expected/cagg_permissions-16.out create mode 100644 tsl/test/expected/cagg_permissions-17.out create mode 100644 tsl/test/expected/chunk_column_stats-17.out rename tsl/test/expected/{compression_permissions.out => compression_permissions-14.out} (100%) create mode 100644 tsl/test/expected/compression_permissions-15.out create mode 100644 tsl/test/expected/compression_permissions-16.out create mode 100644 tsl/test/expected/compression_permissions-17.out rename tsl/test/expected/{compression_update_delete.out => compression_update_delete-14.out} (100%) create mode 100644 tsl/test/expected/compression_update_delete-15.out create mode 100644 tsl/test/expected/compression_update_delete-16.out create mode 100644 tsl/test/expected/compression_update_delete-17.out rename tsl/test/sql/{cagg_permissions.sql => cagg_permissions.sql.in} (100%) rename tsl/test/sql/{compression_permissions.sql => compression_permissions.sql.in} (100%) rename tsl/test/sql/{compression_update_delete.sql => compression_update_delete.sql.in} (100%) diff --git a/.github/gh_matrix_builder.py b/.github/gh_matrix_builder.py index bf34849e0ef..c1ff5f324dc 100755 --- a/.github/gh_matrix_builder.py +++ b/.github/gh_matrix_builder.py @@ -187,7 +187,8 @@ def macos_config(overrides): "pg": "17", "snapshot": "snapshot", "tsdb_build_args": "-DEXPERIMENTAL=ON", - "skipped_tests": "merge_compress merge_dml merge size_utils ts_merge-17", + # @TODO: those skipped tests should be revisited later + "skipped_tests": "merge_compress merge_dml merge ts_merge-17 repair 001_job_crash_log", } ) ) diff --git a/src/utils.c b/src/utils.c index 272be2923cd..8755e1610db 100644 --- a/src/utils.c +++ b/src/utils.c @@ -1026,6 +1026,9 @@ ts_try_relation_cached_size(Relation rel, bool verbose) ForkNumber forkNum; bool cached = true; + if (!RELKIND_HAS_STORAGE(rel->rd_rel->relkind)) + return (int64) nblocks; + /* Get heap size, including FSM and VM */ for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++) { diff --git a/test/expected/agg_bookends.out b/test/expected/agg_bookends-14.out similarity index 100% rename from test/expected/agg_bookends.out rename to test/expected/agg_bookends-14.out diff --git a/test/expected/agg_bookends-15.out b/test/expected/agg_bookends-15.out new file mode 100644 index 00000000000..28e61df849a --- /dev/null +++ b/test/expected/agg_bookends-15.out @@ -0,0 +1,2507 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +\set TEST_BASE_NAME agg_bookends +SELECT format('include/%s_load.sql', :'TEST_BASE_NAME') as "TEST_LOAD_NAME", + format('include/%s_query.sql', :'TEST_BASE_NAME') as "TEST_QUERY_NAME", + format('%s/results/%s_results_optimized.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_OPTIMIZED", + format('%s/results/%s_results_unoptimized.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_UNOPTIMIZED" +\gset +SELECT format('\! diff -u --label "Unoptimized result" --label "Optimized result" %s %s', :'TEST_RESULTS_UNOPTIMIZED', :'TEST_RESULTS_OPTIMIZED') as "DIFF_CMD" +\gset +\set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)' +\ir :TEST_LOAD_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +CREATE TABLE btest(time timestamp NOT NULL, time_alt timestamp, gp INTEGER, temp float, strid TEXT DEFAULT 'testing'); +SELECT schema_name, table_name, created FROM create_hypertable('btest', 'time'); +psql:include/agg_bookends_load.sql:6: WARNING: column type "timestamp without time zone" used for "time" does not follow best practices +psql:include/agg_bookends_load.sql:6: WARNING: column type "timestamp without time zone" used for "time_alt" does not follow best practices + schema_name | table_name | created +-------------+------------+--------- + public | btest | t +(1 row) + +INSERT INTO btest VALUES('2017-01-20T09:00:01', '2017-01-20T10:00:00', 1, 22.5); +INSERT INTO btest VALUES('2017-01-20T09:00:21', '2017-01-20T09:00:59', 1, 21.2); +INSERT INTO btest VALUES('2017-01-20T09:00:47', '2017-01-20T09:00:58', 1, 25.1); +INSERT INTO btest VALUES('2017-01-20T09:00:02', '2017-01-20T09:00:57', 2, 35.5); +INSERT INTO btest VALUES('2017-01-20T09:00:21', '2017-01-20T09:00:56', 2, 30.2); +--TOASTED; +INSERT INTO btest VALUES('2017-01-20T09:00:43', '2017-01-20T09:01:55', 2, 20.1, repeat('xyz', 1000000) ); +CREATE TABLE btest_numeric (time timestamp NOT NULL, quantity numeric); +SELECT schema_name, table_name, created FROM create_hypertable('btest_numeric', 'time'); +psql:include/agg_bookends_load.sql:16: WARNING: column type "timestamp without time zone" used for "time" does not follow best practices + schema_name | table_name | created +-------------+---------------+--------- + public | btest_numeric | t +(1 row) + +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- canary for results diff +-- this should be only output of results diff +SELECT setting, current_setting(setting) AS value from (VALUES ('timescaledb.enable_optimizations')) v(setting); + setting | value +----------------------------------+------- + timescaledb.enable_optimizations | on +(1 row) + +:PREFIX SELECT time, gp, temp FROM btest ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Index Scan Backward using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (actual rows=6 loops=1) +(1 row) + +:PREFIX SELECT last(temp, time) FROM btest; + QUERY PLAN +------------------------------------------------------------------------------------------------------------ + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) +(5 rows) + +:PREFIX SELECT first(temp, time) FROM btest; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (actual rows=1 loops=1) +(4 rows) + +:PREFIX SELECT last(temp, time_alt) FROM btest; + QUERY PLAN +------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) +(2 rows) + +:PREFIX SELECT first(temp, time_alt) FROM btest; + QUERY PLAN +------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) +(2 rows) + +:PREFIX SELECT gp, last(temp, time) FROM btest GROUP BY gp ORDER BY gp; + QUERY PLAN +------------------------------------------------------------------ + Sort (actual rows=2 loops=1) + Sort Key: _hyper_1_1_chunk.gp + Sort Method: quicksort + -> HashAggregate (actual rows=2 loops=1) + Group Key: _hyper_1_1_chunk.gp + Batches: 1 + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) +(7 rows) + +:PREFIX SELECT gp, first(temp, time) FROM btest GROUP BY gp ORDER BY gp; + QUERY PLAN +------------------------------------------------------------------ + Sort (actual rows=2 loops=1) + Sort Key: _hyper_1_1_chunk.gp + Sort Method: quicksort + -> HashAggregate (actual rows=2 loops=1) + Group Key: _hyper_1_1_chunk.gp + Batches: 1 + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) +(7 rows) + +--check whole row +:PREFIX SELECT gp, first(btest, time) FROM btest GROUP BY gp ORDER BY gp; + QUERY PLAN +------------------------------------------------------------------ + Sort (actual rows=2 loops=1) + Sort Key: _hyper_1_1_chunk.gp + Sort Method: quicksort + -> HashAggregate (actual rows=2 loops=1) + Group Key: _hyper_1_1_chunk.gp + Batches: 1 + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) +(7 rows) + +--check toasted col +:PREFIX SELECT gp, left(last(strid, time), 10) FROM btest GROUP BY gp ORDER BY gp; + QUERY PLAN +------------------------------------------------------------------ + Sort (actual rows=2 loops=1) + Sort Key: _hyper_1_1_chunk.gp + Sort Method: quicksort + -> HashAggregate (actual rows=2 loops=1) + Group Key: _hyper_1_1_chunk.gp + Batches: 1 + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) +(7 rows) + +:PREFIX SELECT gp, last(temp, strid) FROM btest GROUP BY gp ORDER BY gp; + QUERY PLAN +------------------------------------------------------------------ + Sort (actual rows=2 loops=1) + Sort Key: _hyper_1_1_chunk.gp + Sort Method: quicksort + -> HashAggregate (actual rows=2 loops=1) + Group Key: _hyper_1_1_chunk.gp + Batches: 1 + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) +(7 rows) + +:PREFIX SELECT gp, last(strid, temp) FROM btest GROUP BY gp ORDER BY gp; + QUERY PLAN +------------------------------------------------------------------ + Sort (actual rows=2 loops=1) + Sort Key: _hyper_1_1_chunk.gp + Sort Method: quicksort + -> HashAggregate (actual rows=2 loops=1) + Group Key: _hyper_1_1_chunk.gp + Batches: 1 + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) +(7 rows) + +BEGIN; +--check null value as last element +INSERT INTO btest VALUES('2018-01-20T09:00:43', '2017-01-20T09:00:55', 2, NULL); +:PREFIX SELECT last(temp, time) FROM btest; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (never executed) + Index Cond: ("time" IS NOT NULL) +(9 rows) + +--check non-null element "overrides" NULL because it comes after. +INSERT INTO btest VALUES('2019-01-20T09:00:43', '2018-01-20T09:00:55', 2, 30.5); +:PREFIX SELECT last(temp, time) FROM btest; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (never executed) + Index Cond: ("time" IS NOT NULL) +(11 rows) + +--check null cmp element is skipped +INSERT INTO btest VALUES('2018-01-20T09:00:43', NULL, 2, 32.3); +:PREFIX SELECT last(temp, time_alt) FROM btest; + QUERY PLAN +------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=9 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) +(5 rows) + +-- fist returns NULL value +:PREFIX SELECT first(temp, time_alt) FROM btest; + QUERY PLAN +------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=9 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) +(5 rows) + +-- test first return non NULL value +INSERT INTO btest VALUES('2016-01-20T09:00:00', '2016-01-20T09:00:00', 2, 36.5); +:PREFIX SELECT first(temp, time_alt) FROM btest; + QUERY PLAN +------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=10 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) +(6 rows) + +--check non null cmp element insert after null cmp +INSERT INTO btest VALUES('2020-01-20T09:00:43', '2020-01-20T09:00:43', 2, 35.3); +:PREFIX SELECT last(temp, time_alt) FROM btest; + QUERY PLAN +------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=11 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_5_chunk (actual rows=1 loops=1) +(7 rows) + +:PREFIX SELECT first(temp, time_alt) FROM btest; + QUERY PLAN +------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=11 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_5_chunk (actual rows=1 loops=1) +(7 rows) + +--cmp nulls should be ignored and not present in groups +:PREFIX SELECT gp, last(temp, time_alt) FROM btest GROUP BY gp ORDER BY gp; + QUERY PLAN +------------------------------------------------------------------------ + Sort (actual rows=2 loops=1) + Sort Key: _hyper_1_1_chunk.gp + Sort Method: quicksort + -> HashAggregate (actual rows=2 loops=1) + Group Key: _hyper_1_1_chunk.gp + Batches: 1 + -> Append (actual rows=11 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_5_chunk (actual rows=1 loops=1) +(12 rows) + +--Previously, some bugs were found with NULLS and numeric types, so test that +INSERT INTO btest_numeric VALUES ('2019-01-20T09:00:43', NULL); +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Index Scan using _hyper_2_6_chunk_btest_numeric_time_idx on _hyper_2_6_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) +(5 rows) + +--check non-null element "overrides" NULL because it comes after. +INSERT INTO btest_numeric VALUES('2020-01-20T09:00:43', 30.5); +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest_numeric (actual rows=1 loops=1) + Order: btest_numeric."time" DESC + -> Index Scan using _hyper_2_7_chunk_btest_numeric_time_idx on _hyper_2_7_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_2_6_chunk_btest_numeric_time_idx on _hyper_2_6_chunk (never executed) + Index Cond: ("time" IS NOT NULL) +(9 rows) + +-- do index scan for last +:PREFIX SELECT last(temp, time) FROM btest; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk (never executed) + Index Cond: ("time" IS NOT NULL) +(15 rows) + +-- do index scan for first +:PREFIX SELECT first(temp, time) FROM btest; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" + -> Index Scan Backward using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (never executed) + -> Index Scan Backward using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (never executed) + -> Index Scan Backward using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (never executed) + -> Index Scan Backward using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk (never executed) +(10 rows) + +-- can't do index scan when ordering on non-index column +:PREFIX SELECT first(temp, time_alt) FROM btest; + QUERY PLAN +------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=11 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_5_chunk (actual rows=1 loops=1) +(7 rows) + +-- do index scan for subquery +:PREFIX SELECT * FROM (SELECT last(temp, time) FROM btest) last; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk (never executed) + Index Cond: ("time" IS NOT NULL) +(15 rows) + +-- can't do index scan when using group by +:PREFIX SELECT last(temp, time) FROM btest GROUP BY gp ORDER BY gp; + QUERY PLAN +------------------------------------------------------------------------ + Sort (actual rows=2 loops=1) + Sort Key: _hyper_1_1_chunk.gp + Sort Method: quicksort + -> HashAggregate (actual rows=2 loops=1) + Group Key: _hyper_1_1_chunk.gp + Batches: 1 + -> Append (actual rows=11 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_5_chunk (actual rows=1 loops=1) +(12 rows) + +-- do index scan when agg function is used in CTE subquery +:PREFIX WITH last_temp AS (SELECT last(temp, time) FROM btest) SELECT * from last_temp; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk (never executed) + Index Cond: ("time" IS NOT NULL) +(15 rows) + +-- do index scan when using both FIRST and LAST aggregate functions +:PREFIX SELECT first(temp, time), last(temp, time) FROM btest; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $1) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + InitPlan 2 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest btest_1 (actual rows=1 loops=1) + Order: btest_1."time" + -> Index Scan Backward using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk _hyper_1_4_chunk_1 (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk _hyper_1_1_chunk_1 (never executed) + -> Index Scan Backward using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk _hyper_1_2_chunk_1 (never executed) + -> Index Scan Backward using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk _hyper_1_3_chunk_1 (never executed) + -> Index Scan Backward using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk _hyper_1_5_chunk_1 (never executed) +(24 rows) + +-- verify results when using both FIRST and LAST +:PREFIX SELECT first(temp, time), last(temp, time) FROM btest; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $1) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + InitPlan 2 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest btest_1 (actual rows=1 loops=1) + Order: btest_1."time" + -> Index Scan Backward using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk _hyper_1_4_chunk_1 (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk _hyper_1_1_chunk_1 (never executed) + -> Index Scan Backward using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk _hyper_1_2_chunk_1 (never executed) + -> Index Scan Backward using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk _hyper_1_3_chunk_1 (never executed) + -> Index Scan Backward using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk _hyper_1_5_chunk_1 (never executed) +(24 rows) + +-- do index scan when using WHERE +:PREFIX SELECT last(temp, time) FROM btest WHERE time <= '2017-01-20T09:00:02'; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (actual rows=1 loops=1) + Index Cond: (("time" IS NOT NULL) AND ("time" <= 'Fri Jan 20 09:00:02 2017'::timestamp without time zone)) + -> Index Scan using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk (never executed) + Index Cond: (("time" IS NOT NULL) AND ("time" <= 'Fri Jan 20 09:00:02 2017'::timestamp without time zone)) +(9 rows) + +-- can't do index scan for MAX and LAST combined (MinMax optimization fails when having different aggregate functions) +:PREFIX SELECT max(time), last(temp, time) FROM btest; + QUERY PLAN +------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=11 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_5_chunk (actual rows=1 loops=1) +(7 rows) + +-- can't do index scan when using FIRST/LAST in ORDER BY +:PREFIX SELECT last(temp, time) FROM btest ORDER BY last(temp, time); + QUERY PLAN +------------------------------------------------------------------------ + Sort (actual rows=1 loops=1) + Sort Key: (last(_hyper_1_1_chunk.temp, _hyper_1_1_chunk."time")) + Sort Method: quicksort + -> Aggregate (actual rows=1 loops=1) + -> Append (actual rows=11 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_5_chunk (actual rows=1 loops=1) +(10 rows) + +-- do index scan +:PREFIX SELECT last(temp, time) FROM btest WHERE temp < 30; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk (actual rows=0 loops=1) + Index Cond: ("time" IS NOT NULL) + Filter: (temp < '30'::double precision) + Rows Removed by Filter: 1 + -> Index Scan using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (actual rows=0 loops=1) + Index Cond: ("time" IS NOT NULL) + Filter: (temp < '30'::double precision) + Rows Removed by Filter: 1 + -> Index Scan using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (actual rows=0 loops=1) + Index Cond: ("time" IS NOT NULL) + Filter: (temp < '30'::double precision) + Rows Removed by Filter: 2 + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + Filter: (temp < '30'::double precision) + -> Index Scan using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Filter: (temp < '30'::double precision) +(23 rows) + +-- SELECT first(temp, time) FROM btest WHERE time >= '2017-01-20 09:00:47'; +-- do index scan +:PREFIX SELECT first(temp, time) FROM btest WHERE time >= '2017-01-20 09:00:47'; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" + -> Index Scan Backward using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= 'Fri Jan 20 09:00:47 2017'::timestamp without time zone) + -> Index Scan Backward using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (never executed) + Index Cond: ("time" >= 'Fri Jan 20 09:00:47 2017'::timestamp without time zone) + -> Index Scan Backward using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (never executed) + Index Cond: ("time" >= 'Fri Jan 20 09:00:47 2017'::timestamp without time zone) + -> Index Scan Backward using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk (never executed) + Index Cond: ("time" >= 'Fri Jan 20 09:00:47 2017'::timestamp without time zone) +(13 rows) + +-- can't do index scan when using WINDOW function +:PREFIX SELECT gp, last(temp, time) OVER (PARTITION BY gp) AS last FROM btest; + QUERY PLAN +------------------------------------------------------------------------ + WindowAgg (actual rows=11 loops=1) + -> Sort (actual rows=11 loops=1) + Sort Key: _hyper_1_1_chunk.gp + Sort Method: quicksort + -> Append (actual rows=11 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_5_chunk (actual rows=1 loops=1) +(10 rows) + +-- test constants +:PREFIX SELECT first(100, 100) FROM btest; + QUERY PLAN +-------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) + -> Append (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_2_chunk (never executed) + -> Seq Scan on _hyper_1_3_chunk (never executed) + -> Seq Scan on _hyper_1_4_chunk (never executed) + -> Seq Scan on _hyper_1_5_chunk (never executed) +(10 rows) + +-- create an index so we can test optimization +CREATE INDEX btest_time_alt_idx ON btest(time_alt); +:PREFIX SELECT last(temp, time_alt) FROM btest; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_1_1_chunk.time_alt DESC + -> Index Scan Backward using _hyper_1_1_chunk_btest_time_alt_idx on _hyper_1_1_chunk (actual rows=1 loops=1) + Index Cond: (time_alt IS NOT NULL) + -> Index Scan Backward using _hyper_1_2_chunk_btest_time_alt_idx on _hyper_1_2_chunk (actual rows=1 loops=1) + Index Cond: (time_alt IS NOT NULL) + -> Index Scan Backward using _hyper_1_3_chunk_btest_time_alt_idx on _hyper_1_3_chunk (actual rows=1 loops=1) + Index Cond: (time_alt IS NOT NULL) + -> Index Scan Backward using _hyper_1_4_chunk_btest_time_alt_idx on _hyper_1_4_chunk (actual rows=1 loops=1) + Index Cond: (time_alt IS NOT NULL) + -> Index Scan Backward using _hyper_1_5_chunk_btest_time_alt_idx on _hyper_1_5_chunk (actual rows=1 loops=1) + Index Cond: (time_alt IS NOT NULL) +(15 rows) + +--test nested FIRST/LAST - should optimize +:PREFIX SELECT abs(last(temp, time)) FROM btest; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk (never executed) + Index Cond: ("time" IS NOT NULL) +(15 rows) + +-- test nested FIRST/LAST in ORDER BY - no optimization possible +:PREFIX SELECT abs(last(temp, time)) FROM btest ORDER BY abs(last(temp,time)); + QUERY PLAN +------------------------------------------------------------------------- + Sort (actual rows=1 loops=1) + Sort Key: (abs(last(_hyper_1_1_chunk.temp, _hyper_1_1_chunk."time"))) + Sort Method: quicksort + -> Aggregate (actual rows=1 loops=1) + -> Append (actual rows=11 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_5_chunk (actual rows=1 loops=1) +(10 rows) + +ROLLBACK; +-- Test with NULL numeric values +BEGIN; +TRUNCATE btest_numeric; +-- Empty table +:PREFIX SELECT first(btest_numeric, time) FROM btest_numeric; + QUERY PLAN +-------------------------------------- + Aggregate (actual rows=1 loops=1) + -> Result (actual rows=0 loops=1) + One-Time Filter: false +(3 rows) + +:PREFIX SELECT last(btest_numeric, time) FROM btest_numeric; + QUERY PLAN +-------------------------------------- + Aggregate (actual rows=1 loops=1) + -> Result (actual rows=0 loops=1) + One-Time Filter: false +(3 rows) + +-- Only NULL values +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_2_8_chunk_btest_numeric_time_idx on _hyper_2_8_chunk (actual rows=1 loops=1) +(4 rows) + +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Index Scan using _hyper_2_8_chunk_btest_numeric_time_idx on _hyper_2_8_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) +(5 rows) + +:PREFIX SELECT first(time, quantity) FROM btest_numeric; + QUERY PLAN +------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_2_8_chunk (actual rows=2 loops=1) +(2 rows) + +:PREFIX SELECT last(time, quantity) FROM btest_numeric; + QUERY PLAN +------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_2_8_chunk (actual rows=2 loops=1) +(2 rows) + +-- NULL values followed by non-NULL values +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 1); +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 2); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest_numeric (actual rows=1 loops=1) + Order: btest_numeric."time" + -> Index Scan Backward using _hyper_2_8_chunk_btest_numeric_time_idx on _hyper_2_8_chunk (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_2_9_chunk_btest_numeric_time_idx on _hyper_2_9_chunk (never executed) +(7 rows) + +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest_numeric (actual rows=1 loops=1) + Order: btest_numeric."time" DESC + -> Index Scan using _hyper_2_9_chunk_btest_numeric_time_idx on _hyper_2_9_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_2_8_chunk_btest_numeric_time_idx on _hyper_2_8_chunk (never executed) + Index Cond: ("time" IS NOT NULL) +(9 rows) + +:PREFIX SELECT first(time, quantity) FROM btest_numeric; + QUERY PLAN +------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=4 loops=1) + -> Seq Scan on _hyper_2_8_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_2_9_chunk (actual rows=2 loops=1) +(4 rows) + +:PREFIX SELECT last(time, quantity) FROM btest_numeric; + QUERY PLAN +------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=4 loops=1) + -> Seq Scan on _hyper_2_8_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_2_9_chunk (actual rows=2 loops=1) +(4 rows) + +TRUNCATE btest_numeric; +-- non-NULL values followed by NULL values +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 1); +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 2); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest_numeric (actual rows=1 loops=1) + Order: btest_numeric."time" + -> Index Scan Backward using _hyper_2_11_chunk_btest_numeric_time_idx on _hyper_2_11_chunk (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_2_10_chunk_btest_numeric_time_idx on _hyper_2_10_chunk (never executed) +(7 rows) + +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest_numeric (actual rows=1 loops=1) + Order: btest_numeric."time" DESC + -> Index Scan using _hyper_2_10_chunk_btest_numeric_time_idx on _hyper_2_10_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_2_11_chunk_btest_numeric_time_idx on _hyper_2_11_chunk (never executed) + Index Cond: ("time" IS NOT NULL) +(9 rows) + +:PREFIX SELECT first(time, quantity) FROM btest_numeric; + QUERY PLAN +------------------------------------------------------------------- + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=4 loops=1) + -> Seq Scan on _hyper_2_10_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_2_11_chunk (actual rows=2 loops=1) +(4 rows) + +:PREFIX SELECT last(time, quantity) FROM btest_numeric; + QUERY PLAN +------------------------------------------------------------------- + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=4 loops=1) + -> Seq Scan on _hyper_2_10_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_2_11_chunk (actual rows=2 loops=1) +(4 rows) + +ROLLBACK; +-- we want test results as part of the output too to make sure we produce correct output +\set PREFIX '' +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- canary for results diff +-- this should be only output of results diff +SELECT setting, current_setting(setting) AS value from (VALUES ('timescaledb.enable_optimizations')) v(setting); + setting | value +----------------------------------+------- + timescaledb.enable_optimizations | on +(1 row) + +:PREFIX SELECT time, gp, temp FROM btest ORDER BY time; + time | gp | temp +--------------------------+----+------ + Fri Jan 20 09:00:01 2017 | 1 | 22.5 + Fri Jan 20 09:00:02 2017 | 2 | 35.5 + Fri Jan 20 09:00:21 2017 | 1 | 21.2 + Fri Jan 20 09:00:21 2017 | 2 | 30.2 + Fri Jan 20 09:00:43 2017 | 2 | 20.1 + Fri Jan 20 09:00:47 2017 | 1 | 25.1 +(6 rows) + +:PREFIX SELECT last(temp, time) FROM btest; + last +------ + 25.1 +(1 row) + +:PREFIX SELECT first(temp, time) FROM btest; + first +------- + 22.5 +(1 row) + +:PREFIX SELECT last(temp, time_alt) FROM btest; + last +------ + 22.5 +(1 row) + +:PREFIX SELECT first(temp, time_alt) FROM btest; + first +------- + 30.2 +(1 row) + +:PREFIX SELECT gp, last(temp, time) FROM btest GROUP BY gp ORDER BY gp; + gp | last +----+------ + 1 | 25.1 + 2 | 20.1 +(2 rows) + +:PREFIX SELECT gp, first(temp, time) FROM btest GROUP BY gp ORDER BY gp; + gp | first +----+------- + 1 | 22.5 + 2 | 35.5 +(2 rows) + +--check whole row +:PREFIX SELECT gp, first(btest, time) FROM btest GROUP BY gp ORDER BY gp; + gp | first +----+------------------------------------------------------------------------ + 1 | ("Fri Jan 20 09:00:01 2017","Fri Jan 20 10:00:00 2017",1,22.5,testing) + 2 | ("Fri Jan 20 09:00:02 2017","Fri Jan 20 09:00:57 2017",2,35.5,testing) +(2 rows) + +--check toasted col +:PREFIX SELECT gp, left(last(strid, time), 10) FROM btest GROUP BY gp ORDER BY gp; + gp | left +----+------------ + 1 | testing + 2 | xyzxyzxyzx +(2 rows) + +:PREFIX SELECT gp, last(temp, strid) FROM btest GROUP BY gp ORDER BY gp; + gp | last +----+------ + 1 | 22.5 + 2 | 20.1 +(2 rows) + +:PREFIX SELECT gp, last(strid, temp) FROM btest GROUP BY gp ORDER BY gp; + gp | last +----+--------- + 1 | testing + 2 | testing +(2 rows) + +BEGIN; +--check null value as last element +INSERT INTO btest VALUES('2018-01-20T09:00:43', '2017-01-20T09:00:55', 2, NULL); +:PREFIX SELECT last(temp, time) FROM btest; + last +------ + +(1 row) + +--check non-null element "overrides" NULL because it comes after. +INSERT INTO btest VALUES('2019-01-20T09:00:43', '2018-01-20T09:00:55', 2, 30.5); +:PREFIX SELECT last(temp, time) FROM btest; + last +------ + 30.5 +(1 row) + +--check null cmp element is skipped +INSERT INTO btest VALUES('2018-01-20T09:00:43', NULL, 2, 32.3); +:PREFIX SELECT last(temp, time_alt) FROM btest; + last +------ + 30.5 +(1 row) + +-- fist returns NULL value +:PREFIX SELECT first(temp, time_alt) FROM btest; + first +------- + +(1 row) + +-- test first return non NULL value +INSERT INTO btest VALUES('2016-01-20T09:00:00', '2016-01-20T09:00:00', 2, 36.5); +:PREFIX SELECT first(temp, time_alt) FROM btest; + first +------- + 36.5 +(1 row) + +--check non null cmp element insert after null cmp +INSERT INTO btest VALUES('2020-01-20T09:00:43', '2020-01-20T09:00:43', 2, 35.3); +:PREFIX SELECT last(temp, time_alt) FROM btest; + last +------ + 35.3 +(1 row) + +:PREFIX SELECT first(temp, time_alt) FROM btest; + first +------- + 36.5 +(1 row) + +--cmp nulls should be ignored and not present in groups +:PREFIX SELECT gp, last(temp, time_alt) FROM btest GROUP BY gp ORDER BY gp; + gp | last +----+------ + 1 | 22.5 + 2 | 35.3 +(2 rows) + +--Previously, some bugs were found with NULLS and numeric types, so test that +INSERT INTO btest_numeric VALUES ('2019-01-20T09:00:43', NULL); +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + last +------ + +(1 row) + +--check non-null element "overrides" NULL because it comes after. +INSERT INTO btest_numeric VALUES('2020-01-20T09:00:43', 30.5); +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + last +------ + 30.5 +(1 row) + +-- do index scan for last +:PREFIX SELECT last(temp, time) FROM btest; + last +------ + 35.3 +(1 row) + +-- do index scan for first +:PREFIX SELECT first(temp, time) FROM btest; + first +------- + 36.5 +(1 row) + +-- can't do index scan when ordering on non-index column +:PREFIX SELECT first(temp, time_alt) FROM btest; + first +------- + 36.5 +(1 row) + +-- do index scan for subquery +:PREFIX SELECT * FROM (SELECT last(temp, time) FROM btest) last; + last +------ + 35.3 +(1 row) + +-- can't do index scan when using group by +:PREFIX SELECT last(temp, time) FROM btest GROUP BY gp ORDER BY gp; + last +------ + 25.1 + 35.3 +(2 rows) + +-- do index scan when agg function is used in CTE subquery +:PREFIX WITH last_temp AS (SELECT last(temp, time) FROM btest) SELECT * from last_temp; + last +------ + 35.3 +(1 row) + +-- do index scan when using both FIRST and LAST aggregate functions +:PREFIX SELECT first(temp, time), last(temp, time) FROM btest; + first | last +-------+------ + 36.5 | 35.3 +(1 row) + +-- verify results when using both FIRST and LAST +:PREFIX SELECT first(temp, time), last(temp, time) FROM btest; + first | last +-------+------ + 36.5 | 35.3 +(1 row) + +-- do index scan when using WHERE +:PREFIX SELECT last(temp, time) FROM btest WHERE time <= '2017-01-20T09:00:02'; + last +------ + 35.5 +(1 row) + +-- can't do index scan for MAX and LAST combined (MinMax optimization fails when having different aggregate functions) +:PREFIX SELECT max(time), last(temp, time) FROM btest; + max | last +--------------------------+------ + Mon Jan 20 09:00:43 2020 | 35.3 +(1 row) + +-- can't do index scan when using FIRST/LAST in ORDER BY +:PREFIX SELECT last(temp, time) FROM btest ORDER BY last(temp, time); + last +------ + 35.3 +(1 row) + +-- do index scan +:PREFIX SELECT last(temp, time) FROM btest WHERE temp < 30; + last +------ + 25.1 +(1 row) + +-- SELECT first(temp, time) FROM btest WHERE time >= '2017-01-20 09:00:47'; +-- do index scan +:PREFIX SELECT first(temp, time) FROM btest WHERE time >= '2017-01-20 09:00:47'; + first +------- + 25.1 +(1 row) + +-- can't do index scan when using WINDOW function +:PREFIX SELECT gp, last(temp, time) OVER (PARTITION BY gp) AS last FROM btest; + gp | last +----+------ + 1 | 25.1 + 1 | 25.1 + 1 | 25.1 + 2 | 35.3 + 2 | 35.3 + 2 | 35.3 + 2 | 35.3 + 2 | 35.3 + 2 | 35.3 + 2 | 35.3 + 2 | 35.3 +(11 rows) + +-- test constants +:PREFIX SELECT first(100, 100) FROM btest; + first +------- + 100 +(1 row) + +-- create an index so we can test optimization +CREATE INDEX btest_time_alt_idx ON btest(time_alt); +:PREFIX SELECT last(temp, time_alt) FROM btest; + last +------ + 35.3 +(1 row) + +--test nested FIRST/LAST - should optimize +:PREFIX SELECT abs(last(temp, time)) FROM btest; + abs +------ + 35.3 +(1 row) + +-- test nested FIRST/LAST in ORDER BY - no optimization possible +:PREFIX SELECT abs(last(temp, time)) FROM btest ORDER BY abs(last(temp,time)); + abs +------ + 35.3 +(1 row) + +ROLLBACK; +-- Test with NULL numeric values +BEGIN; +TRUNCATE btest_numeric; +-- Empty table +:PREFIX SELECT first(btest_numeric, time) FROM btest_numeric; + first +------- + +(1 row) + +:PREFIX SELECT last(btest_numeric, time) FROM btest_numeric; + last +------ + +(1 row) + +-- Only NULL values +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; + first +------- + +(1 row) + +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + last +------ + +(1 row) + +:PREFIX SELECT first(time, quantity) FROM btest_numeric; + first +------- + +(1 row) + +:PREFIX SELECT last(time, quantity) FROM btest_numeric; + last +------ + +(1 row) + +-- NULL values followed by non-NULL values +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 1); +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 2); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; + first +------- + +(1 row) + +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + last +------ + 1 +(1 row) + +:PREFIX SELECT first(time, quantity) FROM btest_numeric; + first +-------------------------- + Sun Jan 20 09:00:43 2019 +(1 row) + +:PREFIX SELECT last(time, quantity) FROM btest_numeric; + last +-------------------------- + Sun Jan 20 09:00:43 2019 +(1 row) + +TRUNCATE btest_numeric; +-- non-NULL values followed by NULL values +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 1); +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 2); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; + first +------- + +(1 row) + +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + last +------ + 1 +(1 row) + +:PREFIX SELECT first(time, quantity) FROM btest_numeric; + first +-------------------------- + Sun Jan 20 09:00:43 2019 +(1 row) + +:PREFIX SELECT last(time, quantity) FROM btest_numeric; + last +-------------------------- + Sun Jan 20 09:00:43 2019 +(1 row) + +ROLLBACK; +-- diff results with optimizations disabled and enabled +\o :TEST_RESULTS_UNOPTIMIZED +SET timescaledb.enable_optimizations TO false; +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- canary for results diff +-- this should be only output of results diff +SELECT setting, current_setting(setting) AS value from (VALUES ('timescaledb.enable_optimizations')) v(setting); +:PREFIX SELECT time, gp, temp FROM btest ORDER BY time; +:PREFIX SELECT last(temp, time) FROM btest; +:PREFIX SELECT first(temp, time) FROM btest; +:PREFIX SELECT last(temp, time_alt) FROM btest; +:PREFIX SELECT first(temp, time_alt) FROM btest; +:PREFIX SELECT gp, last(temp, time) FROM btest GROUP BY gp ORDER BY gp; +:PREFIX SELECT gp, first(temp, time) FROM btest GROUP BY gp ORDER BY gp; +--check whole row +:PREFIX SELECT gp, first(btest, time) FROM btest GROUP BY gp ORDER BY gp; +--check toasted col +:PREFIX SELECT gp, left(last(strid, time), 10) FROM btest GROUP BY gp ORDER BY gp; +:PREFIX SELECT gp, last(temp, strid) FROM btest GROUP BY gp ORDER BY gp; +:PREFIX SELECT gp, last(strid, temp) FROM btest GROUP BY gp ORDER BY gp; +BEGIN; +--check null value as last element +INSERT INTO btest VALUES('2018-01-20T09:00:43', '2017-01-20T09:00:55', 2, NULL); +:PREFIX SELECT last(temp, time) FROM btest; +--check non-null element "overrides" NULL because it comes after. +INSERT INTO btest VALUES('2019-01-20T09:00:43', '2018-01-20T09:00:55', 2, 30.5); +:PREFIX SELECT last(temp, time) FROM btest; +--check null cmp element is skipped +INSERT INTO btest VALUES('2018-01-20T09:00:43', NULL, 2, 32.3); +:PREFIX SELECT last(temp, time_alt) FROM btest; +-- fist returns NULL value +:PREFIX SELECT first(temp, time_alt) FROM btest; +-- test first return non NULL value +INSERT INTO btest VALUES('2016-01-20T09:00:00', '2016-01-20T09:00:00', 2, 36.5); +:PREFIX SELECT first(temp, time_alt) FROM btest; +--check non null cmp element insert after null cmp +INSERT INTO btest VALUES('2020-01-20T09:00:43', '2020-01-20T09:00:43', 2, 35.3); +:PREFIX SELECT last(temp, time_alt) FROM btest; +:PREFIX SELECT first(temp, time_alt) FROM btest; +--cmp nulls should be ignored and not present in groups +:PREFIX SELECT gp, last(temp, time_alt) FROM btest GROUP BY gp ORDER BY gp; +--Previously, some bugs were found with NULLS and numeric types, so test that +INSERT INTO btest_numeric VALUES ('2019-01-20T09:00:43', NULL); +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +--check non-null element "overrides" NULL because it comes after. +INSERT INTO btest_numeric VALUES('2020-01-20T09:00:43', 30.5); +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +-- do index scan for last +:PREFIX SELECT last(temp, time) FROM btest; +-- do index scan for first +:PREFIX SELECT first(temp, time) FROM btest; +-- can't do index scan when ordering on non-index column +:PREFIX SELECT first(temp, time_alt) FROM btest; +-- do index scan for subquery +:PREFIX SELECT * FROM (SELECT last(temp, time) FROM btest) last; +-- can't do index scan when using group by +:PREFIX SELECT last(temp, time) FROM btest GROUP BY gp ORDER BY gp; +-- do index scan when agg function is used in CTE subquery +:PREFIX WITH last_temp AS (SELECT last(temp, time) FROM btest) SELECT * from last_temp; +-- do index scan when using both FIRST and LAST aggregate functions +:PREFIX SELECT first(temp, time), last(temp, time) FROM btest; +-- verify results when using both FIRST and LAST +:PREFIX SELECT first(temp, time), last(temp, time) FROM btest; +-- do index scan when using WHERE +:PREFIX SELECT last(temp, time) FROM btest WHERE time <= '2017-01-20T09:00:02'; +-- can't do index scan for MAX and LAST combined (MinMax optimization fails when having different aggregate functions) +:PREFIX SELECT max(time), last(temp, time) FROM btest; +-- can't do index scan when using FIRST/LAST in ORDER BY +:PREFIX SELECT last(temp, time) FROM btest ORDER BY last(temp, time); +-- do index scan +:PREFIX SELECT last(temp, time) FROM btest WHERE temp < 30; +-- SELECT first(temp, time) FROM btest WHERE time >= '2017-01-20 09:00:47'; +-- do index scan +:PREFIX SELECT first(temp, time) FROM btest WHERE time >= '2017-01-20 09:00:47'; +-- can't do index scan when using WINDOW function +:PREFIX SELECT gp, last(temp, time) OVER (PARTITION BY gp) AS last FROM btest; +-- test constants +:PREFIX SELECT first(100, 100) FROM btest; +-- create an index so we can test optimization +CREATE INDEX btest_time_alt_idx ON btest(time_alt); +:PREFIX SELECT last(temp, time_alt) FROM btest; +--test nested FIRST/LAST - should optimize +:PREFIX SELECT abs(last(temp, time)) FROM btest; +-- test nested FIRST/LAST in ORDER BY - no optimization possible +:PREFIX SELECT abs(last(temp, time)) FROM btest ORDER BY abs(last(temp,time)); +ROLLBACK; +-- Test with NULL numeric values +BEGIN; +TRUNCATE btest_numeric; +-- Empty table +:PREFIX SELECT first(btest_numeric, time) FROM btest_numeric; +:PREFIX SELECT last(btest_numeric, time) FROM btest_numeric; +-- Only NULL values +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +:PREFIX SELECT first(time, quantity) FROM btest_numeric; +:PREFIX SELECT last(time, quantity) FROM btest_numeric; +-- NULL values followed by non-NULL values +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 1); +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 2); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +:PREFIX SELECT first(time, quantity) FROM btest_numeric; +:PREFIX SELECT last(time, quantity) FROM btest_numeric; +TRUNCATE btest_numeric; +-- non-NULL values followed by NULL values +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 1); +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 2); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +:PREFIX SELECT first(time, quantity) FROM btest_numeric; +:PREFIX SELECT last(time, quantity) FROM btest_numeric; +ROLLBACK; +\o +\o :TEST_RESULTS_OPTIMIZED +SET timescaledb.enable_optimizations TO true; +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- canary for results diff +-- this should be only output of results diff +SELECT setting, current_setting(setting) AS value from (VALUES ('timescaledb.enable_optimizations')) v(setting); +:PREFIX SELECT time, gp, temp FROM btest ORDER BY time; +:PREFIX SELECT last(temp, time) FROM btest; +:PREFIX SELECT first(temp, time) FROM btest; +:PREFIX SELECT last(temp, time_alt) FROM btest; +:PREFIX SELECT first(temp, time_alt) FROM btest; +:PREFIX SELECT gp, last(temp, time) FROM btest GROUP BY gp ORDER BY gp; +:PREFIX SELECT gp, first(temp, time) FROM btest GROUP BY gp ORDER BY gp; +--check whole row +:PREFIX SELECT gp, first(btest, time) FROM btest GROUP BY gp ORDER BY gp; +--check toasted col +:PREFIX SELECT gp, left(last(strid, time), 10) FROM btest GROUP BY gp ORDER BY gp; +:PREFIX SELECT gp, last(temp, strid) FROM btest GROUP BY gp ORDER BY gp; +:PREFIX SELECT gp, last(strid, temp) FROM btest GROUP BY gp ORDER BY gp; +BEGIN; +--check null value as last element +INSERT INTO btest VALUES('2018-01-20T09:00:43', '2017-01-20T09:00:55', 2, NULL); +:PREFIX SELECT last(temp, time) FROM btest; +--check non-null element "overrides" NULL because it comes after. +INSERT INTO btest VALUES('2019-01-20T09:00:43', '2018-01-20T09:00:55', 2, 30.5); +:PREFIX SELECT last(temp, time) FROM btest; +--check null cmp element is skipped +INSERT INTO btest VALUES('2018-01-20T09:00:43', NULL, 2, 32.3); +:PREFIX SELECT last(temp, time_alt) FROM btest; +-- fist returns NULL value +:PREFIX SELECT first(temp, time_alt) FROM btest; +-- test first return non NULL value +INSERT INTO btest VALUES('2016-01-20T09:00:00', '2016-01-20T09:00:00', 2, 36.5); +:PREFIX SELECT first(temp, time_alt) FROM btest; +--check non null cmp element insert after null cmp +INSERT INTO btest VALUES('2020-01-20T09:00:43', '2020-01-20T09:00:43', 2, 35.3); +:PREFIX SELECT last(temp, time_alt) FROM btest; +:PREFIX SELECT first(temp, time_alt) FROM btest; +--cmp nulls should be ignored and not present in groups +:PREFIX SELECT gp, last(temp, time_alt) FROM btest GROUP BY gp ORDER BY gp; +--Previously, some bugs were found with NULLS and numeric types, so test that +INSERT INTO btest_numeric VALUES ('2019-01-20T09:00:43', NULL); +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +--check non-null element "overrides" NULL because it comes after. +INSERT INTO btest_numeric VALUES('2020-01-20T09:00:43', 30.5); +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +-- do index scan for last +:PREFIX SELECT last(temp, time) FROM btest; +-- do index scan for first +:PREFIX SELECT first(temp, time) FROM btest; +-- can't do index scan when ordering on non-index column +:PREFIX SELECT first(temp, time_alt) FROM btest; +-- do index scan for subquery +:PREFIX SELECT * FROM (SELECT last(temp, time) FROM btest) last; +-- can't do index scan when using group by +:PREFIX SELECT last(temp, time) FROM btest GROUP BY gp ORDER BY gp; +-- do index scan when agg function is used in CTE subquery +:PREFIX WITH last_temp AS (SELECT last(temp, time) FROM btest) SELECT * from last_temp; +-- do index scan when using both FIRST and LAST aggregate functions +:PREFIX SELECT first(temp, time), last(temp, time) FROM btest; +-- verify results when using both FIRST and LAST +:PREFIX SELECT first(temp, time), last(temp, time) FROM btest; +-- do index scan when using WHERE +:PREFIX SELECT last(temp, time) FROM btest WHERE time <= '2017-01-20T09:00:02'; +-- can't do index scan for MAX and LAST combined (MinMax optimization fails when having different aggregate functions) +:PREFIX SELECT max(time), last(temp, time) FROM btest; +-- can't do index scan when using FIRST/LAST in ORDER BY +:PREFIX SELECT last(temp, time) FROM btest ORDER BY last(temp, time); +-- do index scan +:PREFIX SELECT last(temp, time) FROM btest WHERE temp < 30; +-- SELECT first(temp, time) FROM btest WHERE time >= '2017-01-20 09:00:47'; +-- do index scan +:PREFIX SELECT first(temp, time) FROM btest WHERE time >= '2017-01-20 09:00:47'; +-- can't do index scan when using WINDOW function +:PREFIX SELECT gp, last(temp, time) OVER (PARTITION BY gp) AS last FROM btest; +-- test constants +:PREFIX SELECT first(100, 100) FROM btest; +-- create an index so we can test optimization +CREATE INDEX btest_time_alt_idx ON btest(time_alt); +:PREFIX SELECT last(temp, time_alt) FROM btest; +--test nested FIRST/LAST - should optimize +:PREFIX SELECT abs(last(temp, time)) FROM btest; +-- test nested FIRST/LAST in ORDER BY - no optimization possible +:PREFIX SELECT abs(last(temp, time)) FROM btest ORDER BY abs(last(temp,time)); +ROLLBACK; +-- Test with NULL numeric values +BEGIN; +TRUNCATE btest_numeric; +-- Empty table +:PREFIX SELECT first(btest_numeric, time) FROM btest_numeric; +:PREFIX SELECT last(btest_numeric, time) FROM btest_numeric; +-- Only NULL values +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +:PREFIX SELECT first(time, quantity) FROM btest_numeric; +:PREFIX SELECT last(time, quantity) FROM btest_numeric; +-- NULL values followed by non-NULL values +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 1); +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 2); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +:PREFIX SELECT first(time, quantity) FROM btest_numeric; +:PREFIX SELECT last(time, quantity) FROM btest_numeric; +TRUNCATE btest_numeric; +-- non-NULL values followed by NULL values +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 1); +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 2); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +:PREFIX SELECT first(time, quantity) FROM btest_numeric; +:PREFIX SELECT last(time, quantity) FROM btest_numeric; +ROLLBACK; +\o +:DIFF_CMD +--- Unoptimized result ++++ Optimized result +@@ -1,6 +1,6 @@ + setting | value + ----------------------------------+------- +- timescaledb.enable_optimizations | off ++ timescaledb.enable_optimizations | on + (1 row) + + time | gp | temp +-- Test partial aggregation +CREATE TABLE partial_aggregation (time timestamptz NOT NULL, quantity numeric, longvalue text); +SELECT schema_name, table_name, created FROM create_hypertable('partial_aggregation', 'time'); + schema_name | table_name | created +-------------+---------------------+--------- + public | partial_aggregation | t +(1 row) + +INSERT INTO partial_aggregation VALUES('2018-01-20T09:00:43', NULL, NULL); +INSERT INTO partial_aggregation VALUES('2018-01-20T09:00:43', NULL, NULL); +INSERT INTO partial_aggregation VALUES('2019-01-20T09:00:43', 1, 'hello'); +INSERT INTO partial_aggregation VALUES('2019-01-20T09:00:43', 2, 'world'); +INSERT INTO partial_aggregation VALUES('2020-01-20T09:00:43', 3, 'some'); +INSERT INTO partial_aggregation VALUES('2020-01-20T09:00:43', 3, 'more'); +INSERT INTO partial_aggregation VALUES('2021-01-20T09:00:43', 3, 'some'); +INSERT INTO partial_aggregation VALUES('2021-01-20T09:00:43', 3, 'more'); +INSERT INTO partial_aggregation VALUES('2022-01-20T09:00:43', 4, 'words'); +INSERT INTO partial_aggregation VALUES('2022-01-20T09:00:43', 5, 'words'); +INSERT INTO partial_aggregation VALUES('2023-01-20T09:00:43', 6, 'words'); +INSERT INTO partial_aggregation VALUES('2023-01-20T09:00:43', 7, 'words'); +-- Use enable_partitionwise_aggregate to create partial aggregates per chunk +SET enable_partitionwise_aggregate = ON; +SELECT + format('SELECT %3$s, %1$s FROM partial_aggregation WHERE %2$s GROUP BY %3$s ORDER BY 1, 2;', + function, condition, grouping) +FROM + unnest(array[ + 'first(time, quantity), last(time, quantity)', + 'last(longvalue, quantity)', + 'last(quantity, longvalue)', + 'last(quantity, time)', + 'last(time, longvalue)']) AS function, + unnest(array[ + 'true', + $$time < '2021-01-01'$$, + 'quantity is null', + 'quantity is not null', + 'quantity > 3']) AS condition, + unnest(array[ + '777::text' /* dummy grouping column */, + 'longvalue', + 'quantity', + $$time_bucket('1 year', time)$$, + $$time_bucket('3 year', time)$$]) AS grouping +\gexec +SELECT 777::text, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE true GROUP BY 777::text ORDER BY 1, 2; + text | first | last +------+------------------------------+------------------------------ + 777 | Sun Jan 20 09:00:43 2019 PST | Fri Jan 20 09:00:43 2023 PST +(1 row) + +SELECT 777::text, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY 777::text ORDER BY 1, 2; + text | first | last +------+------------------------------+------------------------------ + 777 | Sun Jan 20 09:00:43 2019 PST | Mon Jan 20 09:00:43 2020 PST +(1 row) + +SELECT 777::text, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY 777::text ORDER BY 1, 2; + text | first | last +------+-------+------ + 777 | | +(1 row) + +SELECT 777::text, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY 777::text ORDER BY 1, 2; + text | first | last +------+------------------------------+------------------------------ + 777 | Sun Jan 20 09:00:43 2019 PST | Fri Jan 20 09:00:43 2023 PST +(1 row) + +SELECT 777::text, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY 777::text ORDER BY 1, 2; + text | first | last +------+------------------------------+------------------------------ + 777 | Thu Jan 20 09:00:43 2022 PST | Fri Jan 20 09:00:43 2023 PST +(1 row) + +SELECT 777::text, last(longvalue, quantity) FROM partial_aggregation WHERE true GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------- + 777 | words +(1 row) + +SELECT 777::text, last(longvalue, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | some +(1 row) + +SELECT 777::text, last(longvalue, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | +(1 row) + +SELECT 777::text, last(longvalue, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------- + 777 | words +(1 row) + +SELECT 777::text, last(longvalue, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------- + 777 | words +(1 row) + +SELECT 777::text, last(quantity, longvalue) FROM partial_aggregation WHERE true GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | 2 +(1 row) + +SELECT 777::text, last(quantity, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | 2 +(1 row) + +SELECT 777::text, last(quantity, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | +(1 row) + +SELECT 777::text, last(quantity, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | 2 +(1 row) + +SELECT 777::text, last(quantity, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | 4 +(1 row) + +SELECT 777::text, last(quantity, time) FROM partial_aggregation WHERE true GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | 6 +(1 row) + +SELECT 777::text, last(quantity, time) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | 3 +(1 row) + +SELECT 777::text, last(quantity, time) FROM partial_aggregation WHERE quantity is null GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | +(1 row) + +SELECT 777::text, last(quantity, time) FROM partial_aggregation WHERE quantity is not null GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | 6 +(1 row) + +SELECT 777::text, last(quantity, time) FROM partial_aggregation WHERE quantity > 3 GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | 6 +(1 row) + +SELECT 777::text, last(time, longvalue) FROM partial_aggregation WHERE true GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------------------------------ + 777 | Sun Jan 20 09:00:43 2019 PST +(1 row) + +SELECT 777::text, last(time, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------------------------------ + 777 | Sun Jan 20 09:00:43 2019 PST +(1 row) + +SELECT 777::text, last(time, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | +(1 row) + +SELECT 777::text, last(time, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------------------------------ + 777 | Sun Jan 20 09:00:43 2019 PST +(1 row) + +SELECT 777::text, last(time, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------------------------------ + 777 | Thu Jan 20 09:00:43 2022 PST +(1 row) + +SELECT longvalue, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE true GROUP BY longvalue ORDER BY 1, 2; + longvalue | first | last +-----------+------------------------------+------------------------------ + hello | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + more | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + some | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + words | Thu Jan 20 09:00:43 2022 PST | Fri Jan 20 09:00:43 2023 PST + world | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + | | +(6 rows) + +SELECT longvalue, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY longvalue ORDER BY 1, 2; + longvalue | first | last +-----------+------------------------------+------------------------------ + hello | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + more | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + some | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + world | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + | | +(5 rows) + +SELECT longvalue, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY longvalue ORDER BY 1, 2; + longvalue | first | last +-----------+-------+------ + | | +(1 row) + +SELECT longvalue, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY longvalue ORDER BY 1, 2; + longvalue | first | last +-----------+------------------------------+------------------------------ + hello | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + more | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + some | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + words | Thu Jan 20 09:00:43 2022 PST | Fri Jan 20 09:00:43 2023 PST + world | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST +(5 rows) + +SELECT longvalue, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY longvalue ORDER BY 1, 2; + longvalue | first | last +-----------+------------------------------+------------------------------ + words | Thu Jan 20 09:00:43 2022 PST | Fri Jan 20 09:00:43 2023 PST +(1 row) + +SELECT longvalue, last(longvalue, quantity) FROM partial_aggregation WHERE true GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------- + hello | hello + more | more + some | some + words | words + world | world + | +(6 rows) + +SELECT longvalue, last(longvalue, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------- + hello | hello + more | more + some | some + world | world + | +(5 rows) + +SELECT longvalue, last(longvalue, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + | +(1 row) + +SELECT longvalue, last(longvalue, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------- + hello | hello + more | more + some | some + words | words + world | world +(5 rows) + +SELECT longvalue, last(longvalue, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------- + words | words +(1 row) + +SELECT longvalue, last(quantity, longvalue) FROM partial_aggregation WHERE true GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + hello | 1 + more | 3 + some | 3 + words | 4 + world | 2 + | +(6 rows) + +SELECT longvalue, last(quantity, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + hello | 1 + more | 3 + some | 3 + world | 2 + | +(5 rows) + +SELECT longvalue, last(quantity, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + | +(1 row) + +SELECT longvalue, last(quantity, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + hello | 1 + more | 3 + some | 3 + words | 4 + world | 2 +(5 rows) + +SELECT longvalue, last(quantity, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + words | 4 +(1 row) + +SELECT longvalue, last(quantity, time) FROM partial_aggregation WHERE true GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + hello | 1 + more | 3 + some | 3 + words | 6 + world | 2 + | +(6 rows) + +SELECT longvalue, last(quantity, time) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + hello | 1 + more | 3 + some | 3 + world | 2 + | +(5 rows) + +SELECT longvalue, last(quantity, time) FROM partial_aggregation WHERE quantity is null GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + | +(1 row) + +SELECT longvalue, last(quantity, time) FROM partial_aggregation WHERE quantity is not null GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + hello | 1 + more | 3 + some | 3 + words | 6 + world | 2 +(5 rows) + +SELECT longvalue, last(quantity, time) FROM partial_aggregation WHERE quantity > 3 GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + words | 6 +(1 row) + +SELECT longvalue, last(time, longvalue) FROM partial_aggregation WHERE true GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------------------------------ + hello | Sun Jan 20 09:00:43 2019 PST + more | Mon Jan 20 09:00:43 2020 PST + some | Mon Jan 20 09:00:43 2020 PST + words | Thu Jan 20 09:00:43 2022 PST + world | Sun Jan 20 09:00:43 2019 PST + | +(6 rows) + +SELECT longvalue, last(time, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------------------------------ + hello | Sun Jan 20 09:00:43 2019 PST + more | Mon Jan 20 09:00:43 2020 PST + some | Mon Jan 20 09:00:43 2020 PST + world | Sun Jan 20 09:00:43 2019 PST + | +(5 rows) + +SELECT longvalue, last(time, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + | +(1 row) + +SELECT longvalue, last(time, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------------------------------ + hello | Sun Jan 20 09:00:43 2019 PST + more | Mon Jan 20 09:00:43 2020 PST + some | Mon Jan 20 09:00:43 2020 PST + words | Thu Jan 20 09:00:43 2022 PST + world | Sun Jan 20 09:00:43 2019 PST +(5 rows) + +SELECT longvalue, last(time, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------------------------------ + words | Thu Jan 20 09:00:43 2022 PST +(1 row) + +SELECT quantity, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE true GROUP BY quantity ORDER BY 1, 2; + quantity | first | last +----------+------------------------------+------------------------------ + 1 | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + 2 | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + 3 | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + 4 | Thu Jan 20 09:00:43 2022 PST | Thu Jan 20 09:00:43 2022 PST + 5 | Thu Jan 20 09:00:43 2022 PST | Thu Jan 20 09:00:43 2022 PST + 6 | Fri Jan 20 09:00:43 2023 PST | Fri Jan 20 09:00:43 2023 PST + 7 | Fri Jan 20 09:00:43 2023 PST | Fri Jan 20 09:00:43 2023 PST + | | +(8 rows) + +SELECT quantity, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY quantity ORDER BY 1, 2; + quantity | first | last +----------+------------------------------+------------------------------ + 1 | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + 2 | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + 3 | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + | | +(4 rows) + +SELECT quantity, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY quantity ORDER BY 1, 2; + quantity | first | last +----------+-------+------ + | | +(1 row) + +SELECT quantity, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY quantity ORDER BY 1, 2; + quantity | first | last +----------+------------------------------+------------------------------ + 1 | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + 2 | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + 3 | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + 4 | Thu Jan 20 09:00:43 2022 PST | Thu Jan 20 09:00:43 2022 PST + 5 | Thu Jan 20 09:00:43 2022 PST | Thu Jan 20 09:00:43 2022 PST + 6 | Fri Jan 20 09:00:43 2023 PST | Fri Jan 20 09:00:43 2023 PST + 7 | Fri Jan 20 09:00:43 2023 PST | Fri Jan 20 09:00:43 2023 PST +(7 rows) + +SELECT quantity, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY quantity ORDER BY 1, 2; + quantity | first | last +----------+------------------------------+------------------------------ + 4 | Thu Jan 20 09:00:43 2022 PST | Thu Jan 20 09:00:43 2022 PST + 5 | Thu Jan 20 09:00:43 2022 PST | Thu Jan 20 09:00:43 2022 PST + 6 | Fri Jan 20 09:00:43 2023 PST | Fri Jan 20 09:00:43 2023 PST + 7 | Fri Jan 20 09:00:43 2023 PST | Fri Jan 20 09:00:43 2023 PST +(4 rows) + +SELECT quantity, last(longvalue, quantity) FROM partial_aggregation WHERE true GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------- + 1 | hello + 2 | world + 3 | some + 4 | words + 5 | words + 6 | words + 7 | words + | +(8 rows) + +SELECT quantity, last(longvalue, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------- + 1 | hello + 2 | world + 3 | some + | +(4 rows) + +SELECT quantity, last(longvalue, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + | +(1 row) + +SELECT quantity, last(longvalue, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------- + 1 | hello + 2 | world + 3 | some + 4 | words + 5 | words + 6 | words + 7 | words +(7 rows) + +SELECT quantity, last(longvalue, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------- + 4 | words + 5 | words + 6 | words + 7 | words +(4 rows) + +SELECT quantity, last(quantity, longvalue) FROM partial_aggregation WHERE true GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + | +(8 rows) + +SELECT quantity, last(quantity, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + 1 | 1 + 2 | 2 + 3 | 3 + | +(4 rows) + +SELECT quantity, last(quantity, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + | +(1 row) + +SELECT quantity, last(quantity, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 +(7 rows) + +SELECT quantity, last(quantity, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 +(4 rows) + +SELECT quantity, last(quantity, time) FROM partial_aggregation WHERE true GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + | +(8 rows) + +SELECT quantity, last(quantity, time) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + 1 | 1 + 2 | 2 + 3 | 3 + | +(4 rows) + +SELECT quantity, last(quantity, time) FROM partial_aggregation WHERE quantity is null GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + | +(1 row) + +SELECT quantity, last(quantity, time) FROM partial_aggregation WHERE quantity is not null GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 +(7 rows) + +SELECT quantity, last(quantity, time) FROM partial_aggregation WHERE quantity > 3 GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 +(4 rows) + +SELECT quantity, last(time, longvalue) FROM partial_aggregation WHERE true GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------------------------------ + 1 | Sun Jan 20 09:00:43 2019 PST + 2 | Sun Jan 20 09:00:43 2019 PST + 3 | Mon Jan 20 09:00:43 2020 PST + 4 | Thu Jan 20 09:00:43 2022 PST + 5 | Thu Jan 20 09:00:43 2022 PST + 6 | Fri Jan 20 09:00:43 2023 PST + 7 | Fri Jan 20 09:00:43 2023 PST + | +(8 rows) + +SELECT quantity, last(time, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------------------------------ + 1 | Sun Jan 20 09:00:43 2019 PST + 2 | Sun Jan 20 09:00:43 2019 PST + 3 | Mon Jan 20 09:00:43 2020 PST + | +(4 rows) + +SELECT quantity, last(time, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + | +(1 row) + +SELECT quantity, last(time, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------------------------------ + 1 | Sun Jan 20 09:00:43 2019 PST + 2 | Sun Jan 20 09:00:43 2019 PST + 3 | Mon Jan 20 09:00:43 2020 PST + 4 | Thu Jan 20 09:00:43 2022 PST + 5 | Thu Jan 20 09:00:43 2022 PST + 6 | Fri Jan 20 09:00:43 2023 PST + 7 | Fri Jan 20 09:00:43 2023 PST +(7 rows) + +SELECT quantity, last(time, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------------------------------ + 4 | Thu Jan 20 09:00:43 2022 PST + 5 | Thu Jan 20 09:00:43 2022 PST + 6 | Fri Jan 20 09:00:43 2023 PST + 7 | Fri Jan 20 09:00:43 2023 PST +(4 rows) + +SELECT time_bucket('1 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE true GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | | + Mon Dec 31 16:00:00 2018 PST | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + Tue Dec 31 16:00:00 2019 PST | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + Thu Dec 31 16:00:00 2020 PST | Wed Jan 20 09:00:43 2021 PST | Wed Jan 20 09:00:43 2021 PST + Fri Dec 31 16:00:00 2021 PST | Thu Jan 20 09:00:43 2022 PST | Thu Jan 20 09:00:43 2022 PST + Sat Dec 31 16:00:00 2022 PST | Fri Jan 20 09:00:43 2023 PST | Fri Jan 20 09:00:43 2023 PST +(6 rows) + +SELECT time_bucket('1 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | | + Mon Dec 31 16:00:00 2018 PST | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + Tue Dec 31 16:00:00 2019 PST | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST +(3 rows) + +SELECT time_bucket('1 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+-------+------ + Sun Dec 31 16:00:00 2017 PST | | +(1 row) + +SELECT time_bucket('1 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+------------------------------+------------------------------ + Mon Dec 31 16:00:00 2018 PST | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + Tue Dec 31 16:00:00 2019 PST | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + Thu Dec 31 16:00:00 2020 PST | Wed Jan 20 09:00:43 2021 PST | Wed Jan 20 09:00:43 2021 PST + Fri Dec 31 16:00:00 2021 PST | Thu Jan 20 09:00:43 2022 PST | Thu Jan 20 09:00:43 2022 PST + Sat Dec 31 16:00:00 2022 PST | Fri Jan 20 09:00:43 2023 PST | Fri Jan 20 09:00:43 2023 PST +(5 rows) + +SELECT time_bucket('1 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+------------------------------+------------------------------ + Fri Dec 31 16:00:00 2021 PST | Thu Jan 20 09:00:43 2022 PST | Thu Jan 20 09:00:43 2022 PST + Sat Dec 31 16:00:00 2022 PST | Fri Jan 20 09:00:43 2023 PST | Fri Jan 20 09:00:43 2023 PST +(2 rows) + +SELECT time_bucket('1 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE true GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------- + Sun Dec 31 16:00:00 2017 PST | + Mon Dec 31 16:00:00 2018 PST | world + Tue Dec 31 16:00:00 2019 PST | some + Thu Dec 31 16:00:00 2020 PST | some + Fri Dec 31 16:00:00 2021 PST | words + Sat Dec 31 16:00:00 2022 PST | words +(6 rows) + +SELECT time_bucket('1 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------- + Sun Dec 31 16:00:00 2017 PST | + Mon Dec 31 16:00:00 2018 PST | world + Tue Dec 31 16:00:00 2019 PST | more +(3 rows) + +SELECT time_bucket('1 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | +(1 row) + +SELECT time_bucket('1 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------- + Mon Dec 31 16:00:00 2018 PST | world + Tue Dec 31 16:00:00 2019 PST | some + Thu Dec 31 16:00:00 2020 PST | some + Fri Dec 31 16:00:00 2021 PST | words + Sat Dec 31 16:00:00 2022 PST | words +(5 rows) + +SELECT time_bucket('1 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------- + Fri Dec 31 16:00:00 2021 PST | words + Sat Dec 31 16:00:00 2022 PST | words +(2 rows) + +SELECT time_bucket('1 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE true GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | + Mon Dec 31 16:00:00 2018 PST | 2 + Tue Dec 31 16:00:00 2019 PST | 3 + Thu Dec 31 16:00:00 2020 PST | 3 + Fri Dec 31 16:00:00 2021 PST | 4 + Sat Dec 31 16:00:00 2022 PST | 6 +(6 rows) + +SELECT time_bucket('1 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | + Mon Dec 31 16:00:00 2018 PST | 2 + Tue Dec 31 16:00:00 2019 PST | 3 +(3 rows) + +SELECT time_bucket('1 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | +(1 row) + +SELECT time_bucket('1 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Mon Dec 31 16:00:00 2018 PST | 2 + Tue Dec 31 16:00:00 2019 PST | 3 + Thu Dec 31 16:00:00 2020 PST | 3 + Fri Dec 31 16:00:00 2021 PST | 4 + Sat Dec 31 16:00:00 2022 PST | 6 +(5 rows) + +SELECT time_bucket('1 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Fri Dec 31 16:00:00 2021 PST | 4 + Sat Dec 31 16:00:00 2022 PST | 6 +(2 rows) + +SELECT time_bucket('1 year', time), last(quantity, time) FROM partial_aggregation WHERE true GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | + Mon Dec 31 16:00:00 2018 PST | 1 + Tue Dec 31 16:00:00 2019 PST | 3 + Thu Dec 31 16:00:00 2020 PST | 3 + Fri Dec 31 16:00:00 2021 PST | 4 + Sat Dec 31 16:00:00 2022 PST | 6 +(6 rows) + +SELECT time_bucket('1 year', time), last(quantity, time) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | + Mon Dec 31 16:00:00 2018 PST | 2 + Tue Dec 31 16:00:00 2019 PST | 3 +(3 rows) + +SELECT time_bucket('1 year', time), last(quantity, time) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | +(1 row) + +SELECT time_bucket('1 year', time), last(quantity, time) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Mon Dec 31 16:00:00 2018 PST | 1 + Tue Dec 31 16:00:00 2019 PST | 3 + Thu Dec 31 16:00:00 2020 PST | 3 + Fri Dec 31 16:00:00 2021 PST | 4 + Sat Dec 31 16:00:00 2022 PST | 6 +(5 rows) + +SELECT time_bucket('1 year', time), last(quantity, time) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Fri Dec 31 16:00:00 2021 PST | 4 + Sat Dec 31 16:00:00 2022 PST | 6 +(2 rows) + +SELECT time_bucket('1 year', time), last(time, longvalue) FROM partial_aggregation WHERE true GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | + Mon Dec 31 16:00:00 2018 PST | Sun Jan 20 09:00:43 2019 PST + Tue Dec 31 16:00:00 2019 PST | Mon Jan 20 09:00:43 2020 PST + Thu Dec 31 16:00:00 2020 PST | Wed Jan 20 09:00:43 2021 PST + Fri Dec 31 16:00:00 2021 PST | Thu Jan 20 09:00:43 2022 PST + Sat Dec 31 16:00:00 2022 PST | Fri Jan 20 09:00:43 2023 PST +(6 rows) + +SELECT time_bucket('1 year', time), last(time, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | + Mon Dec 31 16:00:00 2018 PST | Sun Jan 20 09:00:43 2019 PST + Tue Dec 31 16:00:00 2019 PST | Mon Jan 20 09:00:43 2020 PST +(3 rows) + +SELECT time_bucket('1 year', time), last(time, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | +(1 row) + +SELECT time_bucket('1 year', time), last(time, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------------------------------ + Mon Dec 31 16:00:00 2018 PST | Sun Jan 20 09:00:43 2019 PST + Tue Dec 31 16:00:00 2019 PST | Mon Jan 20 09:00:43 2020 PST + Thu Dec 31 16:00:00 2020 PST | Wed Jan 20 09:00:43 2021 PST + Fri Dec 31 16:00:00 2021 PST | Thu Jan 20 09:00:43 2022 PST + Sat Dec 31 16:00:00 2022 PST | Fri Jan 20 09:00:43 2023 PST +(5 rows) + +SELECT time_bucket('1 year', time), last(time, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------------------------------ + Fri Dec 31 16:00:00 2021 PST | Thu Jan 20 09:00:43 2022 PST + Sat Dec 31 16:00:00 2022 PST | Fri Jan 20 09:00:43 2023 PST +(2 rows) + +SELECT time_bucket('3 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE true GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | Sun Jan 20 09:00:43 2019 PST | Mon Jan 20 09:00:43 2020 PST + Thu Dec 31 16:00:00 2020 PST | Wed Jan 20 09:00:43 2021 PST | Fri Jan 20 09:00:43 2023 PST +(2 rows) + +SELECT time_bucket('3 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | Sun Jan 20 09:00:43 2019 PST | Mon Jan 20 09:00:43 2020 PST +(1 row) + +SELECT time_bucket('3 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+-------+------ + Sun Dec 31 16:00:00 2017 PST | | +(1 row) + +SELECT time_bucket('3 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | Sun Jan 20 09:00:43 2019 PST | Mon Jan 20 09:00:43 2020 PST + Thu Dec 31 16:00:00 2020 PST | Wed Jan 20 09:00:43 2021 PST | Fri Jan 20 09:00:43 2023 PST +(2 rows) + +SELECT time_bucket('3 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+------------------------------+------------------------------ + Thu Dec 31 16:00:00 2020 PST | Thu Jan 20 09:00:43 2022 PST | Fri Jan 20 09:00:43 2023 PST +(1 row) + +SELECT time_bucket('3 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE true GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------- + Sun Dec 31 16:00:00 2017 PST | some + Thu Dec 31 16:00:00 2020 PST | words +(2 rows) + +SELECT time_bucket('3 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | more +(1 row) + +SELECT time_bucket('3 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | +(1 row) + +SELECT time_bucket('3 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------- + Sun Dec 31 16:00:00 2017 PST | some + Thu Dec 31 16:00:00 2020 PST | words +(2 rows) + +SELECT time_bucket('3 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------- + Thu Dec 31 16:00:00 2020 PST | words +(1 row) + +SELECT time_bucket('3 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE true GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | 2 + Thu Dec 31 16:00:00 2020 PST | 4 +(2 rows) + +SELECT time_bucket('3 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | 2 +(1 row) + +SELECT time_bucket('3 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | +(1 row) + +SELECT time_bucket('3 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | 2 + Thu Dec 31 16:00:00 2020 PST | 4 +(2 rows) + +SELECT time_bucket('3 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Thu Dec 31 16:00:00 2020 PST | 4 +(1 row) + +SELECT time_bucket('3 year', time), last(quantity, time) FROM partial_aggregation WHERE true GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | 3 + Thu Dec 31 16:00:00 2020 PST | 6 +(2 rows) + +SELECT time_bucket('3 year', time), last(quantity, time) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | 3 +(1 row) + +SELECT time_bucket('3 year', time), last(quantity, time) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | +(1 row) + +SELECT time_bucket('3 year', time), last(quantity, time) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | 3 + Thu Dec 31 16:00:00 2020 PST | 6 +(2 rows) + +SELECT time_bucket('3 year', time), last(quantity, time) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Thu Dec 31 16:00:00 2020 PST | 6 +(1 row) + +SELECT time_bucket('3 year', time), last(time, longvalue) FROM partial_aggregation WHERE true GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | Sun Jan 20 09:00:43 2019 PST + Thu Dec 31 16:00:00 2020 PST | Thu Jan 20 09:00:43 2022 PST +(2 rows) + +SELECT time_bucket('3 year', time), last(time, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | Sun Jan 20 09:00:43 2019 PST +(1 row) + +SELECT time_bucket('3 year', time), last(time, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | +(1 row) + +SELECT time_bucket('3 year', time), last(time, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | Sun Jan 20 09:00:43 2019 PST + Thu Dec 31 16:00:00 2020 PST | Thu Jan 20 09:00:43 2022 PST +(2 rows) + +SELECT time_bucket('3 year', time), last(time, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------------------------------ + Thu Dec 31 16:00:00 2020 PST | Thu Jan 20 09:00:43 2022 PST +(1 row) + +SET enable_partitionwise_aggregate = OFF; diff --git a/test/expected/agg_bookends-16.out b/test/expected/agg_bookends-16.out new file mode 100644 index 00000000000..28e61df849a --- /dev/null +++ b/test/expected/agg_bookends-16.out @@ -0,0 +1,2507 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +\set TEST_BASE_NAME agg_bookends +SELECT format('include/%s_load.sql', :'TEST_BASE_NAME') as "TEST_LOAD_NAME", + format('include/%s_query.sql', :'TEST_BASE_NAME') as "TEST_QUERY_NAME", + format('%s/results/%s_results_optimized.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_OPTIMIZED", + format('%s/results/%s_results_unoptimized.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_UNOPTIMIZED" +\gset +SELECT format('\! diff -u --label "Unoptimized result" --label "Optimized result" %s %s', :'TEST_RESULTS_UNOPTIMIZED', :'TEST_RESULTS_OPTIMIZED') as "DIFF_CMD" +\gset +\set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)' +\ir :TEST_LOAD_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +CREATE TABLE btest(time timestamp NOT NULL, time_alt timestamp, gp INTEGER, temp float, strid TEXT DEFAULT 'testing'); +SELECT schema_name, table_name, created FROM create_hypertable('btest', 'time'); +psql:include/agg_bookends_load.sql:6: WARNING: column type "timestamp without time zone" used for "time" does not follow best practices +psql:include/agg_bookends_load.sql:6: WARNING: column type "timestamp without time zone" used for "time_alt" does not follow best practices + schema_name | table_name | created +-------------+------------+--------- + public | btest | t +(1 row) + +INSERT INTO btest VALUES('2017-01-20T09:00:01', '2017-01-20T10:00:00', 1, 22.5); +INSERT INTO btest VALUES('2017-01-20T09:00:21', '2017-01-20T09:00:59', 1, 21.2); +INSERT INTO btest VALUES('2017-01-20T09:00:47', '2017-01-20T09:00:58', 1, 25.1); +INSERT INTO btest VALUES('2017-01-20T09:00:02', '2017-01-20T09:00:57', 2, 35.5); +INSERT INTO btest VALUES('2017-01-20T09:00:21', '2017-01-20T09:00:56', 2, 30.2); +--TOASTED; +INSERT INTO btest VALUES('2017-01-20T09:00:43', '2017-01-20T09:01:55', 2, 20.1, repeat('xyz', 1000000) ); +CREATE TABLE btest_numeric (time timestamp NOT NULL, quantity numeric); +SELECT schema_name, table_name, created FROM create_hypertable('btest_numeric', 'time'); +psql:include/agg_bookends_load.sql:16: WARNING: column type "timestamp without time zone" used for "time" does not follow best practices + schema_name | table_name | created +-------------+---------------+--------- + public | btest_numeric | t +(1 row) + +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- canary for results diff +-- this should be only output of results diff +SELECT setting, current_setting(setting) AS value from (VALUES ('timescaledb.enable_optimizations')) v(setting); + setting | value +----------------------------------+------- + timescaledb.enable_optimizations | on +(1 row) + +:PREFIX SELECT time, gp, temp FROM btest ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Index Scan Backward using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (actual rows=6 loops=1) +(1 row) + +:PREFIX SELECT last(temp, time) FROM btest; + QUERY PLAN +------------------------------------------------------------------------------------------------------------ + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) +(5 rows) + +:PREFIX SELECT first(temp, time) FROM btest; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (actual rows=1 loops=1) +(4 rows) + +:PREFIX SELECT last(temp, time_alt) FROM btest; + QUERY PLAN +------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) +(2 rows) + +:PREFIX SELECT first(temp, time_alt) FROM btest; + QUERY PLAN +------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) +(2 rows) + +:PREFIX SELECT gp, last(temp, time) FROM btest GROUP BY gp ORDER BY gp; + QUERY PLAN +------------------------------------------------------------------ + Sort (actual rows=2 loops=1) + Sort Key: _hyper_1_1_chunk.gp + Sort Method: quicksort + -> HashAggregate (actual rows=2 loops=1) + Group Key: _hyper_1_1_chunk.gp + Batches: 1 + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) +(7 rows) + +:PREFIX SELECT gp, first(temp, time) FROM btest GROUP BY gp ORDER BY gp; + QUERY PLAN +------------------------------------------------------------------ + Sort (actual rows=2 loops=1) + Sort Key: _hyper_1_1_chunk.gp + Sort Method: quicksort + -> HashAggregate (actual rows=2 loops=1) + Group Key: _hyper_1_1_chunk.gp + Batches: 1 + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) +(7 rows) + +--check whole row +:PREFIX SELECT gp, first(btest, time) FROM btest GROUP BY gp ORDER BY gp; + QUERY PLAN +------------------------------------------------------------------ + Sort (actual rows=2 loops=1) + Sort Key: _hyper_1_1_chunk.gp + Sort Method: quicksort + -> HashAggregate (actual rows=2 loops=1) + Group Key: _hyper_1_1_chunk.gp + Batches: 1 + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) +(7 rows) + +--check toasted col +:PREFIX SELECT gp, left(last(strid, time), 10) FROM btest GROUP BY gp ORDER BY gp; + QUERY PLAN +------------------------------------------------------------------ + Sort (actual rows=2 loops=1) + Sort Key: _hyper_1_1_chunk.gp + Sort Method: quicksort + -> HashAggregate (actual rows=2 loops=1) + Group Key: _hyper_1_1_chunk.gp + Batches: 1 + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) +(7 rows) + +:PREFIX SELECT gp, last(temp, strid) FROM btest GROUP BY gp ORDER BY gp; + QUERY PLAN +------------------------------------------------------------------ + Sort (actual rows=2 loops=1) + Sort Key: _hyper_1_1_chunk.gp + Sort Method: quicksort + -> HashAggregate (actual rows=2 loops=1) + Group Key: _hyper_1_1_chunk.gp + Batches: 1 + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) +(7 rows) + +:PREFIX SELECT gp, last(strid, temp) FROM btest GROUP BY gp ORDER BY gp; + QUERY PLAN +------------------------------------------------------------------ + Sort (actual rows=2 loops=1) + Sort Key: _hyper_1_1_chunk.gp + Sort Method: quicksort + -> HashAggregate (actual rows=2 loops=1) + Group Key: _hyper_1_1_chunk.gp + Batches: 1 + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) +(7 rows) + +BEGIN; +--check null value as last element +INSERT INTO btest VALUES('2018-01-20T09:00:43', '2017-01-20T09:00:55', 2, NULL); +:PREFIX SELECT last(temp, time) FROM btest; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (never executed) + Index Cond: ("time" IS NOT NULL) +(9 rows) + +--check non-null element "overrides" NULL because it comes after. +INSERT INTO btest VALUES('2019-01-20T09:00:43', '2018-01-20T09:00:55', 2, 30.5); +:PREFIX SELECT last(temp, time) FROM btest; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (never executed) + Index Cond: ("time" IS NOT NULL) +(11 rows) + +--check null cmp element is skipped +INSERT INTO btest VALUES('2018-01-20T09:00:43', NULL, 2, 32.3); +:PREFIX SELECT last(temp, time_alt) FROM btest; + QUERY PLAN +------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=9 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) +(5 rows) + +-- fist returns NULL value +:PREFIX SELECT first(temp, time_alt) FROM btest; + QUERY PLAN +------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=9 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) +(5 rows) + +-- test first return non NULL value +INSERT INTO btest VALUES('2016-01-20T09:00:00', '2016-01-20T09:00:00', 2, 36.5); +:PREFIX SELECT first(temp, time_alt) FROM btest; + QUERY PLAN +------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=10 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) +(6 rows) + +--check non null cmp element insert after null cmp +INSERT INTO btest VALUES('2020-01-20T09:00:43', '2020-01-20T09:00:43', 2, 35.3); +:PREFIX SELECT last(temp, time_alt) FROM btest; + QUERY PLAN +------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=11 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_5_chunk (actual rows=1 loops=1) +(7 rows) + +:PREFIX SELECT first(temp, time_alt) FROM btest; + QUERY PLAN +------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=11 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_5_chunk (actual rows=1 loops=1) +(7 rows) + +--cmp nulls should be ignored and not present in groups +:PREFIX SELECT gp, last(temp, time_alt) FROM btest GROUP BY gp ORDER BY gp; + QUERY PLAN +------------------------------------------------------------------------ + Sort (actual rows=2 loops=1) + Sort Key: _hyper_1_1_chunk.gp + Sort Method: quicksort + -> HashAggregate (actual rows=2 loops=1) + Group Key: _hyper_1_1_chunk.gp + Batches: 1 + -> Append (actual rows=11 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_5_chunk (actual rows=1 loops=1) +(12 rows) + +--Previously, some bugs were found with NULLS and numeric types, so test that +INSERT INTO btest_numeric VALUES ('2019-01-20T09:00:43', NULL); +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Index Scan using _hyper_2_6_chunk_btest_numeric_time_idx on _hyper_2_6_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) +(5 rows) + +--check non-null element "overrides" NULL because it comes after. +INSERT INTO btest_numeric VALUES('2020-01-20T09:00:43', 30.5); +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest_numeric (actual rows=1 loops=1) + Order: btest_numeric."time" DESC + -> Index Scan using _hyper_2_7_chunk_btest_numeric_time_idx on _hyper_2_7_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_2_6_chunk_btest_numeric_time_idx on _hyper_2_6_chunk (never executed) + Index Cond: ("time" IS NOT NULL) +(9 rows) + +-- do index scan for last +:PREFIX SELECT last(temp, time) FROM btest; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk (never executed) + Index Cond: ("time" IS NOT NULL) +(15 rows) + +-- do index scan for first +:PREFIX SELECT first(temp, time) FROM btest; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" + -> Index Scan Backward using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (never executed) + -> Index Scan Backward using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (never executed) + -> Index Scan Backward using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (never executed) + -> Index Scan Backward using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk (never executed) +(10 rows) + +-- can't do index scan when ordering on non-index column +:PREFIX SELECT first(temp, time_alt) FROM btest; + QUERY PLAN +------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=11 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_5_chunk (actual rows=1 loops=1) +(7 rows) + +-- do index scan for subquery +:PREFIX SELECT * FROM (SELECT last(temp, time) FROM btest) last; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk (never executed) + Index Cond: ("time" IS NOT NULL) +(15 rows) + +-- can't do index scan when using group by +:PREFIX SELECT last(temp, time) FROM btest GROUP BY gp ORDER BY gp; + QUERY PLAN +------------------------------------------------------------------------ + Sort (actual rows=2 loops=1) + Sort Key: _hyper_1_1_chunk.gp + Sort Method: quicksort + -> HashAggregate (actual rows=2 loops=1) + Group Key: _hyper_1_1_chunk.gp + Batches: 1 + -> Append (actual rows=11 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_5_chunk (actual rows=1 loops=1) +(12 rows) + +-- do index scan when agg function is used in CTE subquery +:PREFIX WITH last_temp AS (SELECT last(temp, time) FROM btest) SELECT * from last_temp; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk (never executed) + Index Cond: ("time" IS NOT NULL) +(15 rows) + +-- do index scan when using both FIRST and LAST aggregate functions +:PREFIX SELECT first(temp, time), last(temp, time) FROM btest; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $1) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + InitPlan 2 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest btest_1 (actual rows=1 loops=1) + Order: btest_1."time" + -> Index Scan Backward using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk _hyper_1_4_chunk_1 (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk _hyper_1_1_chunk_1 (never executed) + -> Index Scan Backward using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk _hyper_1_2_chunk_1 (never executed) + -> Index Scan Backward using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk _hyper_1_3_chunk_1 (never executed) + -> Index Scan Backward using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk _hyper_1_5_chunk_1 (never executed) +(24 rows) + +-- verify results when using both FIRST and LAST +:PREFIX SELECT first(temp, time), last(temp, time) FROM btest; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $1) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + InitPlan 2 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest btest_1 (actual rows=1 loops=1) + Order: btest_1."time" + -> Index Scan Backward using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk _hyper_1_4_chunk_1 (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk _hyper_1_1_chunk_1 (never executed) + -> Index Scan Backward using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk _hyper_1_2_chunk_1 (never executed) + -> Index Scan Backward using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk _hyper_1_3_chunk_1 (never executed) + -> Index Scan Backward using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk _hyper_1_5_chunk_1 (never executed) +(24 rows) + +-- do index scan when using WHERE +:PREFIX SELECT last(temp, time) FROM btest WHERE time <= '2017-01-20T09:00:02'; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (actual rows=1 loops=1) + Index Cond: (("time" IS NOT NULL) AND ("time" <= 'Fri Jan 20 09:00:02 2017'::timestamp without time zone)) + -> Index Scan using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk (never executed) + Index Cond: (("time" IS NOT NULL) AND ("time" <= 'Fri Jan 20 09:00:02 2017'::timestamp without time zone)) +(9 rows) + +-- can't do index scan for MAX and LAST combined (MinMax optimization fails when having different aggregate functions) +:PREFIX SELECT max(time), last(temp, time) FROM btest; + QUERY PLAN +------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=11 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_5_chunk (actual rows=1 loops=1) +(7 rows) + +-- can't do index scan when using FIRST/LAST in ORDER BY +:PREFIX SELECT last(temp, time) FROM btest ORDER BY last(temp, time); + QUERY PLAN +------------------------------------------------------------------------ + Sort (actual rows=1 loops=1) + Sort Key: (last(_hyper_1_1_chunk.temp, _hyper_1_1_chunk."time")) + Sort Method: quicksort + -> Aggregate (actual rows=1 loops=1) + -> Append (actual rows=11 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_5_chunk (actual rows=1 loops=1) +(10 rows) + +-- do index scan +:PREFIX SELECT last(temp, time) FROM btest WHERE temp < 30; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk (actual rows=0 loops=1) + Index Cond: ("time" IS NOT NULL) + Filter: (temp < '30'::double precision) + Rows Removed by Filter: 1 + -> Index Scan using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (actual rows=0 loops=1) + Index Cond: ("time" IS NOT NULL) + Filter: (temp < '30'::double precision) + Rows Removed by Filter: 1 + -> Index Scan using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (actual rows=0 loops=1) + Index Cond: ("time" IS NOT NULL) + Filter: (temp < '30'::double precision) + Rows Removed by Filter: 2 + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + Filter: (temp < '30'::double precision) + -> Index Scan using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + Filter: (temp < '30'::double precision) +(23 rows) + +-- SELECT first(temp, time) FROM btest WHERE time >= '2017-01-20 09:00:47'; +-- do index scan +:PREFIX SELECT first(temp, time) FROM btest WHERE time >= '2017-01-20 09:00:47'; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" + -> Index Scan Backward using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= 'Fri Jan 20 09:00:47 2017'::timestamp without time zone) + -> Index Scan Backward using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (never executed) + Index Cond: ("time" >= 'Fri Jan 20 09:00:47 2017'::timestamp without time zone) + -> Index Scan Backward using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (never executed) + Index Cond: ("time" >= 'Fri Jan 20 09:00:47 2017'::timestamp without time zone) + -> Index Scan Backward using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk (never executed) + Index Cond: ("time" >= 'Fri Jan 20 09:00:47 2017'::timestamp without time zone) +(13 rows) + +-- can't do index scan when using WINDOW function +:PREFIX SELECT gp, last(temp, time) OVER (PARTITION BY gp) AS last FROM btest; + QUERY PLAN +------------------------------------------------------------------------ + WindowAgg (actual rows=11 loops=1) + -> Sort (actual rows=11 loops=1) + Sort Key: _hyper_1_1_chunk.gp + Sort Method: quicksort + -> Append (actual rows=11 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_5_chunk (actual rows=1 loops=1) +(10 rows) + +-- test constants +:PREFIX SELECT first(100, 100) FROM btest; + QUERY PLAN +-------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) + -> Append (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_2_chunk (never executed) + -> Seq Scan on _hyper_1_3_chunk (never executed) + -> Seq Scan on _hyper_1_4_chunk (never executed) + -> Seq Scan on _hyper_1_5_chunk (never executed) +(10 rows) + +-- create an index so we can test optimization +CREATE INDEX btest_time_alt_idx ON btest(time_alt); +:PREFIX SELECT last(temp, time_alt) FROM btest; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_1_1_chunk.time_alt DESC + -> Index Scan Backward using _hyper_1_1_chunk_btest_time_alt_idx on _hyper_1_1_chunk (actual rows=1 loops=1) + Index Cond: (time_alt IS NOT NULL) + -> Index Scan Backward using _hyper_1_2_chunk_btest_time_alt_idx on _hyper_1_2_chunk (actual rows=1 loops=1) + Index Cond: (time_alt IS NOT NULL) + -> Index Scan Backward using _hyper_1_3_chunk_btest_time_alt_idx on _hyper_1_3_chunk (actual rows=1 loops=1) + Index Cond: (time_alt IS NOT NULL) + -> Index Scan Backward using _hyper_1_4_chunk_btest_time_alt_idx on _hyper_1_4_chunk (actual rows=1 loops=1) + Index Cond: (time_alt IS NOT NULL) + -> Index Scan Backward using _hyper_1_5_chunk_btest_time_alt_idx on _hyper_1_5_chunk (actual rows=1 loops=1) + Index Cond: (time_alt IS NOT NULL) +(15 rows) + +--test nested FIRST/LAST - should optimize +:PREFIX SELECT abs(last(temp, time)) FROM btest; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (never executed) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk (never executed) + Index Cond: ("time" IS NOT NULL) +(15 rows) + +-- test nested FIRST/LAST in ORDER BY - no optimization possible +:PREFIX SELECT abs(last(temp, time)) FROM btest ORDER BY abs(last(temp,time)); + QUERY PLAN +------------------------------------------------------------------------- + Sort (actual rows=1 loops=1) + Sort Key: (abs(last(_hyper_1_1_chunk.temp, _hyper_1_1_chunk."time"))) + Sort Method: quicksort + -> Aggregate (actual rows=1 loops=1) + -> Append (actual rows=11 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_5_chunk (actual rows=1 loops=1) +(10 rows) + +ROLLBACK; +-- Test with NULL numeric values +BEGIN; +TRUNCATE btest_numeric; +-- Empty table +:PREFIX SELECT first(btest_numeric, time) FROM btest_numeric; + QUERY PLAN +-------------------------------------- + Aggregate (actual rows=1 loops=1) + -> Result (actual rows=0 loops=1) + One-Time Filter: false +(3 rows) + +:PREFIX SELECT last(btest_numeric, time) FROM btest_numeric; + QUERY PLAN +-------------------------------------- + Aggregate (actual rows=1 loops=1) + -> Result (actual rows=0 loops=1) + One-Time Filter: false +(3 rows) + +-- Only NULL values +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_2_8_chunk_btest_numeric_time_idx on _hyper_2_8_chunk (actual rows=1 loops=1) +(4 rows) + +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Index Scan using _hyper_2_8_chunk_btest_numeric_time_idx on _hyper_2_8_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) +(5 rows) + +:PREFIX SELECT first(time, quantity) FROM btest_numeric; + QUERY PLAN +------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_2_8_chunk (actual rows=2 loops=1) +(2 rows) + +:PREFIX SELECT last(time, quantity) FROM btest_numeric; + QUERY PLAN +------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_2_8_chunk (actual rows=2 loops=1) +(2 rows) + +-- NULL values followed by non-NULL values +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 1); +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 2); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest_numeric (actual rows=1 loops=1) + Order: btest_numeric."time" + -> Index Scan Backward using _hyper_2_8_chunk_btest_numeric_time_idx on _hyper_2_8_chunk (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_2_9_chunk_btest_numeric_time_idx on _hyper_2_9_chunk (never executed) +(7 rows) + +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest_numeric (actual rows=1 loops=1) + Order: btest_numeric."time" DESC + -> Index Scan using _hyper_2_9_chunk_btest_numeric_time_idx on _hyper_2_9_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_2_8_chunk_btest_numeric_time_idx on _hyper_2_8_chunk (never executed) + Index Cond: ("time" IS NOT NULL) +(9 rows) + +:PREFIX SELECT first(time, quantity) FROM btest_numeric; + QUERY PLAN +------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=4 loops=1) + -> Seq Scan on _hyper_2_8_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_2_9_chunk (actual rows=2 loops=1) +(4 rows) + +:PREFIX SELECT last(time, quantity) FROM btest_numeric; + QUERY PLAN +------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=4 loops=1) + -> Seq Scan on _hyper_2_8_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_2_9_chunk (actual rows=2 loops=1) +(4 rows) + +TRUNCATE btest_numeric; +-- non-NULL values followed by NULL values +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 1); +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 2); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest_numeric (actual rows=1 loops=1) + Order: btest_numeric."time" + -> Index Scan Backward using _hyper_2_11_chunk_btest_numeric_time_idx on _hyper_2_11_chunk (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_2_10_chunk_btest_numeric_time_idx on _hyper_2_10_chunk (never executed) +(7 rows) + +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest_numeric (actual rows=1 loops=1) + Order: btest_numeric."time" DESC + -> Index Scan using _hyper_2_10_chunk_btest_numeric_time_idx on _hyper_2_10_chunk (actual rows=1 loops=1) + Index Cond: ("time" IS NOT NULL) + -> Index Scan using _hyper_2_11_chunk_btest_numeric_time_idx on _hyper_2_11_chunk (never executed) + Index Cond: ("time" IS NOT NULL) +(9 rows) + +:PREFIX SELECT first(time, quantity) FROM btest_numeric; + QUERY PLAN +------------------------------------------------------------------- + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=4 loops=1) + -> Seq Scan on _hyper_2_10_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_2_11_chunk (actual rows=2 loops=1) +(4 rows) + +:PREFIX SELECT last(time, quantity) FROM btest_numeric; + QUERY PLAN +------------------------------------------------------------------- + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=4 loops=1) + -> Seq Scan on _hyper_2_10_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_2_11_chunk (actual rows=2 loops=1) +(4 rows) + +ROLLBACK; +-- we want test results as part of the output too to make sure we produce correct output +\set PREFIX '' +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- canary for results diff +-- this should be only output of results diff +SELECT setting, current_setting(setting) AS value from (VALUES ('timescaledb.enable_optimizations')) v(setting); + setting | value +----------------------------------+------- + timescaledb.enable_optimizations | on +(1 row) + +:PREFIX SELECT time, gp, temp FROM btest ORDER BY time; + time | gp | temp +--------------------------+----+------ + Fri Jan 20 09:00:01 2017 | 1 | 22.5 + Fri Jan 20 09:00:02 2017 | 2 | 35.5 + Fri Jan 20 09:00:21 2017 | 1 | 21.2 + Fri Jan 20 09:00:21 2017 | 2 | 30.2 + Fri Jan 20 09:00:43 2017 | 2 | 20.1 + Fri Jan 20 09:00:47 2017 | 1 | 25.1 +(6 rows) + +:PREFIX SELECT last(temp, time) FROM btest; + last +------ + 25.1 +(1 row) + +:PREFIX SELECT first(temp, time) FROM btest; + first +------- + 22.5 +(1 row) + +:PREFIX SELECT last(temp, time_alt) FROM btest; + last +------ + 22.5 +(1 row) + +:PREFIX SELECT first(temp, time_alt) FROM btest; + first +------- + 30.2 +(1 row) + +:PREFIX SELECT gp, last(temp, time) FROM btest GROUP BY gp ORDER BY gp; + gp | last +----+------ + 1 | 25.1 + 2 | 20.1 +(2 rows) + +:PREFIX SELECT gp, first(temp, time) FROM btest GROUP BY gp ORDER BY gp; + gp | first +----+------- + 1 | 22.5 + 2 | 35.5 +(2 rows) + +--check whole row +:PREFIX SELECT gp, first(btest, time) FROM btest GROUP BY gp ORDER BY gp; + gp | first +----+------------------------------------------------------------------------ + 1 | ("Fri Jan 20 09:00:01 2017","Fri Jan 20 10:00:00 2017",1,22.5,testing) + 2 | ("Fri Jan 20 09:00:02 2017","Fri Jan 20 09:00:57 2017",2,35.5,testing) +(2 rows) + +--check toasted col +:PREFIX SELECT gp, left(last(strid, time), 10) FROM btest GROUP BY gp ORDER BY gp; + gp | left +----+------------ + 1 | testing + 2 | xyzxyzxyzx +(2 rows) + +:PREFIX SELECT gp, last(temp, strid) FROM btest GROUP BY gp ORDER BY gp; + gp | last +----+------ + 1 | 22.5 + 2 | 20.1 +(2 rows) + +:PREFIX SELECT gp, last(strid, temp) FROM btest GROUP BY gp ORDER BY gp; + gp | last +----+--------- + 1 | testing + 2 | testing +(2 rows) + +BEGIN; +--check null value as last element +INSERT INTO btest VALUES('2018-01-20T09:00:43', '2017-01-20T09:00:55', 2, NULL); +:PREFIX SELECT last(temp, time) FROM btest; + last +------ + +(1 row) + +--check non-null element "overrides" NULL because it comes after. +INSERT INTO btest VALUES('2019-01-20T09:00:43', '2018-01-20T09:00:55', 2, 30.5); +:PREFIX SELECT last(temp, time) FROM btest; + last +------ + 30.5 +(1 row) + +--check null cmp element is skipped +INSERT INTO btest VALUES('2018-01-20T09:00:43', NULL, 2, 32.3); +:PREFIX SELECT last(temp, time_alt) FROM btest; + last +------ + 30.5 +(1 row) + +-- fist returns NULL value +:PREFIX SELECT first(temp, time_alt) FROM btest; + first +------- + +(1 row) + +-- test first return non NULL value +INSERT INTO btest VALUES('2016-01-20T09:00:00', '2016-01-20T09:00:00', 2, 36.5); +:PREFIX SELECT first(temp, time_alt) FROM btest; + first +------- + 36.5 +(1 row) + +--check non null cmp element insert after null cmp +INSERT INTO btest VALUES('2020-01-20T09:00:43', '2020-01-20T09:00:43', 2, 35.3); +:PREFIX SELECT last(temp, time_alt) FROM btest; + last +------ + 35.3 +(1 row) + +:PREFIX SELECT first(temp, time_alt) FROM btest; + first +------- + 36.5 +(1 row) + +--cmp nulls should be ignored and not present in groups +:PREFIX SELECT gp, last(temp, time_alt) FROM btest GROUP BY gp ORDER BY gp; + gp | last +----+------ + 1 | 22.5 + 2 | 35.3 +(2 rows) + +--Previously, some bugs were found with NULLS and numeric types, so test that +INSERT INTO btest_numeric VALUES ('2019-01-20T09:00:43', NULL); +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + last +------ + +(1 row) + +--check non-null element "overrides" NULL because it comes after. +INSERT INTO btest_numeric VALUES('2020-01-20T09:00:43', 30.5); +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + last +------ + 30.5 +(1 row) + +-- do index scan for last +:PREFIX SELECT last(temp, time) FROM btest; + last +------ + 35.3 +(1 row) + +-- do index scan for first +:PREFIX SELECT first(temp, time) FROM btest; + first +------- + 36.5 +(1 row) + +-- can't do index scan when ordering on non-index column +:PREFIX SELECT first(temp, time_alt) FROM btest; + first +------- + 36.5 +(1 row) + +-- do index scan for subquery +:PREFIX SELECT * FROM (SELECT last(temp, time) FROM btest) last; + last +------ + 35.3 +(1 row) + +-- can't do index scan when using group by +:PREFIX SELECT last(temp, time) FROM btest GROUP BY gp ORDER BY gp; + last +------ + 25.1 + 35.3 +(2 rows) + +-- do index scan when agg function is used in CTE subquery +:PREFIX WITH last_temp AS (SELECT last(temp, time) FROM btest) SELECT * from last_temp; + last +------ + 35.3 +(1 row) + +-- do index scan when using both FIRST and LAST aggregate functions +:PREFIX SELECT first(temp, time), last(temp, time) FROM btest; + first | last +-------+------ + 36.5 | 35.3 +(1 row) + +-- verify results when using both FIRST and LAST +:PREFIX SELECT first(temp, time), last(temp, time) FROM btest; + first | last +-------+------ + 36.5 | 35.3 +(1 row) + +-- do index scan when using WHERE +:PREFIX SELECT last(temp, time) FROM btest WHERE time <= '2017-01-20T09:00:02'; + last +------ + 35.5 +(1 row) + +-- can't do index scan for MAX and LAST combined (MinMax optimization fails when having different aggregate functions) +:PREFIX SELECT max(time), last(temp, time) FROM btest; + max | last +--------------------------+------ + Mon Jan 20 09:00:43 2020 | 35.3 +(1 row) + +-- can't do index scan when using FIRST/LAST in ORDER BY +:PREFIX SELECT last(temp, time) FROM btest ORDER BY last(temp, time); + last +------ + 35.3 +(1 row) + +-- do index scan +:PREFIX SELECT last(temp, time) FROM btest WHERE temp < 30; + last +------ + 25.1 +(1 row) + +-- SELECT first(temp, time) FROM btest WHERE time >= '2017-01-20 09:00:47'; +-- do index scan +:PREFIX SELECT first(temp, time) FROM btest WHERE time >= '2017-01-20 09:00:47'; + first +------- + 25.1 +(1 row) + +-- can't do index scan when using WINDOW function +:PREFIX SELECT gp, last(temp, time) OVER (PARTITION BY gp) AS last FROM btest; + gp | last +----+------ + 1 | 25.1 + 1 | 25.1 + 1 | 25.1 + 2 | 35.3 + 2 | 35.3 + 2 | 35.3 + 2 | 35.3 + 2 | 35.3 + 2 | 35.3 + 2 | 35.3 + 2 | 35.3 +(11 rows) + +-- test constants +:PREFIX SELECT first(100, 100) FROM btest; + first +------- + 100 +(1 row) + +-- create an index so we can test optimization +CREATE INDEX btest_time_alt_idx ON btest(time_alt); +:PREFIX SELECT last(temp, time_alt) FROM btest; + last +------ + 35.3 +(1 row) + +--test nested FIRST/LAST - should optimize +:PREFIX SELECT abs(last(temp, time)) FROM btest; + abs +------ + 35.3 +(1 row) + +-- test nested FIRST/LAST in ORDER BY - no optimization possible +:PREFIX SELECT abs(last(temp, time)) FROM btest ORDER BY abs(last(temp,time)); + abs +------ + 35.3 +(1 row) + +ROLLBACK; +-- Test with NULL numeric values +BEGIN; +TRUNCATE btest_numeric; +-- Empty table +:PREFIX SELECT first(btest_numeric, time) FROM btest_numeric; + first +------- + +(1 row) + +:PREFIX SELECT last(btest_numeric, time) FROM btest_numeric; + last +------ + +(1 row) + +-- Only NULL values +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; + first +------- + +(1 row) + +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + last +------ + +(1 row) + +:PREFIX SELECT first(time, quantity) FROM btest_numeric; + first +------- + +(1 row) + +:PREFIX SELECT last(time, quantity) FROM btest_numeric; + last +------ + +(1 row) + +-- NULL values followed by non-NULL values +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 1); +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 2); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; + first +------- + +(1 row) + +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + last +------ + 1 +(1 row) + +:PREFIX SELECT first(time, quantity) FROM btest_numeric; + first +-------------------------- + Sun Jan 20 09:00:43 2019 +(1 row) + +:PREFIX SELECT last(time, quantity) FROM btest_numeric; + last +-------------------------- + Sun Jan 20 09:00:43 2019 +(1 row) + +TRUNCATE btest_numeric; +-- non-NULL values followed by NULL values +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 1); +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 2); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; + first +------- + +(1 row) + +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + last +------ + 1 +(1 row) + +:PREFIX SELECT first(time, quantity) FROM btest_numeric; + first +-------------------------- + Sun Jan 20 09:00:43 2019 +(1 row) + +:PREFIX SELECT last(time, quantity) FROM btest_numeric; + last +-------------------------- + Sun Jan 20 09:00:43 2019 +(1 row) + +ROLLBACK; +-- diff results with optimizations disabled and enabled +\o :TEST_RESULTS_UNOPTIMIZED +SET timescaledb.enable_optimizations TO false; +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- canary for results diff +-- this should be only output of results diff +SELECT setting, current_setting(setting) AS value from (VALUES ('timescaledb.enable_optimizations')) v(setting); +:PREFIX SELECT time, gp, temp FROM btest ORDER BY time; +:PREFIX SELECT last(temp, time) FROM btest; +:PREFIX SELECT first(temp, time) FROM btest; +:PREFIX SELECT last(temp, time_alt) FROM btest; +:PREFIX SELECT first(temp, time_alt) FROM btest; +:PREFIX SELECT gp, last(temp, time) FROM btest GROUP BY gp ORDER BY gp; +:PREFIX SELECT gp, first(temp, time) FROM btest GROUP BY gp ORDER BY gp; +--check whole row +:PREFIX SELECT gp, first(btest, time) FROM btest GROUP BY gp ORDER BY gp; +--check toasted col +:PREFIX SELECT gp, left(last(strid, time), 10) FROM btest GROUP BY gp ORDER BY gp; +:PREFIX SELECT gp, last(temp, strid) FROM btest GROUP BY gp ORDER BY gp; +:PREFIX SELECT gp, last(strid, temp) FROM btest GROUP BY gp ORDER BY gp; +BEGIN; +--check null value as last element +INSERT INTO btest VALUES('2018-01-20T09:00:43', '2017-01-20T09:00:55', 2, NULL); +:PREFIX SELECT last(temp, time) FROM btest; +--check non-null element "overrides" NULL because it comes after. +INSERT INTO btest VALUES('2019-01-20T09:00:43', '2018-01-20T09:00:55', 2, 30.5); +:PREFIX SELECT last(temp, time) FROM btest; +--check null cmp element is skipped +INSERT INTO btest VALUES('2018-01-20T09:00:43', NULL, 2, 32.3); +:PREFIX SELECT last(temp, time_alt) FROM btest; +-- fist returns NULL value +:PREFIX SELECT first(temp, time_alt) FROM btest; +-- test first return non NULL value +INSERT INTO btest VALUES('2016-01-20T09:00:00', '2016-01-20T09:00:00', 2, 36.5); +:PREFIX SELECT first(temp, time_alt) FROM btest; +--check non null cmp element insert after null cmp +INSERT INTO btest VALUES('2020-01-20T09:00:43', '2020-01-20T09:00:43', 2, 35.3); +:PREFIX SELECT last(temp, time_alt) FROM btest; +:PREFIX SELECT first(temp, time_alt) FROM btest; +--cmp nulls should be ignored and not present in groups +:PREFIX SELECT gp, last(temp, time_alt) FROM btest GROUP BY gp ORDER BY gp; +--Previously, some bugs were found with NULLS and numeric types, so test that +INSERT INTO btest_numeric VALUES ('2019-01-20T09:00:43', NULL); +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +--check non-null element "overrides" NULL because it comes after. +INSERT INTO btest_numeric VALUES('2020-01-20T09:00:43', 30.5); +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +-- do index scan for last +:PREFIX SELECT last(temp, time) FROM btest; +-- do index scan for first +:PREFIX SELECT first(temp, time) FROM btest; +-- can't do index scan when ordering on non-index column +:PREFIX SELECT first(temp, time_alt) FROM btest; +-- do index scan for subquery +:PREFIX SELECT * FROM (SELECT last(temp, time) FROM btest) last; +-- can't do index scan when using group by +:PREFIX SELECT last(temp, time) FROM btest GROUP BY gp ORDER BY gp; +-- do index scan when agg function is used in CTE subquery +:PREFIX WITH last_temp AS (SELECT last(temp, time) FROM btest) SELECT * from last_temp; +-- do index scan when using both FIRST and LAST aggregate functions +:PREFIX SELECT first(temp, time), last(temp, time) FROM btest; +-- verify results when using both FIRST and LAST +:PREFIX SELECT first(temp, time), last(temp, time) FROM btest; +-- do index scan when using WHERE +:PREFIX SELECT last(temp, time) FROM btest WHERE time <= '2017-01-20T09:00:02'; +-- can't do index scan for MAX and LAST combined (MinMax optimization fails when having different aggregate functions) +:PREFIX SELECT max(time), last(temp, time) FROM btest; +-- can't do index scan when using FIRST/LAST in ORDER BY +:PREFIX SELECT last(temp, time) FROM btest ORDER BY last(temp, time); +-- do index scan +:PREFIX SELECT last(temp, time) FROM btest WHERE temp < 30; +-- SELECT first(temp, time) FROM btest WHERE time >= '2017-01-20 09:00:47'; +-- do index scan +:PREFIX SELECT first(temp, time) FROM btest WHERE time >= '2017-01-20 09:00:47'; +-- can't do index scan when using WINDOW function +:PREFIX SELECT gp, last(temp, time) OVER (PARTITION BY gp) AS last FROM btest; +-- test constants +:PREFIX SELECT first(100, 100) FROM btest; +-- create an index so we can test optimization +CREATE INDEX btest_time_alt_idx ON btest(time_alt); +:PREFIX SELECT last(temp, time_alt) FROM btest; +--test nested FIRST/LAST - should optimize +:PREFIX SELECT abs(last(temp, time)) FROM btest; +-- test nested FIRST/LAST in ORDER BY - no optimization possible +:PREFIX SELECT abs(last(temp, time)) FROM btest ORDER BY abs(last(temp,time)); +ROLLBACK; +-- Test with NULL numeric values +BEGIN; +TRUNCATE btest_numeric; +-- Empty table +:PREFIX SELECT first(btest_numeric, time) FROM btest_numeric; +:PREFIX SELECT last(btest_numeric, time) FROM btest_numeric; +-- Only NULL values +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +:PREFIX SELECT first(time, quantity) FROM btest_numeric; +:PREFIX SELECT last(time, quantity) FROM btest_numeric; +-- NULL values followed by non-NULL values +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 1); +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 2); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +:PREFIX SELECT first(time, quantity) FROM btest_numeric; +:PREFIX SELECT last(time, quantity) FROM btest_numeric; +TRUNCATE btest_numeric; +-- non-NULL values followed by NULL values +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 1); +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 2); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +:PREFIX SELECT first(time, quantity) FROM btest_numeric; +:PREFIX SELECT last(time, quantity) FROM btest_numeric; +ROLLBACK; +\o +\o :TEST_RESULTS_OPTIMIZED +SET timescaledb.enable_optimizations TO true; +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- canary for results diff +-- this should be only output of results diff +SELECT setting, current_setting(setting) AS value from (VALUES ('timescaledb.enable_optimizations')) v(setting); +:PREFIX SELECT time, gp, temp FROM btest ORDER BY time; +:PREFIX SELECT last(temp, time) FROM btest; +:PREFIX SELECT first(temp, time) FROM btest; +:PREFIX SELECT last(temp, time_alt) FROM btest; +:PREFIX SELECT first(temp, time_alt) FROM btest; +:PREFIX SELECT gp, last(temp, time) FROM btest GROUP BY gp ORDER BY gp; +:PREFIX SELECT gp, first(temp, time) FROM btest GROUP BY gp ORDER BY gp; +--check whole row +:PREFIX SELECT gp, first(btest, time) FROM btest GROUP BY gp ORDER BY gp; +--check toasted col +:PREFIX SELECT gp, left(last(strid, time), 10) FROM btest GROUP BY gp ORDER BY gp; +:PREFIX SELECT gp, last(temp, strid) FROM btest GROUP BY gp ORDER BY gp; +:PREFIX SELECT gp, last(strid, temp) FROM btest GROUP BY gp ORDER BY gp; +BEGIN; +--check null value as last element +INSERT INTO btest VALUES('2018-01-20T09:00:43', '2017-01-20T09:00:55', 2, NULL); +:PREFIX SELECT last(temp, time) FROM btest; +--check non-null element "overrides" NULL because it comes after. +INSERT INTO btest VALUES('2019-01-20T09:00:43', '2018-01-20T09:00:55', 2, 30.5); +:PREFIX SELECT last(temp, time) FROM btest; +--check null cmp element is skipped +INSERT INTO btest VALUES('2018-01-20T09:00:43', NULL, 2, 32.3); +:PREFIX SELECT last(temp, time_alt) FROM btest; +-- fist returns NULL value +:PREFIX SELECT first(temp, time_alt) FROM btest; +-- test first return non NULL value +INSERT INTO btest VALUES('2016-01-20T09:00:00', '2016-01-20T09:00:00', 2, 36.5); +:PREFIX SELECT first(temp, time_alt) FROM btest; +--check non null cmp element insert after null cmp +INSERT INTO btest VALUES('2020-01-20T09:00:43', '2020-01-20T09:00:43', 2, 35.3); +:PREFIX SELECT last(temp, time_alt) FROM btest; +:PREFIX SELECT first(temp, time_alt) FROM btest; +--cmp nulls should be ignored and not present in groups +:PREFIX SELECT gp, last(temp, time_alt) FROM btest GROUP BY gp ORDER BY gp; +--Previously, some bugs were found with NULLS and numeric types, so test that +INSERT INTO btest_numeric VALUES ('2019-01-20T09:00:43', NULL); +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +--check non-null element "overrides" NULL because it comes after. +INSERT INTO btest_numeric VALUES('2020-01-20T09:00:43', 30.5); +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +-- do index scan for last +:PREFIX SELECT last(temp, time) FROM btest; +-- do index scan for first +:PREFIX SELECT first(temp, time) FROM btest; +-- can't do index scan when ordering on non-index column +:PREFIX SELECT first(temp, time_alt) FROM btest; +-- do index scan for subquery +:PREFIX SELECT * FROM (SELECT last(temp, time) FROM btest) last; +-- can't do index scan when using group by +:PREFIX SELECT last(temp, time) FROM btest GROUP BY gp ORDER BY gp; +-- do index scan when agg function is used in CTE subquery +:PREFIX WITH last_temp AS (SELECT last(temp, time) FROM btest) SELECT * from last_temp; +-- do index scan when using both FIRST and LAST aggregate functions +:PREFIX SELECT first(temp, time), last(temp, time) FROM btest; +-- verify results when using both FIRST and LAST +:PREFIX SELECT first(temp, time), last(temp, time) FROM btest; +-- do index scan when using WHERE +:PREFIX SELECT last(temp, time) FROM btest WHERE time <= '2017-01-20T09:00:02'; +-- can't do index scan for MAX and LAST combined (MinMax optimization fails when having different aggregate functions) +:PREFIX SELECT max(time), last(temp, time) FROM btest; +-- can't do index scan when using FIRST/LAST in ORDER BY +:PREFIX SELECT last(temp, time) FROM btest ORDER BY last(temp, time); +-- do index scan +:PREFIX SELECT last(temp, time) FROM btest WHERE temp < 30; +-- SELECT first(temp, time) FROM btest WHERE time >= '2017-01-20 09:00:47'; +-- do index scan +:PREFIX SELECT first(temp, time) FROM btest WHERE time >= '2017-01-20 09:00:47'; +-- can't do index scan when using WINDOW function +:PREFIX SELECT gp, last(temp, time) OVER (PARTITION BY gp) AS last FROM btest; +-- test constants +:PREFIX SELECT first(100, 100) FROM btest; +-- create an index so we can test optimization +CREATE INDEX btest_time_alt_idx ON btest(time_alt); +:PREFIX SELECT last(temp, time_alt) FROM btest; +--test nested FIRST/LAST - should optimize +:PREFIX SELECT abs(last(temp, time)) FROM btest; +-- test nested FIRST/LAST in ORDER BY - no optimization possible +:PREFIX SELECT abs(last(temp, time)) FROM btest ORDER BY abs(last(temp,time)); +ROLLBACK; +-- Test with NULL numeric values +BEGIN; +TRUNCATE btest_numeric; +-- Empty table +:PREFIX SELECT first(btest_numeric, time) FROM btest_numeric; +:PREFIX SELECT last(btest_numeric, time) FROM btest_numeric; +-- Only NULL values +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +:PREFIX SELECT first(time, quantity) FROM btest_numeric; +:PREFIX SELECT last(time, quantity) FROM btest_numeric; +-- NULL values followed by non-NULL values +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 1); +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 2); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +:PREFIX SELECT first(time, quantity) FROM btest_numeric; +:PREFIX SELECT last(time, quantity) FROM btest_numeric; +TRUNCATE btest_numeric; +-- non-NULL values followed by NULL values +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 1); +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 2); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +:PREFIX SELECT first(time, quantity) FROM btest_numeric; +:PREFIX SELECT last(time, quantity) FROM btest_numeric; +ROLLBACK; +\o +:DIFF_CMD +--- Unoptimized result ++++ Optimized result +@@ -1,6 +1,6 @@ + setting | value + ----------------------------------+------- +- timescaledb.enable_optimizations | off ++ timescaledb.enable_optimizations | on + (1 row) + + time | gp | temp +-- Test partial aggregation +CREATE TABLE partial_aggregation (time timestamptz NOT NULL, quantity numeric, longvalue text); +SELECT schema_name, table_name, created FROM create_hypertable('partial_aggregation', 'time'); + schema_name | table_name | created +-------------+---------------------+--------- + public | partial_aggregation | t +(1 row) + +INSERT INTO partial_aggregation VALUES('2018-01-20T09:00:43', NULL, NULL); +INSERT INTO partial_aggregation VALUES('2018-01-20T09:00:43', NULL, NULL); +INSERT INTO partial_aggregation VALUES('2019-01-20T09:00:43', 1, 'hello'); +INSERT INTO partial_aggregation VALUES('2019-01-20T09:00:43', 2, 'world'); +INSERT INTO partial_aggregation VALUES('2020-01-20T09:00:43', 3, 'some'); +INSERT INTO partial_aggregation VALUES('2020-01-20T09:00:43', 3, 'more'); +INSERT INTO partial_aggregation VALUES('2021-01-20T09:00:43', 3, 'some'); +INSERT INTO partial_aggregation VALUES('2021-01-20T09:00:43', 3, 'more'); +INSERT INTO partial_aggregation VALUES('2022-01-20T09:00:43', 4, 'words'); +INSERT INTO partial_aggregation VALUES('2022-01-20T09:00:43', 5, 'words'); +INSERT INTO partial_aggregation VALUES('2023-01-20T09:00:43', 6, 'words'); +INSERT INTO partial_aggregation VALUES('2023-01-20T09:00:43', 7, 'words'); +-- Use enable_partitionwise_aggregate to create partial aggregates per chunk +SET enable_partitionwise_aggregate = ON; +SELECT + format('SELECT %3$s, %1$s FROM partial_aggregation WHERE %2$s GROUP BY %3$s ORDER BY 1, 2;', + function, condition, grouping) +FROM + unnest(array[ + 'first(time, quantity), last(time, quantity)', + 'last(longvalue, quantity)', + 'last(quantity, longvalue)', + 'last(quantity, time)', + 'last(time, longvalue)']) AS function, + unnest(array[ + 'true', + $$time < '2021-01-01'$$, + 'quantity is null', + 'quantity is not null', + 'quantity > 3']) AS condition, + unnest(array[ + '777::text' /* dummy grouping column */, + 'longvalue', + 'quantity', + $$time_bucket('1 year', time)$$, + $$time_bucket('3 year', time)$$]) AS grouping +\gexec +SELECT 777::text, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE true GROUP BY 777::text ORDER BY 1, 2; + text | first | last +------+------------------------------+------------------------------ + 777 | Sun Jan 20 09:00:43 2019 PST | Fri Jan 20 09:00:43 2023 PST +(1 row) + +SELECT 777::text, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY 777::text ORDER BY 1, 2; + text | first | last +------+------------------------------+------------------------------ + 777 | Sun Jan 20 09:00:43 2019 PST | Mon Jan 20 09:00:43 2020 PST +(1 row) + +SELECT 777::text, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY 777::text ORDER BY 1, 2; + text | first | last +------+-------+------ + 777 | | +(1 row) + +SELECT 777::text, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY 777::text ORDER BY 1, 2; + text | first | last +------+------------------------------+------------------------------ + 777 | Sun Jan 20 09:00:43 2019 PST | Fri Jan 20 09:00:43 2023 PST +(1 row) + +SELECT 777::text, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY 777::text ORDER BY 1, 2; + text | first | last +------+------------------------------+------------------------------ + 777 | Thu Jan 20 09:00:43 2022 PST | Fri Jan 20 09:00:43 2023 PST +(1 row) + +SELECT 777::text, last(longvalue, quantity) FROM partial_aggregation WHERE true GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------- + 777 | words +(1 row) + +SELECT 777::text, last(longvalue, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | some +(1 row) + +SELECT 777::text, last(longvalue, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | +(1 row) + +SELECT 777::text, last(longvalue, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------- + 777 | words +(1 row) + +SELECT 777::text, last(longvalue, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------- + 777 | words +(1 row) + +SELECT 777::text, last(quantity, longvalue) FROM partial_aggregation WHERE true GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | 2 +(1 row) + +SELECT 777::text, last(quantity, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | 2 +(1 row) + +SELECT 777::text, last(quantity, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | +(1 row) + +SELECT 777::text, last(quantity, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | 2 +(1 row) + +SELECT 777::text, last(quantity, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | 4 +(1 row) + +SELECT 777::text, last(quantity, time) FROM partial_aggregation WHERE true GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | 6 +(1 row) + +SELECT 777::text, last(quantity, time) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | 3 +(1 row) + +SELECT 777::text, last(quantity, time) FROM partial_aggregation WHERE quantity is null GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | +(1 row) + +SELECT 777::text, last(quantity, time) FROM partial_aggregation WHERE quantity is not null GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | 6 +(1 row) + +SELECT 777::text, last(quantity, time) FROM partial_aggregation WHERE quantity > 3 GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | 6 +(1 row) + +SELECT 777::text, last(time, longvalue) FROM partial_aggregation WHERE true GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------------------------------ + 777 | Sun Jan 20 09:00:43 2019 PST +(1 row) + +SELECT 777::text, last(time, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------------------------------ + 777 | Sun Jan 20 09:00:43 2019 PST +(1 row) + +SELECT 777::text, last(time, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | +(1 row) + +SELECT 777::text, last(time, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------------------------------ + 777 | Sun Jan 20 09:00:43 2019 PST +(1 row) + +SELECT 777::text, last(time, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------------------------------ + 777 | Thu Jan 20 09:00:43 2022 PST +(1 row) + +SELECT longvalue, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE true GROUP BY longvalue ORDER BY 1, 2; + longvalue | first | last +-----------+------------------------------+------------------------------ + hello | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + more | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + some | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + words | Thu Jan 20 09:00:43 2022 PST | Fri Jan 20 09:00:43 2023 PST + world | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + | | +(6 rows) + +SELECT longvalue, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY longvalue ORDER BY 1, 2; + longvalue | first | last +-----------+------------------------------+------------------------------ + hello | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + more | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + some | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + world | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + | | +(5 rows) + +SELECT longvalue, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY longvalue ORDER BY 1, 2; + longvalue | first | last +-----------+-------+------ + | | +(1 row) + +SELECT longvalue, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY longvalue ORDER BY 1, 2; + longvalue | first | last +-----------+------------------------------+------------------------------ + hello | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + more | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + some | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + words | Thu Jan 20 09:00:43 2022 PST | Fri Jan 20 09:00:43 2023 PST + world | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST +(5 rows) + +SELECT longvalue, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY longvalue ORDER BY 1, 2; + longvalue | first | last +-----------+------------------------------+------------------------------ + words | Thu Jan 20 09:00:43 2022 PST | Fri Jan 20 09:00:43 2023 PST +(1 row) + +SELECT longvalue, last(longvalue, quantity) FROM partial_aggregation WHERE true GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------- + hello | hello + more | more + some | some + words | words + world | world + | +(6 rows) + +SELECT longvalue, last(longvalue, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------- + hello | hello + more | more + some | some + world | world + | +(5 rows) + +SELECT longvalue, last(longvalue, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + | +(1 row) + +SELECT longvalue, last(longvalue, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------- + hello | hello + more | more + some | some + words | words + world | world +(5 rows) + +SELECT longvalue, last(longvalue, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------- + words | words +(1 row) + +SELECT longvalue, last(quantity, longvalue) FROM partial_aggregation WHERE true GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + hello | 1 + more | 3 + some | 3 + words | 4 + world | 2 + | +(6 rows) + +SELECT longvalue, last(quantity, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + hello | 1 + more | 3 + some | 3 + world | 2 + | +(5 rows) + +SELECT longvalue, last(quantity, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + | +(1 row) + +SELECT longvalue, last(quantity, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + hello | 1 + more | 3 + some | 3 + words | 4 + world | 2 +(5 rows) + +SELECT longvalue, last(quantity, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + words | 4 +(1 row) + +SELECT longvalue, last(quantity, time) FROM partial_aggregation WHERE true GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + hello | 1 + more | 3 + some | 3 + words | 6 + world | 2 + | +(6 rows) + +SELECT longvalue, last(quantity, time) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + hello | 1 + more | 3 + some | 3 + world | 2 + | +(5 rows) + +SELECT longvalue, last(quantity, time) FROM partial_aggregation WHERE quantity is null GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + | +(1 row) + +SELECT longvalue, last(quantity, time) FROM partial_aggregation WHERE quantity is not null GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + hello | 1 + more | 3 + some | 3 + words | 6 + world | 2 +(5 rows) + +SELECT longvalue, last(quantity, time) FROM partial_aggregation WHERE quantity > 3 GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + words | 6 +(1 row) + +SELECT longvalue, last(time, longvalue) FROM partial_aggregation WHERE true GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------------------------------ + hello | Sun Jan 20 09:00:43 2019 PST + more | Mon Jan 20 09:00:43 2020 PST + some | Mon Jan 20 09:00:43 2020 PST + words | Thu Jan 20 09:00:43 2022 PST + world | Sun Jan 20 09:00:43 2019 PST + | +(6 rows) + +SELECT longvalue, last(time, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------------------------------ + hello | Sun Jan 20 09:00:43 2019 PST + more | Mon Jan 20 09:00:43 2020 PST + some | Mon Jan 20 09:00:43 2020 PST + world | Sun Jan 20 09:00:43 2019 PST + | +(5 rows) + +SELECT longvalue, last(time, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + | +(1 row) + +SELECT longvalue, last(time, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------------------------------ + hello | Sun Jan 20 09:00:43 2019 PST + more | Mon Jan 20 09:00:43 2020 PST + some | Mon Jan 20 09:00:43 2020 PST + words | Thu Jan 20 09:00:43 2022 PST + world | Sun Jan 20 09:00:43 2019 PST +(5 rows) + +SELECT longvalue, last(time, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------------------------------ + words | Thu Jan 20 09:00:43 2022 PST +(1 row) + +SELECT quantity, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE true GROUP BY quantity ORDER BY 1, 2; + quantity | first | last +----------+------------------------------+------------------------------ + 1 | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + 2 | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + 3 | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + 4 | Thu Jan 20 09:00:43 2022 PST | Thu Jan 20 09:00:43 2022 PST + 5 | Thu Jan 20 09:00:43 2022 PST | Thu Jan 20 09:00:43 2022 PST + 6 | Fri Jan 20 09:00:43 2023 PST | Fri Jan 20 09:00:43 2023 PST + 7 | Fri Jan 20 09:00:43 2023 PST | Fri Jan 20 09:00:43 2023 PST + | | +(8 rows) + +SELECT quantity, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY quantity ORDER BY 1, 2; + quantity | first | last +----------+------------------------------+------------------------------ + 1 | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + 2 | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + 3 | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + | | +(4 rows) + +SELECT quantity, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY quantity ORDER BY 1, 2; + quantity | first | last +----------+-------+------ + | | +(1 row) + +SELECT quantity, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY quantity ORDER BY 1, 2; + quantity | first | last +----------+------------------------------+------------------------------ + 1 | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + 2 | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + 3 | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + 4 | Thu Jan 20 09:00:43 2022 PST | Thu Jan 20 09:00:43 2022 PST + 5 | Thu Jan 20 09:00:43 2022 PST | Thu Jan 20 09:00:43 2022 PST + 6 | Fri Jan 20 09:00:43 2023 PST | Fri Jan 20 09:00:43 2023 PST + 7 | Fri Jan 20 09:00:43 2023 PST | Fri Jan 20 09:00:43 2023 PST +(7 rows) + +SELECT quantity, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY quantity ORDER BY 1, 2; + quantity | first | last +----------+------------------------------+------------------------------ + 4 | Thu Jan 20 09:00:43 2022 PST | Thu Jan 20 09:00:43 2022 PST + 5 | Thu Jan 20 09:00:43 2022 PST | Thu Jan 20 09:00:43 2022 PST + 6 | Fri Jan 20 09:00:43 2023 PST | Fri Jan 20 09:00:43 2023 PST + 7 | Fri Jan 20 09:00:43 2023 PST | Fri Jan 20 09:00:43 2023 PST +(4 rows) + +SELECT quantity, last(longvalue, quantity) FROM partial_aggregation WHERE true GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------- + 1 | hello + 2 | world + 3 | some + 4 | words + 5 | words + 6 | words + 7 | words + | +(8 rows) + +SELECT quantity, last(longvalue, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------- + 1 | hello + 2 | world + 3 | some + | +(4 rows) + +SELECT quantity, last(longvalue, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + | +(1 row) + +SELECT quantity, last(longvalue, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------- + 1 | hello + 2 | world + 3 | some + 4 | words + 5 | words + 6 | words + 7 | words +(7 rows) + +SELECT quantity, last(longvalue, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------- + 4 | words + 5 | words + 6 | words + 7 | words +(4 rows) + +SELECT quantity, last(quantity, longvalue) FROM partial_aggregation WHERE true GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + | +(8 rows) + +SELECT quantity, last(quantity, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + 1 | 1 + 2 | 2 + 3 | 3 + | +(4 rows) + +SELECT quantity, last(quantity, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + | +(1 row) + +SELECT quantity, last(quantity, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 +(7 rows) + +SELECT quantity, last(quantity, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 +(4 rows) + +SELECT quantity, last(quantity, time) FROM partial_aggregation WHERE true GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + | +(8 rows) + +SELECT quantity, last(quantity, time) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + 1 | 1 + 2 | 2 + 3 | 3 + | +(4 rows) + +SELECT quantity, last(quantity, time) FROM partial_aggregation WHERE quantity is null GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + | +(1 row) + +SELECT quantity, last(quantity, time) FROM partial_aggregation WHERE quantity is not null GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 +(7 rows) + +SELECT quantity, last(quantity, time) FROM partial_aggregation WHERE quantity > 3 GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 +(4 rows) + +SELECT quantity, last(time, longvalue) FROM partial_aggregation WHERE true GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------------------------------ + 1 | Sun Jan 20 09:00:43 2019 PST + 2 | Sun Jan 20 09:00:43 2019 PST + 3 | Mon Jan 20 09:00:43 2020 PST + 4 | Thu Jan 20 09:00:43 2022 PST + 5 | Thu Jan 20 09:00:43 2022 PST + 6 | Fri Jan 20 09:00:43 2023 PST + 7 | Fri Jan 20 09:00:43 2023 PST + | +(8 rows) + +SELECT quantity, last(time, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------------------------------ + 1 | Sun Jan 20 09:00:43 2019 PST + 2 | Sun Jan 20 09:00:43 2019 PST + 3 | Mon Jan 20 09:00:43 2020 PST + | +(4 rows) + +SELECT quantity, last(time, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + | +(1 row) + +SELECT quantity, last(time, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------------------------------ + 1 | Sun Jan 20 09:00:43 2019 PST + 2 | Sun Jan 20 09:00:43 2019 PST + 3 | Mon Jan 20 09:00:43 2020 PST + 4 | Thu Jan 20 09:00:43 2022 PST + 5 | Thu Jan 20 09:00:43 2022 PST + 6 | Fri Jan 20 09:00:43 2023 PST + 7 | Fri Jan 20 09:00:43 2023 PST +(7 rows) + +SELECT quantity, last(time, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------------------------------ + 4 | Thu Jan 20 09:00:43 2022 PST + 5 | Thu Jan 20 09:00:43 2022 PST + 6 | Fri Jan 20 09:00:43 2023 PST + 7 | Fri Jan 20 09:00:43 2023 PST +(4 rows) + +SELECT time_bucket('1 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE true GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | | + Mon Dec 31 16:00:00 2018 PST | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + Tue Dec 31 16:00:00 2019 PST | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + Thu Dec 31 16:00:00 2020 PST | Wed Jan 20 09:00:43 2021 PST | Wed Jan 20 09:00:43 2021 PST + Fri Dec 31 16:00:00 2021 PST | Thu Jan 20 09:00:43 2022 PST | Thu Jan 20 09:00:43 2022 PST + Sat Dec 31 16:00:00 2022 PST | Fri Jan 20 09:00:43 2023 PST | Fri Jan 20 09:00:43 2023 PST +(6 rows) + +SELECT time_bucket('1 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | | + Mon Dec 31 16:00:00 2018 PST | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + Tue Dec 31 16:00:00 2019 PST | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST +(3 rows) + +SELECT time_bucket('1 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+-------+------ + Sun Dec 31 16:00:00 2017 PST | | +(1 row) + +SELECT time_bucket('1 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+------------------------------+------------------------------ + Mon Dec 31 16:00:00 2018 PST | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + Tue Dec 31 16:00:00 2019 PST | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + Thu Dec 31 16:00:00 2020 PST | Wed Jan 20 09:00:43 2021 PST | Wed Jan 20 09:00:43 2021 PST + Fri Dec 31 16:00:00 2021 PST | Thu Jan 20 09:00:43 2022 PST | Thu Jan 20 09:00:43 2022 PST + Sat Dec 31 16:00:00 2022 PST | Fri Jan 20 09:00:43 2023 PST | Fri Jan 20 09:00:43 2023 PST +(5 rows) + +SELECT time_bucket('1 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+------------------------------+------------------------------ + Fri Dec 31 16:00:00 2021 PST | Thu Jan 20 09:00:43 2022 PST | Thu Jan 20 09:00:43 2022 PST + Sat Dec 31 16:00:00 2022 PST | Fri Jan 20 09:00:43 2023 PST | Fri Jan 20 09:00:43 2023 PST +(2 rows) + +SELECT time_bucket('1 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE true GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------- + Sun Dec 31 16:00:00 2017 PST | + Mon Dec 31 16:00:00 2018 PST | world + Tue Dec 31 16:00:00 2019 PST | some + Thu Dec 31 16:00:00 2020 PST | some + Fri Dec 31 16:00:00 2021 PST | words + Sat Dec 31 16:00:00 2022 PST | words +(6 rows) + +SELECT time_bucket('1 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------- + Sun Dec 31 16:00:00 2017 PST | + Mon Dec 31 16:00:00 2018 PST | world + Tue Dec 31 16:00:00 2019 PST | more +(3 rows) + +SELECT time_bucket('1 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | +(1 row) + +SELECT time_bucket('1 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------- + Mon Dec 31 16:00:00 2018 PST | world + Tue Dec 31 16:00:00 2019 PST | some + Thu Dec 31 16:00:00 2020 PST | some + Fri Dec 31 16:00:00 2021 PST | words + Sat Dec 31 16:00:00 2022 PST | words +(5 rows) + +SELECT time_bucket('1 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------- + Fri Dec 31 16:00:00 2021 PST | words + Sat Dec 31 16:00:00 2022 PST | words +(2 rows) + +SELECT time_bucket('1 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE true GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | + Mon Dec 31 16:00:00 2018 PST | 2 + Tue Dec 31 16:00:00 2019 PST | 3 + Thu Dec 31 16:00:00 2020 PST | 3 + Fri Dec 31 16:00:00 2021 PST | 4 + Sat Dec 31 16:00:00 2022 PST | 6 +(6 rows) + +SELECT time_bucket('1 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | + Mon Dec 31 16:00:00 2018 PST | 2 + Tue Dec 31 16:00:00 2019 PST | 3 +(3 rows) + +SELECT time_bucket('1 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | +(1 row) + +SELECT time_bucket('1 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Mon Dec 31 16:00:00 2018 PST | 2 + Tue Dec 31 16:00:00 2019 PST | 3 + Thu Dec 31 16:00:00 2020 PST | 3 + Fri Dec 31 16:00:00 2021 PST | 4 + Sat Dec 31 16:00:00 2022 PST | 6 +(5 rows) + +SELECT time_bucket('1 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Fri Dec 31 16:00:00 2021 PST | 4 + Sat Dec 31 16:00:00 2022 PST | 6 +(2 rows) + +SELECT time_bucket('1 year', time), last(quantity, time) FROM partial_aggregation WHERE true GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | + Mon Dec 31 16:00:00 2018 PST | 1 + Tue Dec 31 16:00:00 2019 PST | 3 + Thu Dec 31 16:00:00 2020 PST | 3 + Fri Dec 31 16:00:00 2021 PST | 4 + Sat Dec 31 16:00:00 2022 PST | 6 +(6 rows) + +SELECT time_bucket('1 year', time), last(quantity, time) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | + Mon Dec 31 16:00:00 2018 PST | 2 + Tue Dec 31 16:00:00 2019 PST | 3 +(3 rows) + +SELECT time_bucket('1 year', time), last(quantity, time) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | +(1 row) + +SELECT time_bucket('1 year', time), last(quantity, time) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Mon Dec 31 16:00:00 2018 PST | 1 + Tue Dec 31 16:00:00 2019 PST | 3 + Thu Dec 31 16:00:00 2020 PST | 3 + Fri Dec 31 16:00:00 2021 PST | 4 + Sat Dec 31 16:00:00 2022 PST | 6 +(5 rows) + +SELECT time_bucket('1 year', time), last(quantity, time) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Fri Dec 31 16:00:00 2021 PST | 4 + Sat Dec 31 16:00:00 2022 PST | 6 +(2 rows) + +SELECT time_bucket('1 year', time), last(time, longvalue) FROM partial_aggregation WHERE true GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | + Mon Dec 31 16:00:00 2018 PST | Sun Jan 20 09:00:43 2019 PST + Tue Dec 31 16:00:00 2019 PST | Mon Jan 20 09:00:43 2020 PST + Thu Dec 31 16:00:00 2020 PST | Wed Jan 20 09:00:43 2021 PST + Fri Dec 31 16:00:00 2021 PST | Thu Jan 20 09:00:43 2022 PST + Sat Dec 31 16:00:00 2022 PST | Fri Jan 20 09:00:43 2023 PST +(6 rows) + +SELECT time_bucket('1 year', time), last(time, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | + Mon Dec 31 16:00:00 2018 PST | Sun Jan 20 09:00:43 2019 PST + Tue Dec 31 16:00:00 2019 PST | Mon Jan 20 09:00:43 2020 PST +(3 rows) + +SELECT time_bucket('1 year', time), last(time, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | +(1 row) + +SELECT time_bucket('1 year', time), last(time, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------------------------------ + Mon Dec 31 16:00:00 2018 PST | Sun Jan 20 09:00:43 2019 PST + Tue Dec 31 16:00:00 2019 PST | Mon Jan 20 09:00:43 2020 PST + Thu Dec 31 16:00:00 2020 PST | Wed Jan 20 09:00:43 2021 PST + Fri Dec 31 16:00:00 2021 PST | Thu Jan 20 09:00:43 2022 PST + Sat Dec 31 16:00:00 2022 PST | Fri Jan 20 09:00:43 2023 PST +(5 rows) + +SELECT time_bucket('1 year', time), last(time, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------------------------------ + Fri Dec 31 16:00:00 2021 PST | Thu Jan 20 09:00:43 2022 PST + Sat Dec 31 16:00:00 2022 PST | Fri Jan 20 09:00:43 2023 PST +(2 rows) + +SELECT time_bucket('3 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE true GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | Sun Jan 20 09:00:43 2019 PST | Mon Jan 20 09:00:43 2020 PST + Thu Dec 31 16:00:00 2020 PST | Wed Jan 20 09:00:43 2021 PST | Fri Jan 20 09:00:43 2023 PST +(2 rows) + +SELECT time_bucket('3 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | Sun Jan 20 09:00:43 2019 PST | Mon Jan 20 09:00:43 2020 PST +(1 row) + +SELECT time_bucket('3 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+-------+------ + Sun Dec 31 16:00:00 2017 PST | | +(1 row) + +SELECT time_bucket('3 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | Sun Jan 20 09:00:43 2019 PST | Mon Jan 20 09:00:43 2020 PST + Thu Dec 31 16:00:00 2020 PST | Wed Jan 20 09:00:43 2021 PST | Fri Jan 20 09:00:43 2023 PST +(2 rows) + +SELECT time_bucket('3 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+------------------------------+------------------------------ + Thu Dec 31 16:00:00 2020 PST | Thu Jan 20 09:00:43 2022 PST | Fri Jan 20 09:00:43 2023 PST +(1 row) + +SELECT time_bucket('3 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE true GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------- + Sun Dec 31 16:00:00 2017 PST | some + Thu Dec 31 16:00:00 2020 PST | words +(2 rows) + +SELECT time_bucket('3 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | more +(1 row) + +SELECT time_bucket('3 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | +(1 row) + +SELECT time_bucket('3 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------- + Sun Dec 31 16:00:00 2017 PST | some + Thu Dec 31 16:00:00 2020 PST | words +(2 rows) + +SELECT time_bucket('3 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------- + Thu Dec 31 16:00:00 2020 PST | words +(1 row) + +SELECT time_bucket('3 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE true GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | 2 + Thu Dec 31 16:00:00 2020 PST | 4 +(2 rows) + +SELECT time_bucket('3 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | 2 +(1 row) + +SELECT time_bucket('3 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | +(1 row) + +SELECT time_bucket('3 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | 2 + Thu Dec 31 16:00:00 2020 PST | 4 +(2 rows) + +SELECT time_bucket('3 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Thu Dec 31 16:00:00 2020 PST | 4 +(1 row) + +SELECT time_bucket('3 year', time), last(quantity, time) FROM partial_aggregation WHERE true GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | 3 + Thu Dec 31 16:00:00 2020 PST | 6 +(2 rows) + +SELECT time_bucket('3 year', time), last(quantity, time) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | 3 +(1 row) + +SELECT time_bucket('3 year', time), last(quantity, time) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | +(1 row) + +SELECT time_bucket('3 year', time), last(quantity, time) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | 3 + Thu Dec 31 16:00:00 2020 PST | 6 +(2 rows) + +SELECT time_bucket('3 year', time), last(quantity, time) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Thu Dec 31 16:00:00 2020 PST | 6 +(1 row) + +SELECT time_bucket('3 year', time), last(time, longvalue) FROM partial_aggregation WHERE true GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | Sun Jan 20 09:00:43 2019 PST + Thu Dec 31 16:00:00 2020 PST | Thu Jan 20 09:00:43 2022 PST +(2 rows) + +SELECT time_bucket('3 year', time), last(time, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | Sun Jan 20 09:00:43 2019 PST +(1 row) + +SELECT time_bucket('3 year', time), last(time, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | +(1 row) + +SELECT time_bucket('3 year', time), last(time, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | Sun Jan 20 09:00:43 2019 PST + Thu Dec 31 16:00:00 2020 PST | Thu Jan 20 09:00:43 2022 PST +(2 rows) + +SELECT time_bucket('3 year', time), last(time, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------------------------------ + Thu Dec 31 16:00:00 2020 PST | Thu Jan 20 09:00:43 2022 PST +(1 row) + +SET enable_partitionwise_aggregate = OFF; diff --git a/test/expected/agg_bookends-17.out b/test/expected/agg_bookends-17.out new file mode 100644 index 00000000000..89eaf419f09 --- /dev/null +++ b/test/expected/agg_bookends-17.out @@ -0,0 +1,2458 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +\set TEST_BASE_NAME agg_bookends +SELECT format('include/%s_load.sql', :'TEST_BASE_NAME') as "TEST_LOAD_NAME", + format('include/%s_query.sql', :'TEST_BASE_NAME') as "TEST_QUERY_NAME", + format('%s/results/%s_results_optimized.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_OPTIMIZED", + format('%s/results/%s_results_unoptimized.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_UNOPTIMIZED" +\gset +SELECT format('\! diff -u --label "Unoptimized result" --label "Optimized result" %s %s', :'TEST_RESULTS_UNOPTIMIZED', :'TEST_RESULTS_OPTIMIZED') as "DIFF_CMD" +\gset +\set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)' +\ir :TEST_LOAD_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +CREATE TABLE btest(time timestamp NOT NULL, time_alt timestamp, gp INTEGER, temp float, strid TEXT DEFAULT 'testing'); +SELECT schema_name, table_name, created FROM create_hypertable('btest', 'time'); +psql:include/agg_bookends_load.sql:6: WARNING: column type "timestamp without time zone" used for "time" does not follow best practices +psql:include/agg_bookends_load.sql:6: WARNING: column type "timestamp without time zone" used for "time_alt" does not follow best practices + schema_name | table_name | created +-------------+------------+--------- + public | btest | t +(1 row) + +INSERT INTO btest VALUES('2017-01-20T09:00:01', '2017-01-20T10:00:00', 1, 22.5); +INSERT INTO btest VALUES('2017-01-20T09:00:21', '2017-01-20T09:00:59', 1, 21.2); +INSERT INTO btest VALUES('2017-01-20T09:00:47', '2017-01-20T09:00:58', 1, 25.1); +INSERT INTO btest VALUES('2017-01-20T09:00:02', '2017-01-20T09:00:57', 2, 35.5); +INSERT INTO btest VALUES('2017-01-20T09:00:21', '2017-01-20T09:00:56', 2, 30.2); +--TOASTED; +INSERT INTO btest VALUES('2017-01-20T09:00:43', '2017-01-20T09:01:55', 2, 20.1, repeat('xyz', 1000000) ); +CREATE TABLE btest_numeric (time timestamp NOT NULL, quantity numeric); +SELECT schema_name, table_name, created FROM create_hypertable('btest_numeric', 'time'); +psql:include/agg_bookends_load.sql:16: WARNING: column type "timestamp without time zone" used for "time" does not follow best practices + schema_name | table_name | created +-------------+---------------+--------- + public | btest_numeric | t +(1 row) + +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- canary for results diff +-- this should be only output of results diff +SELECT setting, current_setting(setting) AS value from (VALUES ('timescaledb.enable_optimizations')) v(setting); + setting | value +----------------------------------+------- + timescaledb.enable_optimizations | on +(1 row) + +:PREFIX SELECT time, gp, temp FROM btest ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Index Scan Backward using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (actual rows=6 loops=1) +(1 row) + +:PREFIX SELECT last(temp, time) FROM btest; + QUERY PLAN +------------------------------------------------------------------------------------------------------------ + Result (actual rows=1 loops=1) + InitPlan 1 + -> Limit (actual rows=1 loops=1) + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (actual rows=1 loops=1) +(4 rows) + +:PREFIX SELECT first(temp, time) FROM btest; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 + -> Limit (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (actual rows=1 loops=1) +(4 rows) + +:PREFIX SELECT last(temp, time_alt) FROM btest; + QUERY PLAN +------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) +(2 rows) + +:PREFIX SELECT first(temp, time_alt) FROM btest; + QUERY PLAN +------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) +(2 rows) + +:PREFIX SELECT gp, last(temp, time) FROM btest GROUP BY gp ORDER BY gp; + QUERY PLAN +------------------------------------------------------------------ + Sort (actual rows=2 loops=1) + Sort Key: _hyper_1_1_chunk.gp + Sort Method: quicksort + -> HashAggregate (actual rows=2 loops=1) + Group Key: _hyper_1_1_chunk.gp + Batches: 1 + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) +(7 rows) + +:PREFIX SELECT gp, first(temp, time) FROM btest GROUP BY gp ORDER BY gp; + QUERY PLAN +------------------------------------------------------------------ + Sort (actual rows=2 loops=1) + Sort Key: _hyper_1_1_chunk.gp + Sort Method: quicksort + -> HashAggregate (actual rows=2 loops=1) + Group Key: _hyper_1_1_chunk.gp + Batches: 1 + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) +(7 rows) + +--check whole row +:PREFIX SELECT gp, first(btest, time) FROM btest GROUP BY gp ORDER BY gp; + QUERY PLAN +------------------------------------------------------------------ + Sort (actual rows=2 loops=1) + Sort Key: _hyper_1_1_chunk.gp + Sort Method: quicksort + -> HashAggregate (actual rows=2 loops=1) + Group Key: _hyper_1_1_chunk.gp + Batches: 1 + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) +(7 rows) + +--check toasted col +:PREFIX SELECT gp, left(last(strid, time), 10) FROM btest GROUP BY gp ORDER BY gp; + QUERY PLAN +------------------------------------------------------------------ + Sort (actual rows=2 loops=1) + Sort Key: _hyper_1_1_chunk.gp + Sort Method: quicksort + -> HashAggregate (actual rows=2 loops=1) + Group Key: _hyper_1_1_chunk.gp + Batches: 1 + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) +(7 rows) + +:PREFIX SELECT gp, last(temp, strid) FROM btest GROUP BY gp ORDER BY gp; + QUERY PLAN +------------------------------------------------------------------ + Sort (actual rows=2 loops=1) + Sort Key: _hyper_1_1_chunk.gp + Sort Method: quicksort + -> HashAggregate (actual rows=2 loops=1) + Group Key: _hyper_1_1_chunk.gp + Batches: 1 + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) +(7 rows) + +:PREFIX SELECT gp, last(strid, temp) FROM btest GROUP BY gp ORDER BY gp; + QUERY PLAN +------------------------------------------------------------------ + Sort (actual rows=2 loops=1) + Sort Key: _hyper_1_1_chunk.gp + Sort Method: quicksort + -> HashAggregate (actual rows=2 loops=1) + Group Key: _hyper_1_1_chunk.gp + Batches: 1 + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) +(7 rows) + +BEGIN; +--check null value as last element +INSERT INTO btest VALUES('2018-01-20T09:00:43', '2017-01-20T09:00:55', 2, NULL); +:PREFIX SELECT last(temp, time) FROM btest; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Result (actual rows=1 loops=1) + InitPlan 1 + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (actual rows=1 loops=1) + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (never executed) +(7 rows) + +--check non-null element "overrides" NULL because it comes after. +INSERT INTO btest VALUES('2019-01-20T09:00:43', '2018-01-20T09:00:55', 2, 30.5); +:PREFIX SELECT last(temp, time) FROM btest; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Result (actual rows=1 loops=1) + InitPlan 1 + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Index Scan using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (never executed) + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (never executed) +(8 rows) + +--check null cmp element is skipped +INSERT INTO btest VALUES('2018-01-20T09:00:43', NULL, 2, 32.3); +:PREFIX SELECT last(temp, time_alt) FROM btest; + QUERY PLAN +------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=9 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) +(5 rows) + +-- fist returns NULL value +:PREFIX SELECT first(temp, time_alt) FROM btest; + QUERY PLAN +------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=9 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) +(5 rows) + +-- test first return non NULL value +INSERT INTO btest VALUES('2016-01-20T09:00:00', '2016-01-20T09:00:00', 2, 36.5); +:PREFIX SELECT first(temp, time_alt) FROM btest; + QUERY PLAN +------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=10 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) +(6 rows) + +--check non null cmp element insert after null cmp +INSERT INTO btest VALUES('2020-01-20T09:00:43', '2020-01-20T09:00:43', 2, 35.3); +:PREFIX SELECT last(temp, time_alt) FROM btest; + QUERY PLAN +------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=11 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_5_chunk (actual rows=1 loops=1) +(7 rows) + +:PREFIX SELECT first(temp, time_alt) FROM btest; + QUERY PLAN +------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=11 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_5_chunk (actual rows=1 loops=1) +(7 rows) + +--cmp nulls should be ignored and not present in groups +:PREFIX SELECT gp, last(temp, time_alt) FROM btest GROUP BY gp ORDER BY gp; + QUERY PLAN +------------------------------------------------------------------------ + Sort (actual rows=2 loops=1) + Sort Key: _hyper_1_1_chunk.gp + Sort Method: quicksort + -> HashAggregate (actual rows=2 loops=1) + Group Key: _hyper_1_1_chunk.gp + Batches: 1 + -> Append (actual rows=11 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_5_chunk (actual rows=1 loops=1) +(12 rows) + +--Previously, some bugs were found with NULLS and numeric types, so test that +INSERT INTO btest_numeric VALUES ('2019-01-20T09:00:43', NULL); +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 + -> Limit (actual rows=1 loops=1) + -> Index Scan using _hyper_2_6_chunk_btest_numeric_time_idx on _hyper_2_6_chunk (actual rows=1 loops=1) +(4 rows) + +--check non-null element "overrides" NULL because it comes after. +INSERT INTO btest_numeric VALUES('2020-01-20T09:00:43', 30.5); +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest_numeric (actual rows=1 loops=1) + Order: btest_numeric."time" DESC + -> Index Scan using _hyper_2_7_chunk_btest_numeric_time_idx on _hyper_2_7_chunk (actual rows=1 loops=1) + -> Index Scan using _hyper_2_6_chunk_btest_numeric_time_idx on _hyper_2_6_chunk (never executed) +(7 rows) + +-- do index scan for last +:PREFIX SELECT last(temp, time) FROM btest; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Result (actual rows=1 loops=1) + InitPlan 1 + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk (actual rows=1 loops=1) + -> Index Scan using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (never executed) + -> Index Scan using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (never executed) + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (never executed) + -> Index Scan using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk (never executed) +(10 rows) + +-- do index scan for first +:PREFIX SELECT first(temp, time) FROM btest; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" + -> Index Scan Backward using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (never executed) + -> Index Scan Backward using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (never executed) + -> Index Scan Backward using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (never executed) + -> Index Scan Backward using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk (never executed) +(10 rows) + +-- can't do index scan when ordering on non-index column +:PREFIX SELECT first(temp, time_alt) FROM btest; + QUERY PLAN +------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=11 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_5_chunk (actual rows=1 loops=1) +(7 rows) + +-- do index scan for subquery +:PREFIX SELECT * FROM (SELECT last(temp, time) FROM btest) last; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Result (actual rows=1 loops=1) + InitPlan 1 + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk (actual rows=1 loops=1) + -> Index Scan using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (never executed) + -> Index Scan using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (never executed) + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (never executed) + -> Index Scan using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk (never executed) +(10 rows) + +-- can't do index scan when using group by +:PREFIX SELECT last(temp, time) FROM btest GROUP BY gp ORDER BY gp; + QUERY PLAN +------------------------------------------------------------------------ + Sort (actual rows=2 loops=1) + Sort Key: _hyper_1_1_chunk.gp + Sort Method: quicksort + -> HashAggregate (actual rows=2 loops=1) + Group Key: _hyper_1_1_chunk.gp + Batches: 1 + -> Append (actual rows=11 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_5_chunk (actual rows=1 loops=1) +(12 rows) + +-- do index scan when agg function is used in CTE subquery +:PREFIX WITH last_temp AS (SELECT last(temp, time) FROM btest) SELECT * from last_temp; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Result (actual rows=1 loops=1) + InitPlan 1 + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk (actual rows=1 loops=1) + -> Index Scan using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (never executed) + -> Index Scan using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (never executed) + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (never executed) + -> Index Scan using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk (never executed) +(10 rows) + +-- do index scan when using both FIRST and LAST aggregate functions +:PREFIX SELECT first(temp, time), last(temp, time) FROM btest; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk (actual rows=1 loops=1) + -> Index Scan using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (never executed) + -> Index Scan using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (never executed) + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (never executed) + -> Index Scan using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk (never executed) + InitPlan 2 + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest btest_1 (actual rows=1 loops=1) + Order: btest_1."time" + -> Index Scan Backward using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk _hyper_1_4_chunk_1 (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk _hyper_1_1_chunk_1 (never executed) + -> Index Scan Backward using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk _hyper_1_2_chunk_1 (never executed) + -> Index Scan Backward using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk _hyper_1_3_chunk_1 (never executed) + -> Index Scan Backward using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk _hyper_1_5_chunk_1 (never executed) +(19 rows) + +-- verify results when using both FIRST and LAST +:PREFIX SELECT first(temp, time), last(temp, time) FROM btest; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk (actual rows=1 loops=1) + -> Index Scan using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (never executed) + -> Index Scan using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (never executed) + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (never executed) + -> Index Scan using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk (never executed) + InitPlan 2 + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest btest_1 (actual rows=1 loops=1) + Order: btest_1."time" + -> Index Scan Backward using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk _hyper_1_4_chunk_1 (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk _hyper_1_1_chunk_1 (never executed) + -> Index Scan Backward using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk _hyper_1_2_chunk_1 (never executed) + -> Index Scan Backward using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk _hyper_1_3_chunk_1 (never executed) + -> Index Scan Backward using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk _hyper_1_5_chunk_1 (never executed) +(19 rows) + +-- do index scan when using WHERE +:PREFIX SELECT last(temp, time) FROM btest WHERE time <= '2017-01-20T09:00:02'; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Result (actual rows=1 loops=1) + InitPlan 1 + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (actual rows=1 loops=1) + Index Cond: ("time" <= 'Fri Jan 20 09:00:02 2017'::timestamp without time zone) + -> Index Scan using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk (never executed) + Index Cond: ("time" <= 'Fri Jan 20 09:00:02 2017'::timestamp without time zone) +(9 rows) + +-- can't do index scan for MAX and LAST combined (MinMax optimization fails when having different aggregate functions) +:PREFIX SELECT max(time), last(temp, time) FROM btest; + QUERY PLAN +------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=11 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_5_chunk (actual rows=1 loops=1) +(7 rows) + +-- can't do index scan when using FIRST/LAST in ORDER BY +:PREFIX SELECT last(temp, time) FROM btest ORDER BY last(temp, time); + QUERY PLAN +------------------------------------------------------------------------ + Sort (actual rows=1 loops=1) + Sort Key: (last(_hyper_1_1_chunk.temp, _hyper_1_1_chunk."time")) + Sort Method: quicksort + -> Aggregate (actual rows=1 loops=1) + -> Append (actual rows=11 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_5_chunk (actual rows=1 loops=1) +(10 rows) + +-- do index scan +:PREFIX SELECT last(temp, time) FROM btest WHERE temp < 30; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Result (actual rows=1 loops=1) + InitPlan 1 + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk (actual rows=0 loops=1) + Filter: (temp < '30'::double precision) + Rows Removed by Filter: 1 + -> Index Scan using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (actual rows=0 loops=1) + Filter: (temp < '30'::double precision) + Rows Removed by Filter: 1 + -> Index Scan using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (actual rows=0 loops=1) + Filter: (temp < '30'::double precision) + Rows Removed by Filter: 2 + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (actual rows=1 loops=1) + Filter: (temp < '30'::double precision) + -> Index Scan using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk (never executed) + Filter: (temp < '30'::double precision) +(18 rows) + +-- SELECT first(temp, time) FROM btest WHERE time >= '2017-01-20 09:00:47'; +-- do index scan +:PREFIX SELECT first(temp, time) FROM btest WHERE time >= '2017-01-20 09:00:47'; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" + -> Index Scan Backward using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= 'Fri Jan 20 09:00:47 2017'::timestamp without time zone) + -> Index Scan Backward using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (never executed) + Index Cond: ("time" >= 'Fri Jan 20 09:00:47 2017'::timestamp without time zone) + -> Index Scan Backward using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (never executed) + Index Cond: ("time" >= 'Fri Jan 20 09:00:47 2017'::timestamp without time zone) + -> Index Scan Backward using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk (never executed) + Index Cond: ("time" >= 'Fri Jan 20 09:00:47 2017'::timestamp without time zone) +(13 rows) + +-- can't do index scan when using WINDOW function +:PREFIX SELECT gp, last(temp, time) OVER (PARTITION BY gp) AS last FROM btest; + QUERY PLAN +------------------------------------------------------------------------ + WindowAgg (actual rows=11 loops=1) + -> Sort (actual rows=11 loops=1) + Sort Key: _hyper_1_1_chunk.gp + Sort Method: quicksort + -> Append (actual rows=11 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_5_chunk (actual rows=1 loops=1) +(10 rows) + +-- test constants +:PREFIX SELECT first(100, 100) FROM btest; + QUERY PLAN +-------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 + -> Limit (actual rows=1 loops=1) + -> Result (actual rows=1 loops=1) + -> Append (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_2_chunk (never executed) + -> Seq Scan on _hyper_1_3_chunk (never executed) + -> Seq Scan on _hyper_1_4_chunk (never executed) + -> Seq Scan on _hyper_1_5_chunk (never executed) +(10 rows) + +-- create an index so we can test optimization +CREATE INDEX btest_time_alt_idx ON btest(time_alt); +:PREFIX SELECT last(temp, time_alt) FROM btest; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 + -> Limit (actual rows=1 loops=1) + -> Merge Append (actual rows=1 loops=1) + Sort Key: _hyper_1_1_chunk.time_alt DESC + -> Index Scan Backward using _hyper_1_1_chunk_btest_time_alt_idx on _hyper_1_1_chunk (actual rows=1 loops=1) + Index Cond: (time_alt IS NOT NULL) + -> Index Scan Backward using _hyper_1_2_chunk_btest_time_alt_idx on _hyper_1_2_chunk (actual rows=1 loops=1) + Index Cond: (time_alt IS NOT NULL) + -> Index Scan Backward using _hyper_1_3_chunk_btest_time_alt_idx on _hyper_1_3_chunk (actual rows=1 loops=1) + Index Cond: (time_alt IS NOT NULL) + -> Index Scan Backward using _hyper_1_4_chunk_btest_time_alt_idx on _hyper_1_4_chunk (actual rows=1 loops=1) + Index Cond: (time_alt IS NOT NULL) + -> Index Scan Backward using _hyper_1_5_chunk_btest_time_alt_idx on _hyper_1_5_chunk (actual rows=1 loops=1) + Index Cond: (time_alt IS NOT NULL) +(15 rows) + +--test nested FIRST/LAST - should optimize +:PREFIX SELECT abs(last(temp, time)) FROM btest; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------ + Result (actual rows=1 loops=1) + InitPlan 1 + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest (actual rows=1 loops=1) + Order: btest."time" DESC + -> Index Scan using _hyper_1_5_chunk_btest_time_idx on _hyper_1_5_chunk (actual rows=1 loops=1) + -> Index Scan using _hyper_1_3_chunk_btest_time_idx on _hyper_1_3_chunk (never executed) + -> Index Scan using _hyper_1_2_chunk_btest_time_idx on _hyper_1_2_chunk (never executed) + -> Index Scan using _hyper_1_1_chunk_btest_time_idx on _hyper_1_1_chunk (never executed) + -> Index Scan using _hyper_1_4_chunk_btest_time_idx on _hyper_1_4_chunk (never executed) +(10 rows) + +-- test nested FIRST/LAST in ORDER BY - no optimization possible +:PREFIX SELECT abs(last(temp, time)) FROM btest ORDER BY abs(last(temp,time)); + QUERY PLAN +------------------------------------------------------------------------- + Sort (actual rows=1 loops=1) + Sort Key: (abs(last(_hyper_1_1_chunk.temp, _hyper_1_1_chunk."time"))) + Sort Method: quicksort + -> Aggregate (actual rows=1 loops=1) + -> Append (actual rows=11 loops=1) + -> Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_1_3_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_4_chunk (actual rows=1 loops=1) + -> Seq Scan on _hyper_1_5_chunk (actual rows=1 loops=1) +(10 rows) + +ROLLBACK; +-- Test with NULL numeric values +BEGIN; +TRUNCATE btest_numeric; +-- Empty table +:PREFIX SELECT first(btest_numeric, time) FROM btest_numeric; + QUERY PLAN +-------------------------------------- + Aggregate (actual rows=1 loops=1) + -> Result (actual rows=0 loops=1) + One-Time Filter: false +(3 rows) + +:PREFIX SELECT last(btest_numeric, time) FROM btest_numeric; + QUERY PLAN +-------------------------------------- + Aggregate (actual rows=1 loops=1) + -> Result (actual rows=0 loops=1) + One-Time Filter: false +(3 rows) + +-- Only NULL values +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 + -> Limit (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_2_8_chunk_btest_numeric_time_idx on _hyper_2_8_chunk (actual rows=1 loops=1) +(4 rows) + +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 + -> Limit (actual rows=1 loops=1) + -> Index Scan using _hyper_2_8_chunk_btest_numeric_time_idx on _hyper_2_8_chunk (actual rows=1 loops=1) +(4 rows) + +:PREFIX SELECT first(time, quantity) FROM btest_numeric; + QUERY PLAN +------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_2_8_chunk (actual rows=2 loops=1) +(2 rows) + +:PREFIX SELECT last(time, quantity) FROM btest_numeric; + QUERY PLAN +------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Seq Scan on _hyper_2_8_chunk (actual rows=2 loops=1) +(2 rows) + +-- NULL values followed by non-NULL values +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 1); +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 2); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest_numeric (actual rows=1 loops=1) + Order: btest_numeric."time" + -> Index Scan Backward using _hyper_2_8_chunk_btest_numeric_time_idx on _hyper_2_8_chunk (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_2_9_chunk_btest_numeric_time_idx on _hyper_2_9_chunk (never executed) +(7 rows) + +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest_numeric (actual rows=1 loops=1) + Order: btest_numeric."time" DESC + -> Index Scan using _hyper_2_9_chunk_btest_numeric_time_idx on _hyper_2_9_chunk (actual rows=1 loops=1) + -> Index Scan using _hyper_2_8_chunk_btest_numeric_time_idx on _hyper_2_8_chunk (never executed) +(7 rows) + +:PREFIX SELECT first(time, quantity) FROM btest_numeric; + QUERY PLAN +------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=4 loops=1) + -> Seq Scan on _hyper_2_8_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_2_9_chunk (actual rows=2 loops=1) +(4 rows) + +:PREFIX SELECT last(time, quantity) FROM btest_numeric; + QUERY PLAN +------------------------------------------------------------------ + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=4 loops=1) + -> Seq Scan on _hyper_2_8_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_2_9_chunk (actual rows=2 loops=1) +(4 rows) + +TRUNCATE btest_numeric; +-- non-NULL values followed by NULL values +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 1); +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 2); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest_numeric (actual rows=1 loops=1) + Order: btest_numeric."time" + -> Index Scan Backward using _hyper_2_11_chunk_btest_numeric_time_idx on _hyper_2_11_chunk (actual rows=1 loops=1) + -> Index Scan Backward using _hyper_2_10_chunk_btest_numeric_time_idx on _hyper_2_10_chunk (never executed) +(7 rows) + +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------- + Result (actual rows=1 loops=1) + InitPlan 1 + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on btest_numeric (actual rows=1 loops=1) + Order: btest_numeric."time" DESC + -> Index Scan using _hyper_2_10_chunk_btest_numeric_time_idx on _hyper_2_10_chunk (actual rows=1 loops=1) + -> Index Scan using _hyper_2_11_chunk_btest_numeric_time_idx on _hyper_2_11_chunk (never executed) +(7 rows) + +:PREFIX SELECT first(time, quantity) FROM btest_numeric; + QUERY PLAN +------------------------------------------------------------------- + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=4 loops=1) + -> Seq Scan on _hyper_2_10_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_2_11_chunk (actual rows=2 loops=1) +(4 rows) + +:PREFIX SELECT last(time, quantity) FROM btest_numeric; + QUERY PLAN +------------------------------------------------------------------- + Aggregate (actual rows=1 loops=1) + -> Append (actual rows=4 loops=1) + -> Seq Scan on _hyper_2_10_chunk (actual rows=2 loops=1) + -> Seq Scan on _hyper_2_11_chunk (actual rows=2 loops=1) +(4 rows) + +ROLLBACK; +-- we want test results as part of the output too to make sure we produce correct output +\set PREFIX '' +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- canary for results diff +-- this should be only output of results diff +SELECT setting, current_setting(setting) AS value from (VALUES ('timescaledb.enable_optimizations')) v(setting); + setting | value +----------------------------------+------- + timescaledb.enable_optimizations | on +(1 row) + +:PREFIX SELECT time, gp, temp FROM btest ORDER BY time; + time | gp | temp +--------------------------+----+------ + Fri Jan 20 09:00:01 2017 | 1 | 22.5 + Fri Jan 20 09:00:02 2017 | 2 | 35.5 + Fri Jan 20 09:00:21 2017 | 1 | 21.2 + Fri Jan 20 09:00:21 2017 | 2 | 30.2 + Fri Jan 20 09:00:43 2017 | 2 | 20.1 + Fri Jan 20 09:00:47 2017 | 1 | 25.1 +(6 rows) + +:PREFIX SELECT last(temp, time) FROM btest; + last +------ + 25.1 +(1 row) + +:PREFIX SELECT first(temp, time) FROM btest; + first +------- + 22.5 +(1 row) + +:PREFIX SELECT last(temp, time_alt) FROM btest; + last +------ + 22.5 +(1 row) + +:PREFIX SELECT first(temp, time_alt) FROM btest; + first +------- + 30.2 +(1 row) + +:PREFIX SELECT gp, last(temp, time) FROM btest GROUP BY gp ORDER BY gp; + gp | last +----+------ + 1 | 25.1 + 2 | 20.1 +(2 rows) + +:PREFIX SELECT gp, first(temp, time) FROM btest GROUP BY gp ORDER BY gp; + gp | first +----+------- + 1 | 22.5 + 2 | 35.5 +(2 rows) + +--check whole row +:PREFIX SELECT gp, first(btest, time) FROM btest GROUP BY gp ORDER BY gp; + gp | first +----+------------------------------------------------------------------------ + 1 | ("Fri Jan 20 09:00:01 2017","Fri Jan 20 10:00:00 2017",1,22.5,testing) + 2 | ("Fri Jan 20 09:00:02 2017","Fri Jan 20 09:00:57 2017",2,35.5,testing) +(2 rows) + +--check toasted col +:PREFIX SELECT gp, left(last(strid, time), 10) FROM btest GROUP BY gp ORDER BY gp; + gp | left +----+------------ + 1 | testing + 2 | xyzxyzxyzx +(2 rows) + +:PREFIX SELECT gp, last(temp, strid) FROM btest GROUP BY gp ORDER BY gp; + gp | last +----+------ + 1 | 22.5 + 2 | 20.1 +(2 rows) + +:PREFIX SELECT gp, last(strid, temp) FROM btest GROUP BY gp ORDER BY gp; + gp | last +----+--------- + 1 | testing + 2 | testing +(2 rows) + +BEGIN; +--check null value as last element +INSERT INTO btest VALUES('2018-01-20T09:00:43', '2017-01-20T09:00:55', 2, NULL); +:PREFIX SELECT last(temp, time) FROM btest; + last +------ + +(1 row) + +--check non-null element "overrides" NULL because it comes after. +INSERT INTO btest VALUES('2019-01-20T09:00:43', '2018-01-20T09:00:55', 2, 30.5); +:PREFIX SELECT last(temp, time) FROM btest; + last +------ + 30.5 +(1 row) + +--check null cmp element is skipped +INSERT INTO btest VALUES('2018-01-20T09:00:43', NULL, 2, 32.3); +:PREFIX SELECT last(temp, time_alt) FROM btest; + last +------ + 30.5 +(1 row) + +-- fist returns NULL value +:PREFIX SELECT first(temp, time_alt) FROM btest; + first +------- + +(1 row) + +-- test first return non NULL value +INSERT INTO btest VALUES('2016-01-20T09:00:00', '2016-01-20T09:00:00', 2, 36.5); +:PREFIX SELECT first(temp, time_alt) FROM btest; + first +------- + 36.5 +(1 row) + +--check non null cmp element insert after null cmp +INSERT INTO btest VALUES('2020-01-20T09:00:43', '2020-01-20T09:00:43', 2, 35.3); +:PREFIX SELECT last(temp, time_alt) FROM btest; + last +------ + 35.3 +(1 row) + +:PREFIX SELECT first(temp, time_alt) FROM btest; + first +------- + 36.5 +(1 row) + +--cmp nulls should be ignored and not present in groups +:PREFIX SELECT gp, last(temp, time_alt) FROM btest GROUP BY gp ORDER BY gp; + gp | last +----+------ + 1 | 22.5 + 2 | 35.3 +(2 rows) + +--Previously, some bugs were found with NULLS and numeric types, so test that +INSERT INTO btest_numeric VALUES ('2019-01-20T09:00:43', NULL); +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + last +------ + +(1 row) + +--check non-null element "overrides" NULL because it comes after. +INSERT INTO btest_numeric VALUES('2020-01-20T09:00:43', 30.5); +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + last +------ + 30.5 +(1 row) + +-- do index scan for last +:PREFIX SELECT last(temp, time) FROM btest; + last +------ + 35.3 +(1 row) + +-- do index scan for first +:PREFIX SELECT first(temp, time) FROM btest; + first +------- + 36.5 +(1 row) + +-- can't do index scan when ordering on non-index column +:PREFIX SELECT first(temp, time_alt) FROM btest; + first +------- + 36.5 +(1 row) + +-- do index scan for subquery +:PREFIX SELECT * FROM (SELECT last(temp, time) FROM btest) last; + last +------ + 35.3 +(1 row) + +-- can't do index scan when using group by +:PREFIX SELECT last(temp, time) FROM btest GROUP BY gp ORDER BY gp; + last +------ + 25.1 + 35.3 +(2 rows) + +-- do index scan when agg function is used in CTE subquery +:PREFIX WITH last_temp AS (SELECT last(temp, time) FROM btest) SELECT * from last_temp; + last +------ + 35.3 +(1 row) + +-- do index scan when using both FIRST and LAST aggregate functions +:PREFIX SELECT first(temp, time), last(temp, time) FROM btest; + first | last +-------+------ + 36.5 | 35.3 +(1 row) + +-- verify results when using both FIRST and LAST +:PREFIX SELECT first(temp, time), last(temp, time) FROM btest; + first | last +-------+------ + 36.5 | 35.3 +(1 row) + +-- do index scan when using WHERE +:PREFIX SELECT last(temp, time) FROM btest WHERE time <= '2017-01-20T09:00:02'; + last +------ + 35.5 +(1 row) + +-- can't do index scan for MAX and LAST combined (MinMax optimization fails when having different aggregate functions) +:PREFIX SELECT max(time), last(temp, time) FROM btest; + max | last +--------------------------+------ + Mon Jan 20 09:00:43 2020 | 35.3 +(1 row) + +-- can't do index scan when using FIRST/LAST in ORDER BY +:PREFIX SELECT last(temp, time) FROM btest ORDER BY last(temp, time); + last +------ + 35.3 +(1 row) + +-- do index scan +:PREFIX SELECT last(temp, time) FROM btest WHERE temp < 30; + last +------ + 25.1 +(1 row) + +-- SELECT first(temp, time) FROM btest WHERE time >= '2017-01-20 09:00:47'; +-- do index scan +:PREFIX SELECT first(temp, time) FROM btest WHERE time >= '2017-01-20 09:00:47'; + first +------- + 25.1 +(1 row) + +-- can't do index scan when using WINDOW function +:PREFIX SELECT gp, last(temp, time) OVER (PARTITION BY gp) AS last FROM btest; + gp | last +----+------ + 1 | 25.1 + 1 | 25.1 + 1 | 25.1 + 2 | 35.3 + 2 | 35.3 + 2 | 35.3 + 2 | 35.3 + 2 | 35.3 + 2 | 35.3 + 2 | 35.3 + 2 | 35.3 +(11 rows) + +-- test constants +:PREFIX SELECT first(100, 100) FROM btest; + first +------- + 100 +(1 row) + +-- create an index so we can test optimization +CREATE INDEX btest_time_alt_idx ON btest(time_alt); +:PREFIX SELECT last(temp, time_alt) FROM btest; + last +------ + 35.3 +(1 row) + +--test nested FIRST/LAST - should optimize +:PREFIX SELECT abs(last(temp, time)) FROM btest; + abs +------ + 35.3 +(1 row) + +-- test nested FIRST/LAST in ORDER BY - no optimization possible +:PREFIX SELECT abs(last(temp, time)) FROM btest ORDER BY abs(last(temp,time)); + abs +------ + 35.3 +(1 row) + +ROLLBACK; +-- Test with NULL numeric values +BEGIN; +TRUNCATE btest_numeric; +-- Empty table +:PREFIX SELECT first(btest_numeric, time) FROM btest_numeric; + first +------- + +(1 row) + +:PREFIX SELECT last(btest_numeric, time) FROM btest_numeric; + last +------ + +(1 row) + +-- Only NULL values +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; + first +------- + +(1 row) + +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + last +------ + +(1 row) + +:PREFIX SELECT first(time, quantity) FROM btest_numeric; + first +------- + +(1 row) + +:PREFIX SELECT last(time, quantity) FROM btest_numeric; + last +------ + +(1 row) + +-- NULL values followed by non-NULL values +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 1); +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 2); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; + first +------- + +(1 row) + +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + last +------ + 1 +(1 row) + +:PREFIX SELECT first(time, quantity) FROM btest_numeric; + first +-------------------------- + Sun Jan 20 09:00:43 2019 +(1 row) + +:PREFIX SELECT last(time, quantity) FROM btest_numeric; + last +-------------------------- + Sun Jan 20 09:00:43 2019 +(1 row) + +TRUNCATE btest_numeric; +-- non-NULL values followed by NULL values +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 1); +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 2); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; + first +------- + +(1 row) + +:PREFIX SELECT last(quantity, time) FROM btest_numeric; + last +------ + 1 +(1 row) + +:PREFIX SELECT first(time, quantity) FROM btest_numeric; + first +-------------------------- + Sun Jan 20 09:00:43 2019 +(1 row) + +:PREFIX SELECT last(time, quantity) FROM btest_numeric; + last +-------------------------- + Sun Jan 20 09:00:43 2019 +(1 row) + +ROLLBACK; +-- diff results with optimizations disabled and enabled +\o :TEST_RESULTS_UNOPTIMIZED +SET timescaledb.enable_optimizations TO false; +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- canary for results diff +-- this should be only output of results diff +SELECT setting, current_setting(setting) AS value from (VALUES ('timescaledb.enable_optimizations')) v(setting); +:PREFIX SELECT time, gp, temp FROM btest ORDER BY time; +:PREFIX SELECT last(temp, time) FROM btest; +:PREFIX SELECT first(temp, time) FROM btest; +:PREFIX SELECT last(temp, time_alt) FROM btest; +:PREFIX SELECT first(temp, time_alt) FROM btest; +:PREFIX SELECT gp, last(temp, time) FROM btest GROUP BY gp ORDER BY gp; +:PREFIX SELECT gp, first(temp, time) FROM btest GROUP BY gp ORDER BY gp; +--check whole row +:PREFIX SELECT gp, first(btest, time) FROM btest GROUP BY gp ORDER BY gp; +--check toasted col +:PREFIX SELECT gp, left(last(strid, time), 10) FROM btest GROUP BY gp ORDER BY gp; +:PREFIX SELECT gp, last(temp, strid) FROM btest GROUP BY gp ORDER BY gp; +:PREFIX SELECT gp, last(strid, temp) FROM btest GROUP BY gp ORDER BY gp; +BEGIN; +--check null value as last element +INSERT INTO btest VALUES('2018-01-20T09:00:43', '2017-01-20T09:00:55', 2, NULL); +:PREFIX SELECT last(temp, time) FROM btest; +--check non-null element "overrides" NULL because it comes after. +INSERT INTO btest VALUES('2019-01-20T09:00:43', '2018-01-20T09:00:55', 2, 30.5); +:PREFIX SELECT last(temp, time) FROM btest; +--check null cmp element is skipped +INSERT INTO btest VALUES('2018-01-20T09:00:43', NULL, 2, 32.3); +:PREFIX SELECT last(temp, time_alt) FROM btest; +-- fist returns NULL value +:PREFIX SELECT first(temp, time_alt) FROM btest; +-- test first return non NULL value +INSERT INTO btest VALUES('2016-01-20T09:00:00', '2016-01-20T09:00:00', 2, 36.5); +:PREFIX SELECT first(temp, time_alt) FROM btest; +--check non null cmp element insert after null cmp +INSERT INTO btest VALUES('2020-01-20T09:00:43', '2020-01-20T09:00:43', 2, 35.3); +:PREFIX SELECT last(temp, time_alt) FROM btest; +:PREFIX SELECT first(temp, time_alt) FROM btest; +--cmp nulls should be ignored and not present in groups +:PREFIX SELECT gp, last(temp, time_alt) FROM btest GROUP BY gp ORDER BY gp; +--Previously, some bugs were found with NULLS and numeric types, so test that +INSERT INTO btest_numeric VALUES ('2019-01-20T09:00:43', NULL); +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +--check non-null element "overrides" NULL because it comes after. +INSERT INTO btest_numeric VALUES('2020-01-20T09:00:43', 30.5); +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +-- do index scan for last +:PREFIX SELECT last(temp, time) FROM btest; +-- do index scan for first +:PREFIX SELECT first(temp, time) FROM btest; +-- can't do index scan when ordering on non-index column +:PREFIX SELECT first(temp, time_alt) FROM btest; +-- do index scan for subquery +:PREFIX SELECT * FROM (SELECT last(temp, time) FROM btest) last; +-- can't do index scan when using group by +:PREFIX SELECT last(temp, time) FROM btest GROUP BY gp ORDER BY gp; +-- do index scan when agg function is used in CTE subquery +:PREFIX WITH last_temp AS (SELECT last(temp, time) FROM btest) SELECT * from last_temp; +-- do index scan when using both FIRST and LAST aggregate functions +:PREFIX SELECT first(temp, time), last(temp, time) FROM btest; +-- verify results when using both FIRST and LAST +:PREFIX SELECT first(temp, time), last(temp, time) FROM btest; +-- do index scan when using WHERE +:PREFIX SELECT last(temp, time) FROM btest WHERE time <= '2017-01-20T09:00:02'; +-- can't do index scan for MAX and LAST combined (MinMax optimization fails when having different aggregate functions) +:PREFIX SELECT max(time), last(temp, time) FROM btest; +-- can't do index scan when using FIRST/LAST in ORDER BY +:PREFIX SELECT last(temp, time) FROM btest ORDER BY last(temp, time); +-- do index scan +:PREFIX SELECT last(temp, time) FROM btest WHERE temp < 30; +-- SELECT first(temp, time) FROM btest WHERE time >= '2017-01-20 09:00:47'; +-- do index scan +:PREFIX SELECT first(temp, time) FROM btest WHERE time >= '2017-01-20 09:00:47'; +-- can't do index scan when using WINDOW function +:PREFIX SELECT gp, last(temp, time) OVER (PARTITION BY gp) AS last FROM btest; +-- test constants +:PREFIX SELECT first(100, 100) FROM btest; +-- create an index so we can test optimization +CREATE INDEX btest_time_alt_idx ON btest(time_alt); +:PREFIX SELECT last(temp, time_alt) FROM btest; +--test nested FIRST/LAST - should optimize +:PREFIX SELECT abs(last(temp, time)) FROM btest; +-- test nested FIRST/LAST in ORDER BY - no optimization possible +:PREFIX SELECT abs(last(temp, time)) FROM btest ORDER BY abs(last(temp,time)); +ROLLBACK; +-- Test with NULL numeric values +BEGIN; +TRUNCATE btest_numeric; +-- Empty table +:PREFIX SELECT first(btest_numeric, time) FROM btest_numeric; +:PREFIX SELECT last(btest_numeric, time) FROM btest_numeric; +-- Only NULL values +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +:PREFIX SELECT first(time, quantity) FROM btest_numeric; +:PREFIX SELECT last(time, quantity) FROM btest_numeric; +-- NULL values followed by non-NULL values +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 1); +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 2); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +:PREFIX SELECT first(time, quantity) FROM btest_numeric; +:PREFIX SELECT last(time, quantity) FROM btest_numeric; +TRUNCATE btest_numeric; +-- non-NULL values followed by NULL values +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 1); +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 2); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +:PREFIX SELECT first(time, quantity) FROM btest_numeric; +:PREFIX SELECT last(time, quantity) FROM btest_numeric; +ROLLBACK; +\o +\o :TEST_RESULTS_OPTIMIZED +SET timescaledb.enable_optimizations TO true; +\ir :TEST_QUERY_NAME +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +-- canary for results diff +-- this should be only output of results diff +SELECT setting, current_setting(setting) AS value from (VALUES ('timescaledb.enable_optimizations')) v(setting); +:PREFIX SELECT time, gp, temp FROM btest ORDER BY time; +:PREFIX SELECT last(temp, time) FROM btest; +:PREFIX SELECT first(temp, time) FROM btest; +:PREFIX SELECT last(temp, time_alt) FROM btest; +:PREFIX SELECT first(temp, time_alt) FROM btest; +:PREFIX SELECT gp, last(temp, time) FROM btest GROUP BY gp ORDER BY gp; +:PREFIX SELECT gp, first(temp, time) FROM btest GROUP BY gp ORDER BY gp; +--check whole row +:PREFIX SELECT gp, first(btest, time) FROM btest GROUP BY gp ORDER BY gp; +--check toasted col +:PREFIX SELECT gp, left(last(strid, time), 10) FROM btest GROUP BY gp ORDER BY gp; +:PREFIX SELECT gp, last(temp, strid) FROM btest GROUP BY gp ORDER BY gp; +:PREFIX SELECT gp, last(strid, temp) FROM btest GROUP BY gp ORDER BY gp; +BEGIN; +--check null value as last element +INSERT INTO btest VALUES('2018-01-20T09:00:43', '2017-01-20T09:00:55', 2, NULL); +:PREFIX SELECT last(temp, time) FROM btest; +--check non-null element "overrides" NULL because it comes after. +INSERT INTO btest VALUES('2019-01-20T09:00:43', '2018-01-20T09:00:55', 2, 30.5); +:PREFIX SELECT last(temp, time) FROM btest; +--check null cmp element is skipped +INSERT INTO btest VALUES('2018-01-20T09:00:43', NULL, 2, 32.3); +:PREFIX SELECT last(temp, time_alt) FROM btest; +-- fist returns NULL value +:PREFIX SELECT first(temp, time_alt) FROM btest; +-- test first return non NULL value +INSERT INTO btest VALUES('2016-01-20T09:00:00', '2016-01-20T09:00:00', 2, 36.5); +:PREFIX SELECT first(temp, time_alt) FROM btest; +--check non null cmp element insert after null cmp +INSERT INTO btest VALUES('2020-01-20T09:00:43', '2020-01-20T09:00:43', 2, 35.3); +:PREFIX SELECT last(temp, time_alt) FROM btest; +:PREFIX SELECT first(temp, time_alt) FROM btest; +--cmp nulls should be ignored and not present in groups +:PREFIX SELECT gp, last(temp, time_alt) FROM btest GROUP BY gp ORDER BY gp; +--Previously, some bugs were found with NULLS and numeric types, so test that +INSERT INTO btest_numeric VALUES ('2019-01-20T09:00:43', NULL); +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +--check non-null element "overrides" NULL because it comes after. +INSERT INTO btest_numeric VALUES('2020-01-20T09:00:43', 30.5); +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +-- do index scan for last +:PREFIX SELECT last(temp, time) FROM btest; +-- do index scan for first +:PREFIX SELECT first(temp, time) FROM btest; +-- can't do index scan when ordering on non-index column +:PREFIX SELECT first(temp, time_alt) FROM btest; +-- do index scan for subquery +:PREFIX SELECT * FROM (SELECT last(temp, time) FROM btest) last; +-- can't do index scan when using group by +:PREFIX SELECT last(temp, time) FROM btest GROUP BY gp ORDER BY gp; +-- do index scan when agg function is used in CTE subquery +:PREFIX WITH last_temp AS (SELECT last(temp, time) FROM btest) SELECT * from last_temp; +-- do index scan when using both FIRST and LAST aggregate functions +:PREFIX SELECT first(temp, time), last(temp, time) FROM btest; +-- verify results when using both FIRST and LAST +:PREFIX SELECT first(temp, time), last(temp, time) FROM btest; +-- do index scan when using WHERE +:PREFIX SELECT last(temp, time) FROM btest WHERE time <= '2017-01-20T09:00:02'; +-- can't do index scan for MAX and LAST combined (MinMax optimization fails when having different aggregate functions) +:PREFIX SELECT max(time), last(temp, time) FROM btest; +-- can't do index scan when using FIRST/LAST in ORDER BY +:PREFIX SELECT last(temp, time) FROM btest ORDER BY last(temp, time); +-- do index scan +:PREFIX SELECT last(temp, time) FROM btest WHERE temp < 30; +-- SELECT first(temp, time) FROM btest WHERE time >= '2017-01-20 09:00:47'; +-- do index scan +:PREFIX SELECT first(temp, time) FROM btest WHERE time >= '2017-01-20 09:00:47'; +-- can't do index scan when using WINDOW function +:PREFIX SELECT gp, last(temp, time) OVER (PARTITION BY gp) AS last FROM btest; +-- test constants +:PREFIX SELECT first(100, 100) FROM btest; +-- create an index so we can test optimization +CREATE INDEX btest_time_alt_idx ON btest(time_alt); +:PREFIX SELECT last(temp, time_alt) FROM btest; +--test nested FIRST/LAST - should optimize +:PREFIX SELECT abs(last(temp, time)) FROM btest; +-- test nested FIRST/LAST in ORDER BY - no optimization possible +:PREFIX SELECT abs(last(temp, time)) FROM btest ORDER BY abs(last(temp,time)); +ROLLBACK; +-- Test with NULL numeric values +BEGIN; +TRUNCATE btest_numeric; +-- Empty table +:PREFIX SELECT first(btest_numeric, time) FROM btest_numeric; +:PREFIX SELECT last(btest_numeric, time) FROM btest_numeric; +-- Only NULL values +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +:PREFIX SELECT first(time, quantity) FROM btest_numeric; +:PREFIX SELECT last(time, quantity) FROM btest_numeric; +-- NULL values followed by non-NULL values +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 1); +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 2); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +:PREFIX SELECT first(time, quantity) FROM btest_numeric; +:PREFIX SELECT last(time, quantity) FROM btest_numeric; +TRUNCATE btest_numeric; +-- non-NULL values followed by NULL values +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 1); +INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 2); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL); +:PREFIX SELECT first(quantity, time) FROM btest_numeric; +:PREFIX SELECT last(quantity, time) FROM btest_numeric; +:PREFIX SELECT first(time, quantity) FROM btest_numeric; +:PREFIX SELECT last(time, quantity) FROM btest_numeric; +ROLLBACK; +\o +:DIFF_CMD +--- Unoptimized result ++++ Optimized result +@@ -1,6 +1,6 @@ + setting | value + ----------------------------------+------- +- timescaledb.enable_optimizations | off ++ timescaledb.enable_optimizations | on + (1 row) + + time | gp | temp +-- Test partial aggregation +CREATE TABLE partial_aggregation (time timestamptz NOT NULL, quantity numeric, longvalue text); +SELECT schema_name, table_name, created FROM create_hypertable('partial_aggregation', 'time'); + schema_name | table_name | created +-------------+---------------------+--------- + public | partial_aggregation | t +(1 row) + +INSERT INTO partial_aggregation VALUES('2018-01-20T09:00:43', NULL, NULL); +INSERT INTO partial_aggregation VALUES('2018-01-20T09:00:43', NULL, NULL); +INSERT INTO partial_aggregation VALUES('2019-01-20T09:00:43', 1, 'hello'); +INSERT INTO partial_aggregation VALUES('2019-01-20T09:00:43', 2, 'world'); +INSERT INTO partial_aggregation VALUES('2020-01-20T09:00:43', 3, 'some'); +INSERT INTO partial_aggregation VALUES('2020-01-20T09:00:43', 3, 'more'); +INSERT INTO partial_aggregation VALUES('2021-01-20T09:00:43', 3, 'some'); +INSERT INTO partial_aggregation VALUES('2021-01-20T09:00:43', 3, 'more'); +INSERT INTO partial_aggregation VALUES('2022-01-20T09:00:43', 4, 'words'); +INSERT INTO partial_aggregation VALUES('2022-01-20T09:00:43', 5, 'words'); +INSERT INTO partial_aggregation VALUES('2023-01-20T09:00:43', 6, 'words'); +INSERT INTO partial_aggregation VALUES('2023-01-20T09:00:43', 7, 'words'); +-- Use enable_partitionwise_aggregate to create partial aggregates per chunk +SET enable_partitionwise_aggregate = ON; +SELECT + format('SELECT %3$s, %1$s FROM partial_aggregation WHERE %2$s GROUP BY %3$s ORDER BY 1, 2;', + function, condition, grouping) +FROM + unnest(array[ + 'first(time, quantity), last(time, quantity)', + 'last(longvalue, quantity)', + 'last(quantity, longvalue)', + 'last(quantity, time)', + 'last(time, longvalue)']) AS function, + unnest(array[ + 'true', + $$time < '2021-01-01'$$, + 'quantity is null', + 'quantity is not null', + 'quantity > 3']) AS condition, + unnest(array[ + '777::text' /* dummy grouping column */, + 'longvalue', + 'quantity', + $$time_bucket('1 year', time)$$, + $$time_bucket('3 year', time)$$]) AS grouping +\gexec +SELECT 777::text, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE true GROUP BY 777::text ORDER BY 1, 2; + text | first | last +------+------------------------------+------------------------------ + 777 | Sun Jan 20 09:00:43 2019 PST | Fri Jan 20 09:00:43 2023 PST +(1 row) + +SELECT 777::text, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY 777::text ORDER BY 1, 2; + text | first | last +------+------------------------------+------------------------------ + 777 | Sun Jan 20 09:00:43 2019 PST | Mon Jan 20 09:00:43 2020 PST +(1 row) + +SELECT 777::text, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY 777::text ORDER BY 1, 2; + text | first | last +------+-------+------ + 777 | | +(1 row) + +SELECT 777::text, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY 777::text ORDER BY 1, 2; + text | first | last +------+------------------------------+------------------------------ + 777 | Sun Jan 20 09:00:43 2019 PST | Fri Jan 20 09:00:43 2023 PST +(1 row) + +SELECT 777::text, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY 777::text ORDER BY 1, 2; + text | first | last +------+------------------------------+------------------------------ + 777 | Thu Jan 20 09:00:43 2022 PST | Fri Jan 20 09:00:43 2023 PST +(1 row) + +SELECT 777::text, last(longvalue, quantity) FROM partial_aggregation WHERE true GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------- + 777 | words +(1 row) + +SELECT 777::text, last(longvalue, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | some +(1 row) + +SELECT 777::text, last(longvalue, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | +(1 row) + +SELECT 777::text, last(longvalue, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------- + 777 | words +(1 row) + +SELECT 777::text, last(longvalue, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------- + 777 | words +(1 row) + +SELECT 777::text, last(quantity, longvalue) FROM partial_aggregation WHERE true GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | 2 +(1 row) + +SELECT 777::text, last(quantity, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | 2 +(1 row) + +SELECT 777::text, last(quantity, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | +(1 row) + +SELECT 777::text, last(quantity, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | 2 +(1 row) + +SELECT 777::text, last(quantity, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | 4 +(1 row) + +SELECT 777::text, last(quantity, time) FROM partial_aggregation WHERE true GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | 6 +(1 row) + +SELECT 777::text, last(quantity, time) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | 3 +(1 row) + +SELECT 777::text, last(quantity, time) FROM partial_aggregation WHERE quantity is null GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | +(1 row) + +SELECT 777::text, last(quantity, time) FROM partial_aggregation WHERE quantity is not null GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | 6 +(1 row) + +SELECT 777::text, last(quantity, time) FROM partial_aggregation WHERE quantity > 3 GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | 6 +(1 row) + +SELECT 777::text, last(time, longvalue) FROM partial_aggregation WHERE true GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------------------------------ + 777 | Sun Jan 20 09:00:43 2019 PST +(1 row) + +SELECT 777::text, last(time, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------------------------------ + 777 | Sun Jan 20 09:00:43 2019 PST +(1 row) + +SELECT 777::text, last(time, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------ + 777 | +(1 row) + +SELECT 777::text, last(time, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------------------------------ + 777 | Sun Jan 20 09:00:43 2019 PST +(1 row) + +SELECT 777::text, last(time, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY 777::text ORDER BY 1, 2; + text | last +------+------------------------------ + 777 | Thu Jan 20 09:00:43 2022 PST +(1 row) + +SELECT longvalue, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE true GROUP BY longvalue ORDER BY 1, 2; + longvalue | first | last +-----------+------------------------------+------------------------------ + hello | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + more | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + some | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + words | Thu Jan 20 09:00:43 2022 PST | Fri Jan 20 09:00:43 2023 PST + world | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + | | +(6 rows) + +SELECT longvalue, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY longvalue ORDER BY 1, 2; + longvalue | first | last +-----------+------------------------------+------------------------------ + hello | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + more | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + some | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + world | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + | | +(5 rows) + +SELECT longvalue, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY longvalue ORDER BY 1, 2; + longvalue | first | last +-----------+-------+------ + | | +(1 row) + +SELECT longvalue, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY longvalue ORDER BY 1, 2; + longvalue | first | last +-----------+------------------------------+------------------------------ + hello | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + more | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + some | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + words | Thu Jan 20 09:00:43 2022 PST | Fri Jan 20 09:00:43 2023 PST + world | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST +(5 rows) + +SELECT longvalue, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY longvalue ORDER BY 1, 2; + longvalue | first | last +-----------+------------------------------+------------------------------ + words | Thu Jan 20 09:00:43 2022 PST | Fri Jan 20 09:00:43 2023 PST +(1 row) + +SELECT longvalue, last(longvalue, quantity) FROM partial_aggregation WHERE true GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------- + hello | hello + more | more + some | some + words | words + world | world + | +(6 rows) + +SELECT longvalue, last(longvalue, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------- + hello | hello + more | more + some | some + world | world + | +(5 rows) + +SELECT longvalue, last(longvalue, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + | +(1 row) + +SELECT longvalue, last(longvalue, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------- + hello | hello + more | more + some | some + words | words + world | world +(5 rows) + +SELECT longvalue, last(longvalue, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------- + words | words +(1 row) + +SELECT longvalue, last(quantity, longvalue) FROM partial_aggregation WHERE true GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + hello | 1 + more | 3 + some | 3 + words | 4 + world | 2 + | +(6 rows) + +SELECT longvalue, last(quantity, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + hello | 1 + more | 3 + some | 3 + world | 2 + | +(5 rows) + +SELECT longvalue, last(quantity, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + | +(1 row) + +SELECT longvalue, last(quantity, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + hello | 1 + more | 3 + some | 3 + words | 4 + world | 2 +(5 rows) + +SELECT longvalue, last(quantity, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + words | 4 +(1 row) + +SELECT longvalue, last(quantity, time) FROM partial_aggregation WHERE true GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + hello | 1 + more | 3 + some | 3 + words | 6 + world | 2 + | +(6 rows) + +SELECT longvalue, last(quantity, time) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + hello | 1 + more | 3 + some | 3 + world | 2 + | +(5 rows) + +SELECT longvalue, last(quantity, time) FROM partial_aggregation WHERE quantity is null GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + | +(1 row) + +SELECT longvalue, last(quantity, time) FROM partial_aggregation WHERE quantity is not null GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + hello | 1 + more | 3 + some | 3 + words | 6 + world | 2 +(5 rows) + +SELECT longvalue, last(quantity, time) FROM partial_aggregation WHERE quantity > 3 GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + words | 6 +(1 row) + +SELECT longvalue, last(time, longvalue) FROM partial_aggregation WHERE true GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------------------------------ + hello | Sun Jan 20 09:00:43 2019 PST + more | Mon Jan 20 09:00:43 2020 PST + some | Mon Jan 20 09:00:43 2020 PST + words | Thu Jan 20 09:00:43 2022 PST + world | Sun Jan 20 09:00:43 2019 PST + | +(6 rows) + +SELECT longvalue, last(time, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------------------------------ + hello | Sun Jan 20 09:00:43 2019 PST + more | Mon Jan 20 09:00:43 2020 PST + some | Mon Jan 20 09:00:43 2020 PST + world | Sun Jan 20 09:00:43 2019 PST + | +(5 rows) + +SELECT longvalue, last(time, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------ + | +(1 row) + +SELECT longvalue, last(time, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------------------------------ + hello | Sun Jan 20 09:00:43 2019 PST + more | Mon Jan 20 09:00:43 2020 PST + some | Mon Jan 20 09:00:43 2020 PST + words | Thu Jan 20 09:00:43 2022 PST + world | Sun Jan 20 09:00:43 2019 PST +(5 rows) + +SELECT longvalue, last(time, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY longvalue ORDER BY 1, 2; + longvalue | last +-----------+------------------------------ + words | Thu Jan 20 09:00:43 2022 PST +(1 row) + +SELECT quantity, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE true GROUP BY quantity ORDER BY 1, 2; + quantity | first | last +----------+------------------------------+------------------------------ + 1 | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + 2 | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + 3 | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + 4 | Thu Jan 20 09:00:43 2022 PST | Thu Jan 20 09:00:43 2022 PST + 5 | Thu Jan 20 09:00:43 2022 PST | Thu Jan 20 09:00:43 2022 PST + 6 | Fri Jan 20 09:00:43 2023 PST | Fri Jan 20 09:00:43 2023 PST + 7 | Fri Jan 20 09:00:43 2023 PST | Fri Jan 20 09:00:43 2023 PST + | | +(8 rows) + +SELECT quantity, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY quantity ORDER BY 1, 2; + quantity | first | last +----------+------------------------------+------------------------------ + 1 | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + 2 | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + 3 | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + | | +(4 rows) + +SELECT quantity, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY quantity ORDER BY 1, 2; + quantity | first | last +----------+-------+------ + | | +(1 row) + +SELECT quantity, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY quantity ORDER BY 1, 2; + quantity | first | last +----------+------------------------------+------------------------------ + 1 | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + 2 | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + 3 | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + 4 | Thu Jan 20 09:00:43 2022 PST | Thu Jan 20 09:00:43 2022 PST + 5 | Thu Jan 20 09:00:43 2022 PST | Thu Jan 20 09:00:43 2022 PST + 6 | Fri Jan 20 09:00:43 2023 PST | Fri Jan 20 09:00:43 2023 PST + 7 | Fri Jan 20 09:00:43 2023 PST | Fri Jan 20 09:00:43 2023 PST +(7 rows) + +SELECT quantity, first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY quantity ORDER BY 1, 2; + quantity | first | last +----------+------------------------------+------------------------------ + 4 | Thu Jan 20 09:00:43 2022 PST | Thu Jan 20 09:00:43 2022 PST + 5 | Thu Jan 20 09:00:43 2022 PST | Thu Jan 20 09:00:43 2022 PST + 6 | Fri Jan 20 09:00:43 2023 PST | Fri Jan 20 09:00:43 2023 PST + 7 | Fri Jan 20 09:00:43 2023 PST | Fri Jan 20 09:00:43 2023 PST +(4 rows) + +SELECT quantity, last(longvalue, quantity) FROM partial_aggregation WHERE true GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------- + 1 | hello + 2 | world + 3 | some + 4 | words + 5 | words + 6 | words + 7 | words + | +(8 rows) + +SELECT quantity, last(longvalue, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------- + 1 | hello + 2 | world + 3 | some + | +(4 rows) + +SELECT quantity, last(longvalue, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + | +(1 row) + +SELECT quantity, last(longvalue, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------- + 1 | hello + 2 | world + 3 | some + 4 | words + 5 | words + 6 | words + 7 | words +(7 rows) + +SELECT quantity, last(longvalue, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------- + 4 | words + 5 | words + 6 | words + 7 | words +(4 rows) + +SELECT quantity, last(quantity, longvalue) FROM partial_aggregation WHERE true GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + | +(8 rows) + +SELECT quantity, last(quantity, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + 1 | 1 + 2 | 2 + 3 | 3 + | +(4 rows) + +SELECT quantity, last(quantity, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + | +(1 row) + +SELECT quantity, last(quantity, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 +(7 rows) + +SELECT quantity, last(quantity, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 +(4 rows) + +SELECT quantity, last(quantity, time) FROM partial_aggregation WHERE true GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 + | +(8 rows) + +SELECT quantity, last(quantity, time) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + 1 | 1 + 2 | 2 + 3 | 3 + | +(4 rows) + +SELECT quantity, last(quantity, time) FROM partial_aggregation WHERE quantity is null GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + | +(1 row) + +SELECT quantity, last(quantity, time) FROM partial_aggregation WHERE quantity is not null GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + 1 | 1 + 2 | 2 + 3 | 3 + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 +(7 rows) + +SELECT quantity, last(quantity, time) FROM partial_aggregation WHERE quantity > 3 GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + 4 | 4 + 5 | 5 + 6 | 6 + 7 | 7 +(4 rows) + +SELECT quantity, last(time, longvalue) FROM partial_aggregation WHERE true GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------------------------------ + 1 | Sun Jan 20 09:00:43 2019 PST + 2 | Sun Jan 20 09:00:43 2019 PST + 3 | Mon Jan 20 09:00:43 2020 PST + 4 | Thu Jan 20 09:00:43 2022 PST + 5 | Thu Jan 20 09:00:43 2022 PST + 6 | Fri Jan 20 09:00:43 2023 PST + 7 | Fri Jan 20 09:00:43 2023 PST + | +(8 rows) + +SELECT quantity, last(time, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------------------------------ + 1 | Sun Jan 20 09:00:43 2019 PST + 2 | Sun Jan 20 09:00:43 2019 PST + 3 | Mon Jan 20 09:00:43 2020 PST + | +(4 rows) + +SELECT quantity, last(time, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------ + | +(1 row) + +SELECT quantity, last(time, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------------------------------ + 1 | Sun Jan 20 09:00:43 2019 PST + 2 | Sun Jan 20 09:00:43 2019 PST + 3 | Mon Jan 20 09:00:43 2020 PST + 4 | Thu Jan 20 09:00:43 2022 PST + 5 | Thu Jan 20 09:00:43 2022 PST + 6 | Fri Jan 20 09:00:43 2023 PST + 7 | Fri Jan 20 09:00:43 2023 PST +(7 rows) + +SELECT quantity, last(time, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY quantity ORDER BY 1, 2; + quantity | last +----------+------------------------------ + 4 | Thu Jan 20 09:00:43 2022 PST + 5 | Thu Jan 20 09:00:43 2022 PST + 6 | Fri Jan 20 09:00:43 2023 PST + 7 | Fri Jan 20 09:00:43 2023 PST +(4 rows) + +SELECT time_bucket('1 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE true GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | | + Mon Dec 31 16:00:00 2018 PST | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + Tue Dec 31 16:00:00 2019 PST | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + Thu Dec 31 16:00:00 2020 PST | Wed Jan 20 09:00:43 2021 PST | Wed Jan 20 09:00:43 2021 PST + Fri Dec 31 16:00:00 2021 PST | Thu Jan 20 09:00:43 2022 PST | Thu Jan 20 09:00:43 2022 PST + Sat Dec 31 16:00:00 2022 PST | Fri Jan 20 09:00:43 2023 PST | Fri Jan 20 09:00:43 2023 PST +(6 rows) + +SELECT time_bucket('1 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | | + Mon Dec 31 16:00:00 2018 PST | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + Tue Dec 31 16:00:00 2019 PST | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST +(3 rows) + +SELECT time_bucket('1 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+-------+------ + Sun Dec 31 16:00:00 2017 PST | | +(1 row) + +SELECT time_bucket('1 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+------------------------------+------------------------------ + Mon Dec 31 16:00:00 2018 PST | Sun Jan 20 09:00:43 2019 PST | Sun Jan 20 09:00:43 2019 PST + Tue Dec 31 16:00:00 2019 PST | Mon Jan 20 09:00:43 2020 PST | Mon Jan 20 09:00:43 2020 PST + Thu Dec 31 16:00:00 2020 PST | Wed Jan 20 09:00:43 2021 PST | Wed Jan 20 09:00:43 2021 PST + Fri Dec 31 16:00:00 2021 PST | Thu Jan 20 09:00:43 2022 PST | Thu Jan 20 09:00:43 2022 PST + Sat Dec 31 16:00:00 2022 PST | Fri Jan 20 09:00:43 2023 PST | Fri Jan 20 09:00:43 2023 PST +(5 rows) + +SELECT time_bucket('1 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+------------------------------+------------------------------ + Fri Dec 31 16:00:00 2021 PST | Thu Jan 20 09:00:43 2022 PST | Thu Jan 20 09:00:43 2022 PST + Sat Dec 31 16:00:00 2022 PST | Fri Jan 20 09:00:43 2023 PST | Fri Jan 20 09:00:43 2023 PST +(2 rows) + +SELECT time_bucket('1 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE true GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------- + Sun Dec 31 16:00:00 2017 PST | + Mon Dec 31 16:00:00 2018 PST | world + Tue Dec 31 16:00:00 2019 PST | some + Thu Dec 31 16:00:00 2020 PST | some + Fri Dec 31 16:00:00 2021 PST | words + Sat Dec 31 16:00:00 2022 PST | words +(6 rows) + +SELECT time_bucket('1 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------- + Sun Dec 31 16:00:00 2017 PST | + Mon Dec 31 16:00:00 2018 PST | world + Tue Dec 31 16:00:00 2019 PST | more +(3 rows) + +SELECT time_bucket('1 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | +(1 row) + +SELECT time_bucket('1 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------- + Mon Dec 31 16:00:00 2018 PST | world + Tue Dec 31 16:00:00 2019 PST | some + Thu Dec 31 16:00:00 2020 PST | some + Fri Dec 31 16:00:00 2021 PST | words + Sat Dec 31 16:00:00 2022 PST | words +(5 rows) + +SELECT time_bucket('1 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------- + Fri Dec 31 16:00:00 2021 PST | words + Sat Dec 31 16:00:00 2022 PST | words +(2 rows) + +SELECT time_bucket('1 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE true GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | + Mon Dec 31 16:00:00 2018 PST | 2 + Tue Dec 31 16:00:00 2019 PST | 3 + Thu Dec 31 16:00:00 2020 PST | 3 + Fri Dec 31 16:00:00 2021 PST | 4 + Sat Dec 31 16:00:00 2022 PST | 6 +(6 rows) + +SELECT time_bucket('1 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | + Mon Dec 31 16:00:00 2018 PST | 2 + Tue Dec 31 16:00:00 2019 PST | 3 +(3 rows) + +SELECT time_bucket('1 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | +(1 row) + +SELECT time_bucket('1 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Mon Dec 31 16:00:00 2018 PST | 2 + Tue Dec 31 16:00:00 2019 PST | 3 + Thu Dec 31 16:00:00 2020 PST | 3 + Fri Dec 31 16:00:00 2021 PST | 4 + Sat Dec 31 16:00:00 2022 PST | 6 +(5 rows) + +SELECT time_bucket('1 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Fri Dec 31 16:00:00 2021 PST | 4 + Sat Dec 31 16:00:00 2022 PST | 6 +(2 rows) + +SELECT time_bucket('1 year', time), last(quantity, time) FROM partial_aggregation WHERE true GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | + Mon Dec 31 16:00:00 2018 PST | 1 + Tue Dec 31 16:00:00 2019 PST | 3 + Thu Dec 31 16:00:00 2020 PST | 3 + Fri Dec 31 16:00:00 2021 PST | 4 + Sat Dec 31 16:00:00 2022 PST | 6 +(6 rows) + +SELECT time_bucket('1 year', time), last(quantity, time) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | + Mon Dec 31 16:00:00 2018 PST | 2 + Tue Dec 31 16:00:00 2019 PST | 3 +(3 rows) + +SELECT time_bucket('1 year', time), last(quantity, time) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | +(1 row) + +SELECT time_bucket('1 year', time), last(quantity, time) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Mon Dec 31 16:00:00 2018 PST | 1 + Tue Dec 31 16:00:00 2019 PST | 3 + Thu Dec 31 16:00:00 2020 PST | 3 + Fri Dec 31 16:00:00 2021 PST | 4 + Sat Dec 31 16:00:00 2022 PST | 6 +(5 rows) + +SELECT time_bucket('1 year', time), last(quantity, time) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Fri Dec 31 16:00:00 2021 PST | 4 + Sat Dec 31 16:00:00 2022 PST | 6 +(2 rows) + +SELECT time_bucket('1 year', time), last(time, longvalue) FROM partial_aggregation WHERE true GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | + Mon Dec 31 16:00:00 2018 PST | Sun Jan 20 09:00:43 2019 PST + Tue Dec 31 16:00:00 2019 PST | Mon Jan 20 09:00:43 2020 PST + Thu Dec 31 16:00:00 2020 PST | Wed Jan 20 09:00:43 2021 PST + Fri Dec 31 16:00:00 2021 PST | Thu Jan 20 09:00:43 2022 PST + Sat Dec 31 16:00:00 2022 PST | Fri Jan 20 09:00:43 2023 PST +(6 rows) + +SELECT time_bucket('1 year', time), last(time, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | + Mon Dec 31 16:00:00 2018 PST | Sun Jan 20 09:00:43 2019 PST + Tue Dec 31 16:00:00 2019 PST | Mon Jan 20 09:00:43 2020 PST +(3 rows) + +SELECT time_bucket('1 year', time), last(time, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | +(1 row) + +SELECT time_bucket('1 year', time), last(time, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------------------------------ + Mon Dec 31 16:00:00 2018 PST | Sun Jan 20 09:00:43 2019 PST + Tue Dec 31 16:00:00 2019 PST | Mon Jan 20 09:00:43 2020 PST + Thu Dec 31 16:00:00 2020 PST | Wed Jan 20 09:00:43 2021 PST + Fri Dec 31 16:00:00 2021 PST | Thu Jan 20 09:00:43 2022 PST + Sat Dec 31 16:00:00 2022 PST | Fri Jan 20 09:00:43 2023 PST +(5 rows) + +SELECT time_bucket('1 year', time), last(time, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('1 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------------------------------ + Fri Dec 31 16:00:00 2021 PST | Thu Jan 20 09:00:43 2022 PST + Sat Dec 31 16:00:00 2022 PST | Fri Jan 20 09:00:43 2023 PST +(2 rows) + +SELECT time_bucket('3 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE true GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | Sun Jan 20 09:00:43 2019 PST | Mon Jan 20 09:00:43 2020 PST + Thu Dec 31 16:00:00 2020 PST | Wed Jan 20 09:00:43 2021 PST | Fri Jan 20 09:00:43 2023 PST +(2 rows) + +SELECT time_bucket('3 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | Sun Jan 20 09:00:43 2019 PST | Mon Jan 20 09:00:43 2020 PST +(1 row) + +SELECT time_bucket('3 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+-------+------ + Sun Dec 31 16:00:00 2017 PST | | +(1 row) + +SELECT time_bucket('3 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | Sun Jan 20 09:00:43 2019 PST | Mon Jan 20 09:00:43 2020 PST + Thu Dec 31 16:00:00 2020 PST | Wed Jan 20 09:00:43 2021 PST | Fri Jan 20 09:00:43 2023 PST +(2 rows) + +SELECT time_bucket('3 year', time), first(time, quantity), last(time, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | first | last +------------------------------+------------------------------+------------------------------ + Thu Dec 31 16:00:00 2020 PST | Thu Jan 20 09:00:43 2022 PST | Fri Jan 20 09:00:43 2023 PST +(1 row) + +SELECT time_bucket('3 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE true GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------- + Sun Dec 31 16:00:00 2017 PST | some + Thu Dec 31 16:00:00 2020 PST | words +(2 rows) + +SELECT time_bucket('3 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | more +(1 row) + +SELECT time_bucket('3 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | +(1 row) + +SELECT time_bucket('3 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------- + Sun Dec 31 16:00:00 2017 PST | some + Thu Dec 31 16:00:00 2020 PST | words +(2 rows) + +SELECT time_bucket('3 year', time), last(longvalue, quantity) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------- + Thu Dec 31 16:00:00 2020 PST | words +(1 row) + +SELECT time_bucket('3 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE true GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | 2 + Thu Dec 31 16:00:00 2020 PST | 4 +(2 rows) + +SELECT time_bucket('3 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | 2 +(1 row) + +SELECT time_bucket('3 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | +(1 row) + +SELECT time_bucket('3 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | 2 + Thu Dec 31 16:00:00 2020 PST | 4 +(2 rows) + +SELECT time_bucket('3 year', time), last(quantity, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Thu Dec 31 16:00:00 2020 PST | 4 +(1 row) + +SELECT time_bucket('3 year', time), last(quantity, time) FROM partial_aggregation WHERE true GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | 3 + Thu Dec 31 16:00:00 2020 PST | 6 +(2 rows) + +SELECT time_bucket('3 year', time), last(quantity, time) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | 3 +(1 row) + +SELECT time_bucket('3 year', time), last(quantity, time) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | +(1 row) + +SELECT time_bucket('3 year', time), last(quantity, time) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | 3 + Thu Dec 31 16:00:00 2020 PST | 6 +(2 rows) + +SELECT time_bucket('3 year', time), last(quantity, time) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Thu Dec 31 16:00:00 2020 PST | 6 +(1 row) + +SELECT time_bucket('3 year', time), last(time, longvalue) FROM partial_aggregation WHERE true GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | Sun Jan 20 09:00:43 2019 PST + Thu Dec 31 16:00:00 2020 PST | Thu Jan 20 09:00:43 2022 PST +(2 rows) + +SELECT time_bucket('3 year', time), last(time, longvalue) FROM partial_aggregation WHERE time < '2021-01-01' GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | Sun Jan 20 09:00:43 2019 PST +(1 row) + +SELECT time_bucket('3 year', time), last(time, longvalue) FROM partial_aggregation WHERE quantity is null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------ + Sun Dec 31 16:00:00 2017 PST | +(1 row) + +SELECT time_bucket('3 year', time), last(time, longvalue) FROM partial_aggregation WHERE quantity is not null GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------------------------------ + Sun Dec 31 16:00:00 2017 PST | Sun Jan 20 09:00:43 2019 PST + Thu Dec 31 16:00:00 2020 PST | Thu Jan 20 09:00:43 2022 PST +(2 rows) + +SELECT time_bucket('3 year', time), last(time, longvalue) FROM partial_aggregation WHERE quantity > 3 GROUP BY time_bucket('3 year', time) ORDER BY 1, 2; + time_bucket | last +------------------------------+------------------------------ + Thu Dec 31 16:00:00 2020 PST | Thu Jan 20 09:00:43 2022 PST +(1 row) + +SET enable_partitionwise_aggregate = OFF; diff --git a/test/expected/alter.out b/test/expected/alter.out index 1e1db9173f6..24ba2ac6dc6 100644 --- a/test/expected/alter.out +++ b/test/expected/alter.out @@ -30,7 +30,7 @@ SELECT * FROM alter_before; -- Show that deleted column is marked as dropped and that attnums are -- now different for the root table and the chunk -- PG17 made attstattarget NULLABLE and changed the default from -1 to NULL -SELECT c.relname, a.attname, a.attnum, a.attoptions, CASE WHEN a.attstattarget = -1 THEN NULL ELSE a.attstattarget END attstattarget, a.attstorage FROM pg_attribute a, pg_class c +SELECT c.relname, a.attname, a.attnum, a.attoptions, CASE WHEN a.attstattarget = -1 OR (a.attisdropped AND a.attstattarget = 0) THEN NULL ELSE a.attstattarget END attstattarget, a.attstorage FROM pg_attribute a, pg_class c WHERE a.attrelid = c.oid AND (c.relname LIKE '_hyper_1%_chunk' OR c.relname = 'alter_before') AND a.attnum > 0 @@ -42,7 +42,7 @@ ORDER BY c.relname, a.attnum; _hyper_1_1_chunk | colorid | 3 | | | p _hyper_1_1_chunk | notes | 4 | | | e _hyper_1_1_chunk | notes_2 | 5 | | | x - alter_before | ........pg.dropped.1........ | 1 | | 0 | p + alter_before | ........pg.dropped.1........ | 1 | | | p alter_before | time | 2 | | | p alter_before | temp | 3 | {n_distinct=10} | 100 | p alter_before | colorid | 4 | | | p @@ -128,14 +128,14 @@ ALTER TABLE _timescaledb_internal._hyper_2_4_chunk ALTER COLUMN temp SET (n_dis ALTER TABLE _timescaledb_internal._hyper_2_4_chunk ALTER COLUMN temp SET STATISTICS 201; ALTER TABLE _timescaledb_internal._hyper_2_4_chunk ALTER COLUMN notes SET STORAGE EXTERNAL; -- PG17 made attstattarget NULLABLE and changed the default from -1 to NULL -SELECT c.relname, a.attname, a.attnum, a.attoptions, CASE WHEN a.attstattarget = -1 THEN NULL ELSE a.attstattarget END attstattarget, a.attstorage FROM pg_attribute a, pg_class c +SELECT c.relname, a.attname, a.attnum, a.attoptions, CASE WHEN a.attstattarget = -1 OR (a.attisdropped AND a.attstattarget = 0) THEN NULL ELSE a.attstattarget END attstattarget, a.attstorage FROM pg_attribute a, pg_class c WHERE a.attrelid = c.oid AND (c.relname LIKE '_hyper_2%_chunk' OR c.relname = 'alter_after') AND a.attnum > 0 ORDER BY c.relname, a.attnum; relname | attname | attnum | attoptions | attstattarget | attstorage ------------------+------------------------------+--------+-----------------+---------------+------------ - _hyper_2_2_chunk | ........pg.dropped.1........ | 1 | | 0 | p + _hyper_2_2_chunk | ........pg.dropped.1........ | 1 | | | p _hyper_2_2_chunk | time | 2 | | | p _hyper_2_2_chunk | temp | 3 | {n_distinct=10} | | p _hyper_2_2_chunk | colorid | 4 | | 101 | p @@ -154,7 +154,7 @@ ORDER BY c.relname, a.attnum; _hyper_2_4_chunk | notes | 4 | | | e _hyper_2_4_chunk | notes_2 | 5 | | | e _hyper_2_4_chunk | id | 6 | | | p - alter_after | ........pg.dropped.1........ | 1 | | 0 | p + alter_after | ........pg.dropped.1........ | 1 | | | p alter_after | time | 2 | | | p alter_after | temp | 3 | {n_distinct=10} | | p alter_after | colorid | 4 | | 101 | p diff --git a/test/expected/drop_owned.out b/test/expected/drop_owned-14.out similarity index 100% rename from test/expected/drop_owned.out rename to test/expected/drop_owned-14.out diff --git a/test/expected/drop_owned-15.out b/test/expected/drop_owned-15.out new file mode 100644 index 00000000000..204e4aa4daf --- /dev/null +++ b/test/expected/drop_owned-15.out @@ -0,0 +1,194 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE SCHEMA hypertable_schema; +GRANT ALL ON SCHEMA hypertable_schema TO :ROLE_DEFAULT_PERM_USER; +SET ROLE :ROLE_DEFAULT_PERM_USER; +CREATE TABLE hypertable_schema.default_perm_user (time timestamptz, temp float, location int); +SELECT create_hypertable('hypertable_schema.default_perm_user', 'time', 'location', 2); +NOTICE: adding not-null constraint to column "time" + create_hypertable +------------------------------------------- + (1,hypertable_schema,default_perm_user,t) +(1 row) + +INSERT INTO hypertable_schema.default_perm_user VALUES ('2001-01-01 01:01:01', 23.3, 1); +RESET ROLE; +CREATE TABLE hypertable_schema.superuser (time timestamptz, temp float, location int); +SELECT create_hypertable('hypertable_schema.superuser', 'time', 'location', 2); +NOTICE: adding not-null constraint to column "time" + create_hypertable +----------------------------------- + (2,hypertable_schema,superuser,t) +(1 row) + +INSERT INTO hypertable_schema.superuser VALUES ('2001-01-01 01:01:01', 23.3, 1); +SELECT * FROM _timescaledb_catalog.hypertable ORDER BY id; + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | status +----+-------------------+-------------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+-------- + 1 | hypertable_schema | default_perm_user | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | 0 + 2 | hypertable_schema | superuser | _timescaledb_internal | _hyper_2 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | 0 +(2 rows) + +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk; + id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk +----+---------------+-----------------------+------------------+---------------------+---------+--------+----------- + 1 | 1 | _timescaledb_internal | _hyper_1_1_chunk | | f | 0 | f + 2 | 2 | _timescaledb_internal | _hyper_2_2_chunk | | f | 0 | f +(2 rows) + +DROP OWNED BY :ROLE_DEFAULT_PERM_USER; +SELECT * FROM _timescaledb_catalog.hypertable ORDER BY id; + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | status +----+-------------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+-------- + 2 | hypertable_schema | superuser | _timescaledb_internal | _hyper_2 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | 0 +(1 row) + +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk; + id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk +----+---------------+-----------------------+------------------+---------------------+---------+--------+----------- + 2 | 2 | _timescaledb_internal | _hyper_2_2_chunk | | f | 0 | f +(1 row) + +DROP TABLE hypertable_schema.superuser; +--everything should be cleaned up +SELECT * FROM _timescaledb_catalog.hypertable GROUP BY id; + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | status +----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+------------------------+-------------------+-------------------+--------------------------+-------- +(0 rows) + +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk; + id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk +----+---------------+-------------+------------+---------------------+---------+--------+----------- +(0 rows) + +SELECT * FROM _timescaledb_catalog.dimension; + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func +----+---------------+-------------+-------------+---------+------------+--------------------------+-------------------+-----------------+--------------------------+-------------------------+------------------ +(0 rows) + +SELECT * FROM _timescaledb_catalog.dimension_slice; + id | dimension_id | range_start | range_end +----+--------------+-------------+----------- +(0 rows) + +SELECT * FROM _timescaledb_catalog.chunk_index; + chunk_id | index_name | hypertable_id | hypertable_index_name +----------+------------+---------------+----------------------- +(0 rows) + +SELECT * FROM _timescaledb_catalog.chunk_constraint; + chunk_id | dimension_slice_id | constraint_name | hypertable_constraint_name +----------+--------------------+-----------------+---------------------------- +(0 rows) + +-- test drop owned in database without extension installed +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE database test_drop_owned; +\c test_drop_owned +DROP OWNED BY :ROLE_SUPERUSER; +\c :TEST_DBNAME :ROLE_SUPERUSER +DROP DATABASE test_drop_owned WITH (FORCE); +-- Test that dependencies on roles are added to chunks when creating +-- new chunks. If that is not done, DROP OWNED BY will not revoke the +-- privilege on the chunk. +CREATE TABLE sensor_data(time timestamptz not null, cpu double precision null); +SELECT * FROM create_hypertable('sensor_data','time'); + hypertable_id | schema_name | table_name | created +---------------+-------------+-------------+--------- + 3 | public | sensor_data | t +(1 row) + +INSERT INTO sensor_data +SELECT time, + random() AS cpu +FROM generate_series('2020-01-01'::timestamptz, '2020-01-24'::timestamptz, INTERVAL '10 minute') AS g1(time); +\dp sensor_data + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+-------------+-------+-------------------+-------------------+---------- + public | sensor_data | table | | | +(1 row) + +\dp _timescaledb_internal._hyper_3* + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------+-------------------+---------- + _timescaledb_internal | _hyper_3_3_chunk | table | | | + _timescaledb_internal | _hyper_3_4_chunk | table | | | + _timescaledb_internal | _hyper_3_5_chunk | table | | | + _timescaledb_internal | _hyper_3_6_chunk | table | | | + _timescaledb_internal | _hyper_3_7_chunk | table | | | +(5 rows) + +GRANT SELECT ON sensor_data TO :ROLE_DEFAULT_PERM_USER; +\dp sensor_data + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+-------------+-------+--------------------------------+-------------------+---------- + public | sensor_data | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user=r/super_user | | +(1 row) + +\dp _timescaledb_internal._hyper_3* + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+--------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_3_3_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user=r/super_user | | + _timescaledb_internal | _hyper_3_4_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user=r/super_user | | + _timescaledb_internal | _hyper_3_5_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user=r/super_user | | + _timescaledb_internal | _hyper_3_6_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user=r/super_user | | + _timescaledb_internal | _hyper_3_7_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user=r/super_user | | +(5 rows) + +-- Insert more chunks after adding the user to the hypertable. These +-- will now get the privileges of the hypertable. +INSERT INTO sensor_data +SELECT time, + random() AS cpu +FROM generate_series('2020-01-20'::timestamptz, '2020-02-05'::timestamptz, INTERVAL '10 minute') AS g1(time); +\dp _timescaledb_internal._hyper_3* + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+--------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_3_3_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user=r/super_user | | + _timescaledb_internal | _hyper_3_4_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user=r/super_user | | + _timescaledb_internal | _hyper_3_5_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user=r/super_user | | + _timescaledb_internal | _hyper_3_6_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user=r/super_user | | + _timescaledb_internal | _hyper_3_7_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user=r/super_user | | + _timescaledb_internal | _hyper_3_8_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user=r/super_user | | +(6 rows) + +-- This should revoke the privileges on both the hypertable and the chunks. +DROP OWNED BY :ROLE_DEFAULT_PERM_USER; +\dp sensor_data + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+-------------+-------+-------------------------------+-------------------+---------- + public | sensor_data | table | super_user=arwdDxt/super_user | | +(1 row) + +\dp _timescaledb_internal._hyper_3* + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_3_3_chunk | table | super_user=arwdDxt/super_user | | + _timescaledb_internal | _hyper_3_4_chunk | table | super_user=arwdDxt/super_user | | + _timescaledb_internal | _hyper_3_5_chunk | table | super_user=arwdDxt/super_user | | + _timescaledb_internal | _hyper_3_6_chunk | table | super_user=arwdDxt/super_user | | + _timescaledb_internal | _hyper_3_7_chunk | table | super_user=arwdDxt/super_user | | + _timescaledb_internal | _hyper_3_8_chunk | table | super_user=arwdDxt/super_user | | +(6 rows) + diff --git a/test/expected/drop_owned-16.out b/test/expected/drop_owned-16.out new file mode 100644 index 00000000000..204e4aa4daf --- /dev/null +++ b/test/expected/drop_owned-16.out @@ -0,0 +1,194 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE SCHEMA hypertable_schema; +GRANT ALL ON SCHEMA hypertable_schema TO :ROLE_DEFAULT_PERM_USER; +SET ROLE :ROLE_DEFAULT_PERM_USER; +CREATE TABLE hypertable_schema.default_perm_user (time timestamptz, temp float, location int); +SELECT create_hypertable('hypertable_schema.default_perm_user', 'time', 'location', 2); +NOTICE: adding not-null constraint to column "time" + create_hypertable +------------------------------------------- + (1,hypertable_schema,default_perm_user,t) +(1 row) + +INSERT INTO hypertable_schema.default_perm_user VALUES ('2001-01-01 01:01:01', 23.3, 1); +RESET ROLE; +CREATE TABLE hypertable_schema.superuser (time timestamptz, temp float, location int); +SELECT create_hypertable('hypertable_schema.superuser', 'time', 'location', 2); +NOTICE: adding not-null constraint to column "time" + create_hypertable +----------------------------------- + (2,hypertable_schema,superuser,t) +(1 row) + +INSERT INTO hypertable_schema.superuser VALUES ('2001-01-01 01:01:01', 23.3, 1); +SELECT * FROM _timescaledb_catalog.hypertable ORDER BY id; + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | status +----+-------------------+-------------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+-------- + 1 | hypertable_schema | default_perm_user | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | 0 + 2 | hypertable_schema | superuser | _timescaledb_internal | _hyper_2 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | 0 +(2 rows) + +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk; + id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk +----+---------------+-----------------------+------------------+---------------------+---------+--------+----------- + 1 | 1 | _timescaledb_internal | _hyper_1_1_chunk | | f | 0 | f + 2 | 2 | _timescaledb_internal | _hyper_2_2_chunk | | f | 0 | f +(2 rows) + +DROP OWNED BY :ROLE_DEFAULT_PERM_USER; +SELECT * FROM _timescaledb_catalog.hypertable ORDER BY id; + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | status +----+-------------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+-------- + 2 | hypertable_schema | superuser | _timescaledb_internal | _hyper_2 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | 0 +(1 row) + +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk; + id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk +----+---------------+-----------------------+------------------+---------------------+---------+--------+----------- + 2 | 2 | _timescaledb_internal | _hyper_2_2_chunk | | f | 0 | f +(1 row) + +DROP TABLE hypertable_schema.superuser; +--everything should be cleaned up +SELECT * FROM _timescaledb_catalog.hypertable GROUP BY id; + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | status +----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+------------------------+-------------------+-------------------+--------------------------+-------- +(0 rows) + +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk; + id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk +----+---------------+-------------+------------+---------------------+---------+--------+----------- +(0 rows) + +SELECT * FROM _timescaledb_catalog.dimension; + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func +----+---------------+-------------+-------------+---------+------------+--------------------------+-------------------+-----------------+--------------------------+-------------------------+------------------ +(0 rows) + +SELECT * FROM _timescaledb_catalog.dimension_slice; + id | dimension_id | range_start | range_end +----+--------------+-------------+----------- +(0 rows) + +SELECT * FROM _timescaledb_catalog.chunk_index; + chunk_id | index_name | hypertable_id | hypertable_index_name +----------+------------+---------------+----------------------- +(0 rows) + +SELECT * FROM _timescaledb_catalog.chunk_constraint; + chunk_id | dimension_slice_id | constraint_name | hypertable_constraint_name +----------+--------------------+-----------------+---------------------------- +(0 rows) + +-- test drop owned in database without extension installed +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE database test_drop_owned; +\c test_drop_owned +DROP OWNED BY :ROLE_SUPERUSER; +\c :TEST_DBNAME :ROLE_SUPERUSER +DROP DATABASE test_drop_owned WITH (FORCE); +-- Test that dependencies on roles are added to chunks when creating +-- new chunks. If that is not done, DROP OWNED BY will not revoke the +-- privilege on the chunk. +CREATE TABLE sensor_data(time timestamptz not null, cpu double precision null); +SELECT * FROM create_hypertable('sensor_data','time'); + hypertable_id | schema_name | table_name | created +---------------+-------------+-------------+--------- + 3 | public | sensor_data | t +(1 row) + +INSERT INTO sensor_data +SELECT time, + random() AS cpu +FROM generate_series('2020-01-01'::timestamptz, '2020-01-24'::timestamptz, INTERVAL '10 minute') AS g1(time); +\dp sensor_data + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+-------------+-------+-------------------+-------------------+---------- + public | sensor_data | table | | | +(1 row) + +\dp _timescaledb_internal._hyper_3* + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------+-------------------+---------- + _timescaledb_internal | _hyper_3_3_chunk | table | | | + _timescaledb_internal | _hyper_3_4_chunk | table | | | + _timescaledb_internal | _hyper_3_5_chunk | table | | | + _timescaledb_internal | _hyper_3_6_chunk | table | | | + _timescaledb_internal | _hyper_3_7_chunk | table | | | +(5 rows) + +GRANT SELECT ON sensor_data TO :ROLE_DEFAULT_PERM_USER; +\dp sensor_data + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+-------------+-------+--------------------------------+-------------------+---------- + public | sensor_data | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user=r/super_user | | +(1 row) + +\dp _timescaledb_internal._hyper_3* + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+--------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_3_3_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user=r/super_user | | + _timescaledb_internal | _hyper_3_4_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user=r/super_user | | + _timescaledb_internal | _hyper_3_5_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user=r/super_user | | + _timescaledb_internal | _hyper_3_6_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user=r/super_user | | + _timescaledb_internal | _hyper_3_7_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user=r/super_user | | +(5 rows) + +-- Insert more chunks after adding the user to the hypertable. These +-- will now get the privileges of the hypertable. +INSERT INTO sensor_data +SELECT time, + random() AS cpu +FROM generate_series('2020-01-20'::timestamptz, '2020-02-05'::timestamptz, INTERVAL '10 minute') AS g1(time); +\dp _timescaledb_internal._hyper_3* + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+--------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_3_3_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user=r/super_user | | + _timescaledb_internal | _hyper_3_4_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user=r/super_user | | + _timescaledb_internal | _hyper_3_5_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user=r/super_user | | + _timescaledb_internal | _hyper_3_6_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user=r/super_user | | + _timescaledb_internal | _hyper_3_7_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user=r/super_user | | + _timescaledb_internal | _hyper_3_8_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user=r/super_user | | +(6 rows) + +-- This should revoke the privileges on both the hypertable and the chunks. +DROP OWNED BY :ROLE_DEFAULT_PERM_USER; +\dp sensor_data + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+-------------+-------+-------------------------------+-------------------+---------- + public | sensor_data | table | super_user=arwdDxt/super_user | | +(1 row) + +\dp _timescaledb_internal._hyper_3* + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_3_3_chunk | table | super_user=arwdDxt/super_user | | + _timescaledb_internal | _hyper_3_4_chunk | table | super_user=arwdDxt/super_user | | + _timescaledb_internal | _hyper_3_5_chunk | table | super_user=arwdDxt/super_user | | + _timescaledb_internal | _hyper_3_6_chunk | table | super_user=arwdDxt/super_user | | + _timescaledb_internal | _hyper_3_7_chunk | table | super_user=arwdDxt/super_user | | + _timescaledb_internal | _hyper_3_8_chunk | table | super_user=arwdDxt/super_user | | +(6 rows) + diff --git a/test/expected/drop_owned-17.out b/test/expected/drop_owned-17.out new file mode 100644 index 00000000000..ecfe069b87f --- /dev/null +++ b/test/expected/drop_owned-17.out @@ -0,0 +1,194 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE SCHEMA hypertable_schema; +GRANT ALL ON SCHEMA hypertable_schema TO :ROLE_DEFAULT_PERM_USER; +SET ROLE :ROLE_DEFAULT_PERM_USER; +CREATE TABLE hypertable_schema.default_perm_user (time timestamptz, temp float, location int); +SELECT create_hypertable('hypertable_schema.default_perm_user', 'time', 'location', 2); +NOTICE: adding not-null constraint to column "time" + create_hypertable +------------------------------------------- + (1,hypertable_schema,default_perm_user,t) +(1 row) + +INSERT INTO hypertable_schema.default_perm_user VALUES ('2001-01-01 01:01:01', 23.3, 1); +RESET ROLE; +CREATE TABLE hypertable_schema.superuser (time timestamptz, temp float, location int); +SELECT create_hypertable('hypertable_schema.superuser', 'time', 'location', 2); +NOTICE: adding not-null constraint to column "time" + create_hypertable +----------------------------------- + (2,hypertable_schema,superuser,t) +(1 row) + +INSERT INTO hypertable_schema.superuser VALUES ('2001-01-01 01:01:01', 23.3, 1); +SELECT * FROM _timescaledb_catalog.hypertable ORDER BY id; + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | status +----+-------------------+-------------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+-------- + 1 | hypertable_schema | default_perm_user | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | 0 + 2 | hypertable_schema | superuser | _timescaledb_internal | _hyper_2 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | 0 +(2 rows) + +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk; + id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk +----+---------------+-----------------------+------------------+---------------------+---------+--------+----------- + 1 | 1 | _timescaledb_internal | _hyper_1_1_chunk | | f | 0 | f + 2 | 2 | _timescaledb_internal | _hyper_2_2_chunk | | f | 0 | f +(2 rows) + +DROP OWNED BY :ROLE_DEFAULT_PERM_USER; +SELECT * FROM _timescaledb_catalog.hypertable ORDER BY id; + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | status +----+-------------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+-------------------+--------------------------+-------- + 2 | hypertable_schema | superuser | _timescaledb_internal | _hyper_2 | 2 | _timescaledb_functions | calculate_chunk_interval | 0 | 0 | | 0 +(1 row) + +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk; + id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk +----+---------------+-----------------------+------------------+---------------------+---------+--------+----------- + 2 | 2 | _timescaledb_internal | _hyper_2_2_chunk | | f | 0 | f +(1 row) + +DROP TABLE hypertable_schema.superuser; +--everything should be cleaned up +SELECT * FROM _timescaledb_catalog.hypertable GROUP BY id; + id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compression_state | compressed_hypertable_id | status +----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+------------------------+-------------------+-------------------+--------------------------+-------- +(0 rows) + +SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk; + id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk +----+---------------+-------------+------------+---------------------+---------+--------+----------- +(0 rows) + +SELECT * FROM _timescaledb_catalog.dimension; + id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func +----+---------------+-------------+-------------+---------+------------+--------------------------+-------------------+-----------------+--------------------------+-------------------------+------------------ +(0 rows) + +SELECT * FROM _timescaledb_catalog.dimension_slice; + id | dimension_id | range_start | range_end +----+--------------+-------------+----------- +(0 rows) + +SELECT * FROM _timescaledb_catalog.chunk_index; + chunk_id | index_name | hypertable_id | hypertable_index_name +----------+------------+---------------+----------------------- +(0 rows) + +SELECT * FROM _timescaledb_catalog.chunk_constraint; + chunk_id | dimension_slice_id | constraint_name | hypertable_constraint_name +----------+--------------------+-----------------+---------------------------- +(0 rows) + +-- test drop owned in database without extension installed +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE database test_drop_owned; +\c test_drop_owned +DROP OWNED BY :ROLE_SUPERUSER; +\c :TEST_DBNAME :ROLE_SUPERUSER +DROP DATABASE test_drop_owned WITH (FORCE); +-- Test that dependencies on roles are added to chunks when creating +-- new chunks. If that is not done, DROP OWNED BY will not revoke the +-- privilege on the chunk. +CREATE TABLE sensor_data(time timestamptz not null, cpu double precision null); +SELECT * FROM create_hypertable('sensor_data','time'); + hypertable_id | schema_name | table_name | created +---------------+-------------+-------------+--------- + 3 | public | sensor_data | t +(1 row) + +INSERT INTO sensor_data +SELECT time, + random() AS cpu +FROM generate_series('2020-01-01'::timestamptz, '2020-01-24'::timestamptz, INTERVAL '10 minute') AS g1(time); +\dp sensor_data + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+-------------+-------+-------------------+-------------------+---------- + public | sensor_data | table | | | +(1 row) + +\dp _timescaledb_internal._hyper_3* + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------+-------------------+---------- + _timescaledb_internal | _hyper_3_3_chunk | table | | | + _timescaledb_internal | _hyper_3_4_chunk | table | | | + _timescaledb_internal | _hyper_3_5_chunk | table | | | + _timescaledb_internal | _hyper_3_6_chunk | table | | | + _timescaledb_internal | _hyper_3_7_chunk | table | | | +(5 rows) + +GRANT SELECT ON sensor_data TO :ROLE_DEFAULT_PERM_USER; +\dp sensor_data + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+-------------+-------+--------------------------------+-------------------+---------- + public | sensor_data | table | super_user=arwdDxtm/super_user+| | + | | | default_perm_user=r/super_user | | +(1 row) + +\dp _timescaledb_internal._hyper_3* + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+--------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_3_3_chunk | table | super_user=arwdDxtm/super_user+| | + | | | default_perm_user=r/super_user | | + _timescaledb_internal | _hyper_3_4_chunk | table | super_user=arwdDxtm/super_user+| | + | | | default_perm_user=r/super_user | | + _timescaledb_internal | _hyper_3_5_chunk | table | super_user=arwdDxtm/super_user+| | + | | | default_perm_user=r/super_user | | + _timescaledb_internal | _hyper_3_6_chunk | table | super_user=arwdDxtm/super_user+| | + | | | default_perm_user=r/super_user | | + _timescaledb_internal | _hyper_3_7_chunk | table | super_user=arwdDxtm/super_user+| | + | | | default_perm_user=r/super_user | | +(5 rows) + +-- Insert more chunks after adding the user to the hypertable. These +-- will now get the privileges of the hypertable. +INSERT INTO sensor_data +SELECT time, + random() AS cpu +FROM generate_series('2020-01-20'::timestamptz, '2020-02-05'::timestamptz, INTERVAL '10 minute') AS g1(time); +\dp _timescaledb_internal._hyper_3* + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+--------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_3_3_chunk | table | super_user=arwdDxtm/super_user+| | + | | | default_perm_user=r/super_user | | + _timescaledb_internal | _hyper_3_4_chunk | table | super_user=arwdDxtm/super_user+| | + | | | default_perm_user=r/super_user | | + _timescaledb_internal | _hyper_3_5_chunk | table | super_user=arwdDxtm/super_user+| | + | | | default_perm_user=r/super_user | | + _timescaledb_internal | _hyper_3_6_chunk | table | super_user=arwdDxtm/super_user+| | + | | | default_perm_user=r/super_user | | + _timescaledb_internal | _hyper_3_7_chunk | table | super_user=arwdDxtm/super_user+| | + | | | default_perm_user=r/super_user | | + _timescaledb_internal | _hyper_3_8_chunk | table | super_user=arwdDxtm/super_user+| | + | | | default_perm_user=r/super_user | | +(6 rows) + +-- This should revoke the privileges on both the hypertable and the chunks. +DROP OWNED BY :ROLE_DEFAULT_PERM_USER; +\dp sensor_data + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+-------------+-------+--------------------------------+-------------------+---------- + public | sensor_data | table | super_user=arwdDxtm/super_user | | +(1 row) + +\dp _timescaledb_internal._hyper_3* + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+--------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_3_3_chunk | table | super_user=arwdDxtm/super_user | | + _timescaledb_internal | _hyper_3_4_chunk | table | super_user=arwdDxtm/super_user | | + _timescaledb_internal | _hyper_3_5_chunk | table | super_user=arwdDxtm/super_user | | + _timescaledb_internal | _hyper_3_6_chunk | table | super_user=arwdDxtm/super_user | | + _timescaledb_internal | _hyper_3_7_chunk | table | super_user=arwdDxtm/super_user | | + _timescaledb_internal | _hyper_3_8_chunk | table | super_user=arwdDxtm/super_user | | +(6 rows) + diff --git a/test/expected/grant_hypertable.out b/test/expected/grant_hypertable-14.out similarity index 100% rename from test/expected/grant_hypertable.out rename to test/expected/grant_hypertable-14.out diff --git a/test/expected/grant_hypertable-15.out b/test/expected/grant_hypertable-15.out new file mode 100644 index 00000000000..96e3d41c4d1 --- /dev/null +++ b/test/expected/grant_hypertable-15.out @@ -0,0 +1,394 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE TABLE conditions( + time TIMESTAMPTZ NOT NULL, + device INTEGER, + temperature FLOAT +); +-- Create a hypertable and show that it does not have any privileges +SELECT * FROM create_hypertable('conditions', 'time', chunk_time_interval => '5 days'::interval); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 1 | public | conditions | t +(1 row) + +INSERT INTO conditions +SELECT time, (random()*30)::int, random()*80 - 40 +FROM generate_series('2018-12-01 00:00'::timestamp, '2018-12-10 00:00'::timestamp, '1h') AS time; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-------------------+-------------------+---------- + public | conditions | table | | | +(1 row) + +\z _timescaledb_internal.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | | | + _timescaledb_internal | _hyper_1_2_chunk | table | | | + _timescaledb_internal | _hyper_1_3_chunk | table | | | +(3 rows) + +-- Add privileges and show that they propagate to the chunks +GRANT SELECT, INSERT ON conditions TO PUBLIC; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxt/super_user+| | + | | | =ar/super_user | | +(1 row) + +\z _timescaledb_internal.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | super_user=arwdDxt/super_user+| | + | | | =ar/super_user | | + _timescaledb_internal | _hyper_1_2_chunk | table | super_user=arwdDxt/super_user+| | + | | | =ar/super_user | | + _timescaledb_internal | _hyper_1_3_chunk | table | super_user=arwdDxt/super_user+| | + | | | =ar/super_user | | +(3 rows) + +-- Create some more chunks and show that they also get the privileges. +INSERT INTO conditions +SELECT time, (random()*30)::int, random()*80 - 40 +FROM generate_series('2018-12-10 00:00'::timestamp, '2018-12-20 00:00'::timestamp, '1h') AS time; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxt/super_user+| | + | | | =ar/super_user | | +(1 row) + +\z _timescaledb_internal.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | super_user=arwdDxt/super_user+| | + | | | =ar/super_user | | + _timescaledb_internal | _hyper_1_2_chunk | table | super_user=arwdDxt/super_user+| | + | | | =ar/super_user | | + _timescaledb_internal | _hyper_1_3_chunk | table | super_user=arwdDxt/super_user+| | + | | | =ar/super_user | | + _timescaledb_internal | _hyper_1_4_chunk | table | super_user=arwdDxt/super_user+| | + | | | =ar/super_user | | + _timescaledb_internal | _hyper_1_5_chunk | table | super_user=arwdDxt/super_user+| | + | | | =ar/super_user | | +(5 rows) + +-- Revoke one of the privileges and show that it propagate to the +-- chunks. +REVOKE INSERT ON conditions FROM PUBLIC; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | +(1 row) + +\z _timescaledb_internal.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_2_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_3_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_4_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_5_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | +(5 rows) + +-- Add some more chunks and show that it inherits the grants from the +-- hypertable. +INSERT INTO conditions +SELECT time, (random()*30)::int, random()*80 - 40 +FROM generate_series('2018-12-20 00:00'::timestamp, '2018-12-30 00:00'::timestamp, '1h') AS time; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | +(1 row) + +\z _timescaledb_internal.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_2_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_3_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_4_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_5_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_6_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_7_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | +(7 rows) + +-- Change grants of one chunk explicitly and check that it is possible +\z _timescaledb_internal._hyper_1_1_chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | +(1 row) + +GRANT UPDATE ON _timescaledb_internal._hyper_1_1_chunk TO PUBLIC; +\z _timescaledb_internal._hyper_1_1_chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | super_user=arwdDxt/super_user+| | + | | | =rw/super_user | | +(1 row) + +REVOKE SELECT ON _timescaledb_internal._hyper_1_1_chunk FROM PUBLIC; +\z _timescaledb_internal._hyper_1_1_chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | super_user=arwdDxt/super_user+| | + | | | =w/super_user | | +(1 row) + +-- Check that revoking a permission first on the chunk and then on the +-- hypertable that was added through the hypertable (INSERT and +-- SELECT, in this case) still do not copy permissions from the +-- hypertable (so there should not be a select permission to public on +-- the chunk but there should be one on the hypertable). +GRANT INSERT ON conditions TO PUBLIC; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxt/super_user+| | + | | | =ar/super_user | | +(1 row) + +\z _timescaledb_internal._hyper_1_2_chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_2_chunk | table | super_user=arwdDxt/super_user+| | + | | | =ar/super_user | | +(1 row) + +REVOKE SELECT ON _timescaledb_internal._hyper_1_2_chunk FROM PUBLIC; +REVOKE INSERT ON conditions FROM PUBLIC; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | +(1 row) + +\z _timescaledb_internal._hyper_1_2_chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_2_chunk | table | super_user=arwdDxt/super_user | | +(1 row) + +-- Check that granting permissions through hypertable does not remove +-- separate grants on chunk. +GRANT UPDATE ON _timescaledb_internal._hyper_1_3_chunk TO PUBLIC; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | +(1 row) + +\z _timescaledb_internal._hyper_1_3_chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_3_chunk | table | super_user=arwdDxt/super_user+| | + | | | =rw/super_user | | +(1 row) + +GRANT INSERT ON conditions TO PUBLIC; +REVOKE INSERT ON conditions FROM PUBLIC; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | +(1 row) + +\z _timescaledb_internal._hyper_1_3_chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_3_chunk | table | super_user=arwdDxt/super_user+| | + | | | =rw/super_user | | +(1 row) + +-- Check that GRANT ALL IN SCHEMA adds privileges to the parent +-- and also goes to chunks in another schema +GRANT ALL ON ALL TABLES IN SCHEMA public TO :ROLE_DEFAULT_PERM_USER_2; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+----------------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxt/super_user +| | + | | | =r/super_user +| | + | | | default_perm_user_2=arwdDxt/super_user | | +(1 row) + +\z _timescaledb_internal.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+----------------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | super_user=arwdDxt/super_user +| | + | | | =w/super_user +| | + | | | default_perm_user_2=arwdDxt/super_user | | + _timescaledb_internal | _hyper_1_2_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user_2=arwdDxt/super_user | | + _timescaledb_internal | _hyper_1_3_chunk | table | super_user=arwdDxt/super_user +| | + | | | =rw/super_user +| | + | | | default_perm_user_2=arwdDxt/super_user | | + _timescaledb_internal | _hyper_1_4_chunk | table | super_user=arwdDxt/super_user +| | + | | | =r/super_user +| | + | | | default_perm_user_2=arwdDxt/super_user | | + _timescaledb_internal | _hyper_1_5_chunk | table | super_user=arwdDxt/super_user +| | + | | | =r/super_user +| | + | | | default_perm_user_2=arwdDxt/super_user | | + _timescaledb_internal | _hyper_1_6_chunk | table | super_user=arwdDxt/super_user +| | + | | | =r/super_user +| | + | | | default_perm_user_2=arwdDxt/super_user | | + _timescaledb_internal | _hyper_1_7_chunk | table | super_user=arwdDxt/super_user +| | + | | | =r/super_user +| | + | | | default_perm_user_2=arwdDxt/super_user | | +(7 rows) + +-- Check that REVOKE ALL IN SCHEMA removes privileges of the parent +-- and also goes to chunks in another schema +REVOKE ALL ON ALL TABLES IN SCHEMA public FROM :ROLE_DEFAULT_PERM_USER_2; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | +(1 row) + +\z _timescaledb_internal.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | super_user=arwdDxt/super_user+| | + | | | =w/super_user | | + _timescaledb_internal | _hyper_1_2_chunk | table | super_user=arwdDxt/super_user | | + _timescaledb_internal | _hyper_1_3_chunk | table | super_user=arwdDxt/super_user+| | + | | | =rw/super_user | | + _timescaledb_internal | _hyper_1_4_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_5_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_6_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_7_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | +(7 rows) + +-- Create chunks in the same schema as the hypertable and check that +-- they also get the same privileges as the hypertable +CREATE TABLE measurements( + time TIMESTAMPTZ NOT NULL, + device INTEGER, + temperature FLOAT +); +-- Create a hypertable with chunks in the same schema +SELECT * FROM create_hypertable('public.measurements', 'time', chunk_time_interval => '5 days'::interval, associated_schema_name => 'public'); + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 2 | public | measurements | t +(1 row) + +INSERT INTO measurements +SELECT time, (random()*30)::int, random()*80 - 40 +FROM generate_series('2018-12-01 00:00'::timestamp, '2018-12-10 00:00'::timestamp, '1h') AS time; +-- GRANT ALL and check privileges +GRANT ALL ON ALL TABLES IN SCHEMA public TO :ROLE_DEFAULT_PERM_USER_2; +\z measurements + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+--------------+-------+----------------------------------------+-------------------+---------- + public | measurements | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user_2=arwdDxt/super_user | | +(1 row) + +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+----------------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxt/super_user +| | + | | | =r/super_user +| | + | | | default_perm_user_2=arwdDxt/super_user | | +(1 row) + +\z public.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+-------------------+-------+----------------------------------------+-------------------+---------- + public | _hyper_2_10_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user_2=arwdDxt/super_user | | + public | _hyper_2_8_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user_2=arwdDxt/super_user | | + public | _hyper_2_9_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user_2=arwdDxt/super_user | | +(3 rows) + +-- REVOKE ALL and check privileges +REVOKE ALL ON ALL TABLES IN SCHEMA public FROM :ROLE_DEFAULT_PERM_USER_2; +\z measurements + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+--------------+-------+-------------------------------+-------------------+---------- + public | measurements | table | super_user=arwdDxt/super_user | | +(1 row) + +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | +(1 row) + +\z public.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+-------------------+-------+-------------------------------+-------------------+---------- + public | _hyper_2_10_chunk | table | super_user=arwdDxt/super_user | | + public | _hyper_2_8_chunk | table | super_user=arwdDxt/super_user | | + public | _hyper_2_9_chunk | table | super_user=arwdDxt/super_user | | +(3 rows) + +-- GRANT/REVOKE in an empty schema (Issue #4581) +CREATE SCHEMA test_grant; +GRANT ALL ON ALL TABLES IN SCHEMA test_grant TO :ROLE_DEFAULT_PERM_USER_2; +REVOKE ALL ON ALL TABLES IN SCHEMA test_grant FROM :ROLE_DEFAULT_PERM_USER_2; diff --git a/test/expected/grant_hypertable-16.out b/test/expected/grant_hypertable-16.out new file mode 100644 index 00000000000..96e3d41c4d1 --- /dev/null +++ b/test/expected/grant_hypertable-16.out @@ -0,0 +1,394 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE TABLE conditions( + time TIMESTAMPTZ NOT NULL, + device INTEGER, + temperature FLOAT +); +-- Create a hypertable and show that it does not have any privileges +SELECT * FROM create_hypertable('conditions', 'time', chunk_time_interval => '5 days'::interval); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 1 | public | conditions | t +(1 row) + +INSERT INTO conditions +SELECT time, (random()*30)::int, random()*80 - 40 +FROM generate_series('2018-12-01 00:00'::timestamp, '2018-12-10 00:00'::timestamp, '1h') AS time; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-------------------+-------------------+---------- + public | conditions | table | | | +(1 row) + +\z _timescaledb_internal.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | | | + _timescaledb_internal | _hyper_1_2_chunk | table | | | + _timescaledb_internal | _hyper_1_3_chunk | table | | | +(3 rows) + +-- Add privileges and show that they propagate to the chunks +GRANT SELECT, INSERT ON conditions TO PUBLIC; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxt/super_user+| | + | | | =ar/super_user | | +(1 row) + +\z _timescaledb_internal.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | super_user=arwdDxt/super_user+| | + | | | =ar/super_user | | + _timescaledb_internal | _hyper_1_2_chunk | table | super_user=arwdDxt/super_user+| | + | | | =ar/super_user | | + _timescaledb_internal | _hyper_1_3_chunk | table | super_user=arwdDxt/super_user+| | + | | | =ar/super_user | | +(3 rows) + +-- Create some more chunks and show that they also get the privileges. +INSERT INTO conditions +SELECT time, (random()*30)::int, random()*80 - 40 +FROM generate_series('2018-12-10 00:00'::timestamp, '2018-12-20 00:00'::timestamp, '1h') AS time; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxt/super_user+| | + | | | =ar/super_user | | +(1 row) + +\z _timescaledb_internal.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | super_user=arwdDxt/super_user+| | + | | | =ar/super_user | | + _timescaledb_internal | _hyper_1_2_chunk | table | super_user=arwdDxt/super_user+| | + | | | =ar/super_user | | + _timescaledb_internal | _hyper_1_3_chunk | table | super_user=arwdDxt/super_user+| | + | | | =ar/super_user | | + _timescaledb_internal | _hyper_1_4_chunk | table | super_user=arwdDxt/super_user+| | + | | | =ar/super_user | | + _timescaledb_internal | _hyper_1_5_chunk | table | super_user=arwdDxt/super_user+| | + | | | =ar/super_user | | +(5 rows) + +-- Revoke one of the privileges and show that it propagate to the +-- chunks. +REVOKE INSERT ON conditions FROM PUBLIC; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | +(1 row) + +\z _timescaledb_internal.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_2_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_3_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_4_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_5_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | +(5 rows) + +-- Add some more chunks and show that it inherits the grants from the +-- hypertable. +INSERT INTO conditions +SELECT time, (random()*30)::int, random()*80 - 40 +FROM generate_series('2018-12-20 00:00'::timestamp, '2018-12-30 00:00'::timestamp, '1h') AS time; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | +(1 row) + +\z _timescaledb_internal.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_2_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_3_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_4_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_5_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_6_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_7_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | +(7 rows) + +-- Change grants of one chunk explicitly and check that it is possible +\z _timescaledb_internal._hyper_1_1_chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | +(1 row) + +GRANT UPDATE ON _timescaledb_internal._hyper_1_1_chunk TO PUBLIC; +\z _timescaledb_internal._hyper_1_1_chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | super_user=arwdDxt/super_user+| | + | | | =rw/super_user | | +(1 row) + +REVOKE SELECT ON _timescaledb_internal._hyper_1_1_chunk FROM PUBLIC; +\z _timescaledb_internal._hyper_1_1_chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | super_user=arwdDxt/super_user+| | + | | | =w/super_user | | +(1 row) + +-- Check that revoking a permission first on the chunk and then on the +-- hypertable that was added through the hypertable (INSERT and +-- SELECT, in this case) still do not copy permissions from the +-- hypertable (so there should not be a select permission to public on +-- the chunk but there should be one on the hypertable). +GRANT INSERT ON conditions TO PUBLIC; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxt/super_user+| | + | | | =ar/super_user | | +(1 row) + +\z _timescaledb_internal._hyper_1_2_chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_2_chunk | table | super_user=arwdDxt/super_user+| | + | | | =ar/super_user | | +(1 row) + +REVOKE SELECT ON _timescaledb_internal._hyper_1_2_chunk FROM PUBLIC; +REVOKE INSERT ON conditions FROM PUBLIC; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | +(1 row) + +\z _timescaledb_internal._hyper_1_2_chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_2_chunk | table | super_user=arwdDxt/super_user | | +(1 row) + +-- Check that granting permissions through hypertable does not remove +-- separate grants on chunk. +GRANT UPDATE ON _timescaledb_internal._hyper_1_3_chunk TO PUBLIC; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | +(1 row) + +\z _timescaledb_internal._hyper_1_3_chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_3_chunk | table | super_user=arwdDxt/super_user+| | + | | | =rw/super_user | | +(1 row) + +GRANT INSERT ON conditions TO PUBLIC; +REVOKE INSERT ON conditions FROM PUBLIC; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | +(1 row) + +\z _timescaledb_internal._hyper_1_3_chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_3_chunk | table | super_user=arwdDxt/super_user+| | + | | | =rw/super_user | | +(1 row) + +-- Check that GRANT ALL IN SCHEMA adds privileges to the parent +-- and also goes to chunks in another schema +GRANT ALL ON ALL TABLES IN SCHEMA public TO :ROLE_DEFAULT_PERM_USER_2; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+----------------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxt/super_user +| | + | | | =r/super_user +| | + | | | default_perm_user_2=arwdDxt/super_user | | +(1 row) + +\z _timescaledb_internal.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+----------------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | super_user=arwdDxt/super_user +| | + | | | =w/super_user +| | + | | | default_perm_user_2=arwdDxt/super_user | | + _timescaledb_internal | _hyper_1_2_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user_2=arwdDxt/super_user | | + _timescaledb_internal | _hyper_1_3_chunk | table | super_user=arwdDxt/super_user +| | + | | | =rw/super_user +| | + | | | default_perm_user_2=arwdDxt/super_user | | + _timescaledb_internal | _hyper_1_4_chunk | table | super_user=arwdDxt/super_user +| | + | | | =r/super_user +| | + | | | default_perm_user_2=arwdDxt/super_user | | + _timescaledb_internal | _hyper_1_5_chunk | table | super_user=arwdDxt/super_user +| | + | | | =r/super_user +| | + | | | default_perm_user_2=arwdDxt/super_user | | + _timescaledb_internal | _hyper_1_6_chunk | table | super_user=arwdDxt/super_user +| | + | | | =r/super_user +| | + | | | default_perm_user_2=arwdDxt/super_user | | + _timescaledb_internal | _hyper_1_7_chunk | table | super_user=arwdDxt/super_user +| | + | | | =r/super_user +| | + | | | default_perm_user_2=arwdDxt/super_user | | +(7 rows) + +-- Check that REVOKE ALL IN SCHEMA removes privileges of the parent +-- and also goes to chunks in another schema +REVOKE ALL ON ALL TABLES IN SCHEMA public FROM :ROLE_DEFAULT_PERM_USER_2; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | +(1 row) + +\z _timescaledb_internal.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | super_user=arwdDxt/super_user+| | + | | | =w/super_user | | + _timescaledb_internal | _hyper_1_2_chunk | table | super_user=arwdDxt/super_user | | + _timescaledb_internal | _hyper_1_3_chunk | table | super_user=arwdDxt/super_user+| | + | | | =rw/super_user | | + _timescaledb_internal | _hyper_1_4_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_5_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_6_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_7_chunk | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | +(7 rows) + +-- Create chunks in the same schema as the hypertable and check that +-- they also get the same privileges as the hypertable +CREATE TABLE measurements( + time TIMESTAMPTZ NOT NULL, + device INTEGER, + temperature FLOAT +); +-- Create a hypertable with chunks in the same schema +SELECT * FROM create_hypertable('public.measurements', 'time', chunk_time_interval => '5 days'::interval, associated_schema_name => 'public'); + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 2 | public | measurements | t +(1 row) + +INSERT INTO measurements +SELECT time, (random()*30)::int, random()*80 - 40 +FROM generate_series('2018-12-01 00:00'::timestamp, '2018-12-10 00:00'::timestamp, '1h') AS time; +-- GRANT ALL and check privileges +GRANT ALL ON ALL TABLES IN SCHEMA public TO :ROLE_DEFAULT_PERM_USER_2; +\z measurements + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+--------------+-------+----------------------------------------+-------------------+---------- + public | measurements | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user_2=arwdDxt/super_user | | +(1 row) + +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+----------------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxt/super_user +| | + | | | =r/super_user +| | + | | | default_perm_user_2=arwdDxt/super_user | | +(1 row) + +\z public.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+-------------------+-------+----------------------------------------+-------------------+---------- + public | _hyper_2_10_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user_2=arwdDxt/super_user | | + public | _hyper_2_8_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user_2=arwdDxt/super_user | | + public | _hyper_2_9_chunk | table | super_user=arwdDxt/super_user +| | + | | | default_perm_user_2=arwdDxt/super_user | | +(3 rows) + +-- REVOKE ALL and check privileges +REVOKE ALL ON ALL TABLES IN SCHEMA public FROM :ROLE_DEFAULT_PERM_USER_2; +\z measurements + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+--------------+-------+-------------------------------+-------------------+---------- + public | measurements | table | super_user=arwdDxt/super_user | | +(1 row) + +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxt/super_user+| | + | | | =r/super_user | | +(1 row) + +\z public.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+-------------------+-------+-------------------------------+-------------------+---------- + public | _hyper_2_10_chunk | table | super_user=arwdDxt/super_user | | + public | _hyper_2_8_chunk | table | super_user=arwdDxt/super_user | | + public | _hyper_2_9_chunk | table | super_user=arwdDxt/super_user | | +(3 rows) + +-- GRANT/REVOKE in an empty schema (Issue #4581) +CREATE SCHEMA test_grant; +GRANT ALL ON ALL TABLES IN SCHEMA test_grant TO :ROLE_DEFAULT_PERM_USER_2; +REVOKE ALL ON ALL TABLES IN SCHEMA test_grant FROM :ROLE_DEFAULT_PERM_USER_2; diff --git a/test/expected/grant_hypertable-17.out b/test/expected/grant_hypertable-17.out new file mode 100644 index 00000000000..5dc00dba7a9 --- /dev/null +++ b/test/expected/grant_hypertable-17.out @@ -0,0 +1,394 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE TABLE conditions( + time TIMESTAMPTZ NOT NULL, + device INTEGER, + temperature FLOAT +); +-- Create a hypertable and show that it does not have any privileges +SELECT * FROM create_hypertable('conditions', 'time', chunk_time_interval => '5 days'::interval); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 1 | public | conditions | t +(1 row) + +INSERT INTO conditions +SELECT time, (random()*30)::int, random()*80 - 40 +FROM generate_series('2018-12-01 00:00'::timestamp, '2018-12-10 00:00'::timestamp, '1h') AS time; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-------------------+-------------------+---------- + public | conditions | table | | | +(1 row) + +\z _timescaledb_internal.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | | | + _timescaledb_internal | _hyper_1_2_chunk | table | | | + _timescaledb_internal | _hyper_1_3_chunk | table | | | +(3 rows) + +-- Add privileges and show that they propagate to the chunks +GRANT SELECT, INSERT ON conditions TO PUBLIC; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+--------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxtm/super_user+| | + | | | =ar/super_user | | +(1 row) + +\z _timescaledb_internal.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+--------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =ar/super_user | | + _timescaledb_internal | _hyper_1_2_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =ar/super_user | | + _timescaledb_internal | _hyper_1_3_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =ar/super_user | | +(3 rows) + +-- Create some more chunks and show that they also get the privileges. +INSERT INTO conditions +SELECT time, (random()*30)::int, random()*80 - 40 +FROM generate_series('2018-12-10 00:00'::timestamp, '2018-12-20 00:00'::timestamp, '1h') AS time; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+--------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxtm/super_user+| | + | | | =ar/super_user | | +(1 row) + +\z _timescaledb_internal.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+--------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =ar/super_user | | + _timescaledb_internal | _hyper_1_2_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =ar/super_user | | + _timescaledb_internal | _hyper_1_3_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =ar/super_user | | + _timescaledb_internal | _hyper_1_4_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =ar/super_user | | + _timescaledb_internal | _hyper_1_5_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =ar/super_user | | +(5 rows) + +-- Revoke one of the privileges and show that it propagate to the +-- chunks. +REVOKE INSERT ON conditions FROM PUBLIC; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+--------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxtm/super_user+| | + | | | =r/super_user | | +(1 row) + +\z _timescaledb_internal.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+--------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_2_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_3_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_4_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_5_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =r/super_user | | +(5 rows) + +-- Add some more chunks and show that it inherits the grants from the +-- hypertable. +INSERT INTO conditions +SELECT time, (random()*30)::int, random()*80 - 40 +FROM generate_series('2018-12-20 00:00'::timestamp, '2018-12-30 00:00'::timestamp, '1h') AS time; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+--------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxtm/super_user+| | + | | | =r/super_user | | +(1 row) + +\z _timescaledb_internal.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+--------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_2_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_3_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_4_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_5_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_6_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_7_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =r/super_user | | +(7 rows) + +-- Change grants of one chunk explicitly and check that it is possible +\z _timescaledb_internal._hyper_1_1_chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+--------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =r/super_user | | +(1 row) + +GRANT UPDATE ON _timescaledb_internal._hyper_1_1_chunk TO PUBLIC; +\z _timescaledb_internal._hyper_1_1_chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+--------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =rw/super_user | | +(1 row) + +REVOKE SELECT ON _timescaledb_internal._hyper_1_1_chunk FROM PUBLIC; +\z _timescaledb_internal._hyper_1_1_chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+--------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =w/super_user | | +(1 row) + +-- Check that revoking a permission first on the chunk and then on the +-- hypertable that was added through the hypertable (INSERT and +-- SELECT, in this case) still do not copy permissions from the +-- hypertable (so there should not be a select permission to public on +-- the chunk but there should be one on the hypertable). +GRANT INSERT ON conditions TO PUBLIC; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+--------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxtm/super_user+| | + | | | =ar/super_user | | +(1 row) + +\z _timescaledb_internal._hyper_1_2_chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+--------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_2_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =ar/super_user | | +(1 row) + +REVOKE SELECT ON _timescaledb_internal._hyper_1_2_chunk FROM PUBLIC; +REVOKE INSERT ON conditions FROM PUBLIC; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+--------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxtm/super_user+| | + | | | =r/super_user | | +(1 row) + +\z _timescaledb_internal._hyper_1_2_chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+--------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_2_chunk | table | super_user=arwdDxtm/super_user | | +(1 row) + +-- Check that granting permissions through hypertable does not remove +-- separate grants on chunk. +GRANT UPDATE ON _timescaledb_internal._hyper_1_3_chunk TO PUBLIC; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+--------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxtm/super_user+| | + | | | =r/super_user | | +(1 row) + +\z _timescaledb_internal._hyper_1_3_chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+--------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_3_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =rw/super_user | | +(1 row) + +GRANT INSERT ON conditions TO PUBLIC; +REVOKE INSERT ON conditions FROM PUBLIC; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+--------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxtm/super_user+| | + | | | =r/super_user | | +(1 row) + +\z _timescaledb_internal._hyper_1_3_chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+--------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_3_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =rw/super_user | | +(1 row) + +-- Check that GRANT ALL IN SCHEMA adds privileges to the parent +-- and also goes to chunks in another schema +GRANT ALL ON ALL TABLES IN SCHEMA public TO :ROLE_DEFAULT_PERM_USER_2; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-----------------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxtm/super_user +| | + | | | =r/super_user +| | + | | | default_perm_user_2=arwdDxtm/super_user | | +(1 row) + +\z _timescaledb_internal.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+-----------------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | super_user=arwdDxtm/super_user +| | + | | | =w/super_user +| | + | | | default_perm_user_2=arwdDxtm/super_user | | + _timescaledb_internal | _hyper_1_2_chunk | table | super_user=arwdDxtm/super_user +| | + | | | default_perm_user_2=arwdDxtm/super_user | | + _timescaledb_internal | _hyper_1_3_chunk | table | super_user=arwdDxtm/super_user +| | + | | | =rw/super_user +| | + | | | default_perm_user_2=arwdDxtm/super_user | | + _timescaledb_internal | _hyper_1_4_chunk | table | super_user=arwdDxtm/super_user +| | + | | | =r/super_user +| | + | | | default_perm_user_2=arwdDxtm/super_user | | + _timescaledb_internal | _hyper_1_5_chunk | table | super_user=arwdDxtm/super_user +| | + | | | =r/super_user +| | + | | | default_perm_user_2=arwdDxtm/super_user | | + _timescaledb_internal | _hyper_1_6_chunk | table | super_user=arwdDxtm/super_user +| | + | | | =r/super_user +| | + | | | default_perm_user_2=arwdDxtm/super_user | | + _timescaledb_internal | _hyper_1_7_chunk | table | super_user=arwdDxtm/super_user +| | + | | | =r/super_user +| | + | | | default_perm_user_2=arwdDxtm/super_user | | +(7 rows) + +-- Check that REVOKE ALL IN SCHEMA removes privileges of the parent +-- and also goes to chunks in another schema +REVOKE ALL ON ALL TABLES IN SCHEMA public FROM :ROLE_DEFAULT_PERM_USER_2; +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+--------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxtm/super_user+| | + | | | =r/super_user | | +(1 row) + +\z _timescaledb_internal.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +-----------------------+------------------+-------+--------------------------------+-------------------+---------- + _timescaledb_internal | _hyper_1_1_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =w/super_user | | + _timescaledb_internal | _hyper_1_2_chunk | table | super_user=arwdDxtm/super_user | | + _timescaledb_internal | _hyper_1_3_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =rw/super_user | | + _timescaledb_internal | _hyper_1_4_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_5_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_6_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =r/super_user | | + _timescaledb_internal | _hyper_1_7_chunk | table | super_user=arwdDxtm/super_user+| | + | | | =r/super_user | | +(7 rows) + +-- Create chunks in the same schema as the hypertable and check that +-- they also get the same privileges as the hypertable +CREATE TABLE measurements( + time TIMESTAMPTZ NOT NULL, + device INTEGER, + temperature FLOAT +); +-- Create a hypertable with chunks in the same schema +SELECT * FROM create_hypertable('public.measurements', 'time', chunk_time_interval => '5 days'::interval, associated_schema_name => 'public'); + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 2 | public | measurements | t +(1 row) + +INSERT INTO measurements +SELECT time, (random()*30)::int, random()*80 - 40 +FROM generate_series('2018-12-01 00:00'::timestamp, '2018-12-10 00:00'::timestamp, '1h') AS time; +-- GRANT ALL and check privileges +GRANT ALL ON ALL TABLES IN SCHEMA public TO :ROLE_DEFAULT_PERM_USER_2; +\z measurements + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+--------------+-------+-----------------------------------------+-------------------+---------- + public | measurements | table | super_user=arwdDxtm/super_user +| | + | | | default_perm_user_2=arwdDxtm/super_user | | +(1 row) + +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+-----------------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxtm/super_user +| | + | | | =r/super_user +| | + | | | default_perm_user_2=arwdDxtm/super_user | | +(1 row) + +\z public.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+-------------------+-------+-----------------------------------------+-------------------+---------- + public | _hyper_2_10_chunk | table | super_user=arwdDxtm/super_user +| | + | | | default_perm_user_2=arwdDxtm/super_user | | + public | _hyper_2_8_chunk | table | super_user=arwdDxtm/super_user +| | + | | | default_perm_user_2=arwdDxtm/super_user | | + public | _hyper_2_9_chunk | table | super_user=arwdDxtm/super_user +| | + | | | default_perm_user_2=arwdDxtm/super_user | | +(3 rows) + +-- REVOKE ALL and check privileges +REVOKE ALL ON ALL TABLES IN SCHEMA public FROM :ROLE_DEFAULT_PERM_USER_2; +\z measurements + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+--------------+-------+--------------------------------+-------------------+---------- + public | measurements | table | super_user=arwdDxtm/super_user | | +(1 row) + +\z conditions + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+------------+-------+--------------------------------+-------------------+---------- + public | conditions | table | super_user=arwdDxtm/super_user+| | + | | | =r/super_user | | +(1 row) + +\z public.*chunk + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------+-------------------+-------+--------------------------------+-------------------+---------- + public | _hyper_2_10_chunk | table | super_user=arwdDxtm/super_user | | + public | _hyper_2_8_chunk | table | super_user=arwdDxtm/super_user | | + public | _hyper_2_9_chunk | table | super_user=arwdDxtm/super_user | | +(3 rows) + +-- GRANT/REVOKE in an empty schema (Issue #4581) +CREATE SCHEMA test_grant; +GRANT ALL ON ALL TABLES IN SCHEMA test_grant TO :ROLE_DEFAULT_PERM_USER_2; +REVOKE ALL ON ALL TABLES IN SCHEMA test_grant FROM :ROLE_DEFAULT_PERM_USER_2; diff --git a/test/expected/insert-14.out b/test/expected/insert-14.out index b4daca98c34..5fb76cbee06 100644 --- a/test/expected/insert-14.out +++ b/test/expected/insert-14.out @@ -1,6 +1,7 @@ -- This file and its contents are licensed under the Apache License 2.0. -- Please see the included NOTICE for copyright information and -- LICENSE-APACHE for a copy of the license. +SET enable_seqscan TO off; \ir include/insert_two_partitions.sql -- This file and its contents are licensed under the Apache License 2.0. -- Please see the included NOTICE for copyright information and @@ -326,15 +327,15 @@ SELECT 1 \g | grep -v "Planning" | grep -v "Execution" -- INSERTs can exclude chunks based on constraints EXPLAIN (costs off) INSERT INTO chunk_assert_fail SELECT i, j FROM chunk_assert_fail; - QUERY PLAN -------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------------------------------------------------- Custom Scan (HypertableModify) -> Insert on chunk_assert_fail -> Custom Scan (ChunkDispatch) -> Append - -> Seq Scan on _hyper_5_11_chunk - -> Seq Scan on _hyper_5_12_chunk - -> Seq Scan on _hyper_5_13_chunk + -> Index Only Scan using _hyper_5_11_chunk_chunk_assert_fail_j_i_idx on _hyper_5_11_chunk + -> Index Only Scan using _hyper_5_12_chunk_chunk_assert_fail_j_i_idx on _hyper_5_12_chunk + -> Index Only Scan using _hyper_5_13_chunk_chunk_assert_fail_j_i_idx on _hyper_5_13_chunk (7 rows) EXPLAIN (costs off) INSERT INTO chunk_assert_fail SELECT i, j FROM chunk_assert_fail WHERE i < 1; diff --git a/test/expected/insert-15.out b/test/expected/insert-15.out index b4daca98c34..5fb76cbee06 100644 --- a/test/expected/insert-15.out +++ b/test/expected/insert-15.out @@ -1,6 +1,7 @@ -- This file and its contents are licensed under the Apache License 2.0. -- Please see the included NOTICE for copyright information and -- LICENSE-APACHE for a copy of the license. +SET enable_seqscan TO off; \ir include/insert_two_partitions.sql -- This file and its contents are licensed under the Apache License 2.0. -- Please see the included NOTICE for copyright information and @@ -326,15 +327,15 @@ SELECT 1 \g | grep -v "Planning" | grep -v "Execution" -- INSERTs can exclude chunks based on constraints EXPLAIN (costs off) INSERT INTO chunk_assert_fail SELECT i, j FROM chunk_assert_fail; - QUERY PLAN -------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------------------------------------------------- Custom Scan (HypertableModify) -> Insert on chunk_assert_fail -> Custom Scan (ChunkDispatch) -> Append - -> Seq Scan on _hyper_5_11_chunk - -> Seq Scan on _hyper_5_12_chunk - -> Seq Scan on _hyper_5_13_chunk + -> Index Only Scan using _hyper_5_11_chunk_chunk_assert_fail_j_i_idx on _hyper_5_11_chunk + -> Index Only Scan using _hyper_5_12_chunk_chunk_assert_fail_j_i_idx on _hyper_5_12_chunk + -> Index Only Scan using _hyper_5_13_chunk_chunk_assert_fail_j_i_idx on _hyper_5_13_chunk (7 rows) EXPLAIN (costs off) INSERT INTO chunk_assert_fail SELECT i, j FROM chunk_assert_fail WHERE i < 1; diff --git a/test/expected/insert-16.out b/test/expected/insert-16.out index 3d49a902e36..6f16fa765b2 100644 --- a/test/expected/insert-16.out +++ b/test/expected/insert-16.out @@ -1,6 +1,7 @@ -- This file and its contents are licensed under the Apache License 2.0. -- Please see the included NOTICE for copyright information and -- LICENSE-APACHE for a copy of the license. +SET enable_seqscan TO off; \ir include/insert_two_partitions.sql -- This file and its contents are licensed under the Apache License 2.0. -- Please see the included NOTICE for copyright information and @@ -326,15 +327,15 @@ SELECT 1 \g | grep -v "Planning" | grep -v "Execution" -- INSERTs can exclude chunks based on constraints EXPLAIN (costs off) INSERT INTO chunk_assert_fail SELECT i, j FROM chunk_assert_fail; - QUERY PLAN -------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------------------------------------------------- Custom Scan (HypertableModify) -> Insert on chunk_assert_fail -> Custom Scan (ChunkDispatch) -> Append - -> Seq Scan on _hyper_5_11_chunk - -> Seq Scan on _hyper_5_12_chunk - -> Seq Scan on _hyper_5_13_chunk + -> Index Only Scan using _hyper_5_11_chunk_chunk_assert_fail_j_i_idx on _hyper_5_11_chunk + -> Index Only Scan using _hyper_5_12_chunk_chunk_assert_fail_j_i_idx on _hyper_5_12_chunk + -> Index Only Scan using _hyper_5_13_chunk_chunk_assert_fail_j_i_idx on _hyper_5_13_chunk (7 rows) EXPLAIN (costs off) INSERT INTO chunk_assert_fail SELECT i, j FROM chunk_assert_fail WHERE i < 1; diff --git a/test/expected/insert-17.out b/test/expected/insert-17.out index 3d49a902e36..b0827aeca78 100644 --- a/test/expected/insert-17.out +++ b/test/expected/insert-17.out @@ -1,6 +1,7 @@ -- This file and its contents are licensed under the Apache License 2.0. -- Please see the included NOTICE for copyright information and -- LICENSE-APACHE for a copy of the license. +SET enable_seqscan TO off; \ir include/insert_two_partitions.sql -- This file and its contents are licensed under the Apache License 2.0. -- Please see the included NOTICE for copyright information and @@ -326,15 +327,15 @@ SELECT 1 \g | grep -v "Planning" | grep -v "Execution" -- INSERTs can exclude chunks based on constraints EXPLAIN (costs off) INSERT INTO chunk_assert_fail SELECT i, j FROM chunk_assert_fail; - QUERY PLAN -------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------------------------------------------------- Custom Scan (HypertableModify) -> Insert on chunk_assert_fail -> Custom Scan (ChunkDispatch) -> Append - -> Seq Scan on _hyper_5_11_chunk - -> Seq Scan on _hyper_5_12_chunk - -> Seq Scan on _hyper_5_13_chunk + -> Index Only Scan using _hyper_5_11_chunk_chunk_assert_fail_j_i_idx on _hyper_5_11_chunk + -> Index Only Scan using _hyper_5_12_chunk_chunk_assert_fail_j_i_idx on _hyper_5_12_chunk + -> Index Only Scan using _hyper_5_13_chunk_chunk_assert_fail_j_i_idx on _hyper_5_13_chunk (7 rows) EXPLAIN (costs off) INSERT INTO chunk_assert_fail SELECT i, j FROM chunk_assert_fail WHERE i < 1; @@ -616,16 +617,16 @@ WHERE NOT EXISTS ( WHERE sensor_id = 1 AND toe = '2020-05-09 10:34:35.296288+00' ); - QUERY PLAN ------------------------------------------------ + QUERY PLAN +-------------------------------------------------------------- Custom Scan (HypertableModify) - InitPlan 1 (returns $0) + InitPlan 1 -> Result One-Time Filter: false -> Insert on readings -> Custom Scan (ChunkDispatch) -> Result - One-Time Filter: (NOT $0) + One-Time Filter: (NOT (InitPlan 1).col1) (8 rows) INSERT INTO readings diff --git a/test/expected/null_exclusion.out b/test/expected/null_exclusion-14.out similarity index 100% rename from test/expected/null_exclusion.out rename to test/expected/null_exclusion-14.out diff --git a/test/expected/null_exclusion-15.out b/test/expected/null_exclusion-15.out new file mode 100644 index 00000000000..9c4242ab361 --- /dev/null +++ b/test/expected/null_exclusion-15.out @@ -0,0 +1,126 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +create table metrics(ts timestamp, id int, value float); +select create_hypertable('metrics', 'ts'); +WARNING: column type "timestamp without time zone" used for "ts" does not follow best practices +NOTICE: adding not-null constraint to column "ts" + create_hypertable +---------------------- + (1,public,metrics,t) +(1 row) + +insert into metrics values ('2022-02-02 02:02:02', 2, 2.), + ('2023-03-03 03:03:03', 3, 3.); +analyze metrics; +-- non-const condition +explain (analyze, costs off, summary off, timing off) +select * from metrics +where ts >= (select max(ts) from metrics); + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics (actual rows=1 loops=1) + Chunks excluded during runtime: 1 + InitPlan 2 (returns $1) + -> Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics metrics_1 (actual rows=1 loops=1) + Order: metrics_1.ts DESC + -> Index Only Scan using _hyper_1_2_chunk_metrics_ts_idx on _hyper_1_2_chunk _hyper_1_2_chunk_1 (actual rows=1 loops=1) + Index Cond: (ts IS NOT NULL) + Heap Fetches: 1 + -> Index Only Scan using _hyper_1_1_chunk_metrics_ts_idx on _hyper_1_1_chunk _hyper_1_1_chunk_1 (never executed) + Index Cond: (ts IS NOT NULL) + Heap Fetches: 0 + -> Seq Scan on _hyper_1_1_chunk (never executed) + Filter: (ts >= $1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=1 loops=1) + Filter: (ts >= $1) +(18 rows) + +-- two non-const conditions +explain (analyze, costs off, summary off, timing off) +select * from metrics +where ts >= (select max(ts) from metrics) + and id = 1; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics (actual rows=0 loops=1) + Chunks excluded during runtime: 1 + InitPlan 2 (returns $1) + -> Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics metrics_1 (actual rows=1 loops=1) + Order: metrics_1.ts DESC + -> Index Only Scan using _hyper_1_2_chunk_metrics_ts_idx on _hyper_1_2_chunk _hyper_1_2_chunk_1 (actual rows=1 loops=1) + Index Cond: (ts IS NOT NULL) + Heap Fetches: 1 + -> Index Only Scan using _hyper_1_1_chunk_metrics_ts_idx on _hyper_1_1_chunk _hyper_1_1_chunk_1 (never executed) + Index Cond: (ts IS NOT NULL) + Heap Fetches: 0 + -> Seq Scan on _hyper_1_1_chunk (never executed) + Filter: ((ts >= $1) AND (id = 1)) + -> Seq Scan on _hyper_1_2_chunk (actual rows=0 loops=1) + Filter: ((ts >= $1) AND (id = 1)) + Rows Removed by Filter: 1 +(19 rows) + +-- condition that becomes const null after evaluating the param +explain (analyze, costs off, summary off, timing off) +select * from metrics +where ts >= (select max(ts) from metrics where id = -1); + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics (actual rows=0 loops=1) + Chunks excluded during runtime: 2 + InitPlan 2 (returns $1) + -> Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=0 loops=1) + -> Custom Scan (ChunkAppend) on metrics metrics_1 (actual rows=0 loops=1) + Order: metrics_1.ts DESC + -> Index Scan using _hyper_1_2_chunk_metrics_ts_idx on _hyper_1_2_chunk _hyper_1_2_chunk_1 (actual rows=0 loops=1) + Index Cond: (ts IS NOT NULL) + Filter: (id = '-1'::integer) + Rows Removed by Filter: 1 + -> Index Scan using _hyper_1_1_chunk_metrics_ts_idx on _hyper_1_1_chunk _hyper_1_1_chunk_1 (actual rows=0 loops=1) + Index Cond: (ts IS NOT NULL) + Filter: (id = '-1'::integer) + Rows Removed by Filter: 1 + -> Seq Scan on _hyper_1_1_chunk (never executed) + Filter: (ts >= $1) + -> Seq Scan on _hyper_1_2_chunk (never executed) + Filter: (ts >= $1) +(20 rows) + +-- const null condition and some other condition +explain (analyze, costs off, summary off, timing off) +select * from metrics +where ts >= (select max(ts) from metrics where id = -1) + and id = 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics (actual rows=0 loops=1) + Chunks excluded during runtime: 2 + InitPlan 2 (returns $1) + -> Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=0 loops=1) + -> Custom Scan (ChunkAppend) on metrics metrics_1 (actual rows=0 loops=1) + Order: metrics_1.ts DESC + -> Index Scan using _hyper_1_2_chunk_metrics_ts_idx on _hyper_1_2_chunk _hyper_1_2_chunk_1 (actual rows=0 loops=1) + Index Cond: (ts IS NOT NULL) + Filter: (id = '-1'::integer) + Rows Removed by Filter: 1 + -> Index Scan using _hyper_1_1_chunk_metrics_ts_idx on _hyper_1_1_chunk _hyper_1_1_chunk_1 (actual rows=0 loops=1) + Index Cond: (ts IS NOT NULL) + Filter: (id = '-1'::integer) + Rows Removed by Filter: 1 + -> Seq Scan on _hyper_1_1_chunk (never executed) + Filter: ((ts >= $1) AND (id = 1)) + -> Seq Scan on _hyper_1_2_chunk (never executed) + Filter: ((ts >= $1) AND (id = 1)) +(20 rows) + diff --git a/test/expected/null_exclusion-16.out b/test/expected/null_exclusion-16.out new file mode 100644 index 00000000000..9c4242ab361 --- /dev/null +++ b/test/expected/null_exclusion-16.out @@ -0,0 +1,126 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +create table metrics(ts timestamp, id int, value float); +select create_hypertable('metrics', 'ts'); +WARNING: column type "timestamp without time zone" used for "ts" does not follow best practices +NOTICE: adding not-null constraint to column "ts" + create_hypertable +---------------------- + (1,public,metrics,t) +(1 row) + +insert into metrics values ('2022-02-02 02:02:02', 2, 2.), + ('2023-03-03 03:03:03', 3, 3.); +analyze metrics; +-- non-const condition +explain (analyze, costs off, summary off, timing off) +select * from metrics +where ts >= (select max(ts) from metrics); + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics (actual rows=1 loops=1) + Chunks excluded during runtime: 1 + InitPlan 2 (returns $1) + -> Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics metrics_1 (actual rows=1 loops=1) + Order: metrics_1.ts DESC + -> Index Only Scan using _hyper_1_2_chunk_metrics_ts_idx on _hyper_1_2_chunk _hyper_1_2_chunk_1 (actual rows=1 loops=1) + Index Cond: (ts IS NOT NULL) + Heap Fetches: 1 + -> Index Only Scan using _hyper_1_1_chunk_metrics_ts_idx on _hyper_1_1_chunk _hyper_1_1_chunk_1 (never executed) + Index Cond: (ts IS NOT NULL) + Heap Fetches: 0 + -> Seq Scan on _hyper_1_1_chunk (never executed) + Filter: (ts >= $1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=1 loops=1) + Filter: (ts >= $1) +(18 rows) + +-- two non-const conditions +explain (analyze, costs off, summary off, timing off) +select * from metrics +where ts >= (select max(ts) from metrics) + and id = 1; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics (actual rows=0 loops=1) + Chunks excluded during runtime: 1 + InitPlan 2 (returns $1) + -> Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics metrics_1 (actual rows=1 loops=1) + Order: metrics_1.ts DESC + -> Index Only Scan using _hyper_1_2_chunk_metrics_ts_idx on _hyper_1_2_chunk _hyper_1_2_chunk_1 (actual rows=1 loops=1) + Index Cond: (ts IS NOT NULL) + Heap Fetches: 1 + -> Index Only Scan using _hyper_1_1_chunk_metrics_ts_idx on _hyper_1_1_chunk _hyper_1_1_chunk_1 (never executed) + Index Cond: (ts IS NOT NULL) + Heap Fetches: 0 + -> Seq Scan on _hyper_1_1_chunk (never executed) + Filter: ((ts >= $1) AND (id = 1)) + -> Seq Scan on _hyper_1_2_chunk (actual rows=0 loops=1) + Filter: ((ts >= $1) AND (id = 1)) + Rows Removed by Filter: 1 +(19 rows) + +-- condition that becomes const null after evaluating the param +explain (analyze, costs off, summary off, timing off) +select * from metrics +where ts >= (select max(ts) from metrics where id = -1); + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics (actual rows=0 loops=1) + Chunks excluded during runtime: 2 + InitPlan 2 (returns $1) + -> Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=0 loops=1) + -> Custom Scan (ChunkAppend) on metrics metrics_1 (actual rows=0 loops=1) + Order: metrics_1.ts DESC + -> Index Scan using _hyper_1_2_chunk_metrics_ts_idx on _hyper_1_2_chunk _hyper_1_2_chunk_1 (actual rows=0 loops=1) + Index Cond: (ts IS NOT NULL) + Filter: (id = '-1'::integer) + Rows Removed by Filter: 1 + -> Index Scan using _hyper_1_1_chunk_metrics_ts_idx on _hyper_1_1_chunk _hyper_1_1_chunk_1 (actual rows=0 loops=1) + Index Cond: (ts IS NOT NULL) + Filter: (id = '-1'::integer) + Rows Removed by Filter: 1 + -> Seq Scan on _hyper_1_1_chunk (never executed) + Filter: (ts >= $1) + -> Seq Scan on _hyper_1_2_chunk (never executed) + Filter: (ts >= $1) +(20 rows) + +-- const null condition and some other condition +explain (analyze, costs off, summary off, timing off) +select * from metrics +where ts >= (select max(ts) from metrics where id = -1) + and id = 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics (actual rows=0 loops=1) + Chunks excluded during runtime: 2 + InitPlan 2 (returns $1) + -> Result (actual rows=1 loops=1) + InitPlan 1 (returns $0) + -> Limit (actual rows=0 loops=1) + -> Custom Scan (ChunkAppend) on metrics metrics_1 (actual rows=0 loops=1) + Order: metrics_1.ts DESC + -> Index Scan using _hyper_1_2_chunk_metrics_ts_idx on _hyper_1_2_chunk _hyper_1_2_chunk_1 (actual rows=0 loops=1) + Index Cond: (ts IS NOT NULL) + Filter: (id = '-1'::integer) + Rows Removed by Filter: 1 + -> Index Scan using _hyper_1_1_chunk_metrics_ts_idx on _hyper_1_1_chunk _hyper_1_1_chunk_1 (actual rows=0 loops=1) + Index Cond: (ts IS NOT NULL) + Filter: (id = '-1'::integer) + Rows Removed by Filter: 1 + -> Seq Scan on _hyper_1_1_chunk (never executed) + Filter: ((ts >= $1) AND (id = 1)) + -> Seq Scan on _hyper_1_2_chunk (never executed) + Filter: ((ts >= $1) AND (id = 1)) +(20 rows) + diff --git a/test/expected/null_exclusion-17.out b/test/expected/null_exclusion-17.out new file mode 100644 index 00000000000..f3ae7f9f32d --- /dev/null +++ b/test/expected/null_exclusion-17.out @@ -0,0 +1,118 @@ +-- This file and its contents are licensed under the Apache License 2.0. +-- Please see the included NOTICE for copyright information and +-- LICENSE-APACHE for a copy of the license. +create table metrics(ts timestamp, id int, value float); +select create_hypertable('metrics', 'ts'); +WARNING: column type "timestamp without time zone" used for "ts" does not follow best practices +NOTICE: adding not-null constraint to column "ts" + create_hypertable +---------------------- + (1,public,metrics,t) +(1 row) + +insert into metrics values ('2022-02-02 02:02:02', 2, 2.), + ('2023-03-03 03:03:03', 3, 3.); +analyze metrics; +-- non-const condition +explain (analyze, costs off, summary off, timing off) +select * from metrics +where ts >= (select max(ts) from metrics); + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics (actual rows=1 loops=1) + Chunks excluded during runtime: 1 + InitPlan 2 + -> Result (actual rows=1 loops=1) + InitPlan 1 + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics metrics_1 (actual rows=1 loops=1) + Order: metrics_1.ts DESC + -> Index Only Scan using _hyper_1_2_chunk_metrics_ts_idx on _hyper_1_2_chunk _hyper_1_2_chunk_1 (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_1_1_chunk_metrics_ts_idx on _hyper_1_1_chunk _hyper_1_1_chunk_1 (never executed) + Heap Fetches: 0 + -> Seq Scan on _hyper_1_1_chunk (never executed) + Filter: (ts >= (InitPlan 2).col1) + -> Seq Scan on _hyper_1_2_chunk (actual rows=1 loops=1) + Filter: (ts >= (InitPlan 2).col1) +(16 rows) + +-- two non-const conditions +explain (analyze, costs off, summary off, timing off) +select * from metrics +where ts >= (select max(ts) from metrics) + and id = 1; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics (actual rows=0 loops=1) + Chunks excluded during runtime: 1 + InitPlan 2 + -> Result (actual rows=1 loops=1) + InitPlan 1 + -> Limit (actual rows=1 loops=1) + -> Custom Scan (ChunkAppend) on metrics metrics_1 (actual rows=1 loops=1) + Order: metrics_1.ts DESC + -> Index Only Scan using _hyper_1_2_chunk_metrics_ts_idx on _hyper_1_2_chunk _hyper_1_2_chunk_1 (actual rows=1 loops=1) + Heap Fetches: 1 + -> Index Only Scan using _hyper_1_1_chunk_metrics_ts_idx on _hyper_1_1_chunk _hyper_1_1_chunk_1 (never executed) + Heap Fetches: 0 + -> Seq Scan on _hyper_1_1_chunk (never executed) + Filter: ((ts >= (InitPlan 2).col1) AND (id = 1)) + -> Seq Scan on _hyper_1_2_chunk (actual rows=0 loops=1) + Filter: ((ts >= (InitPlan 2).col1) AND (id = 1)) + Rows Removed by Filter: 1 +(17 rows) + +-- condition that becomes const null after evaluating the param +explain (analyze, costs off, summary off, timing off) +select * from metrics +where ts >= (select max(ts) from metrics where id = -1); + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics (actual rows=0 loops=1) + Chunks excluded during runtime: 2 + InitPlan 2 + -> Result (actual rows=1 loops=1) + InitPlan 1 + -> Limit (actual rows=0 loops=1) + -> Custom Scan (ChunkAppend) on metrics metrics_1 (actual rows=0 loops=1) + Order: metrics_1.ts DESC + -> Index Scan using _hyper_1_2_chunk_metrics_ts_idx on _hyper_1_2_chunk _hyper_1_2_chunk_1 (actual rows=0 loops=1) + Filter: (id = '-1'::integer) + Rows Removed by Filter: 1 + -> Index Scan using _hyper_1_1_chunk_metrics_ts_idx on _hyper_1_1_chunk _hyper_1_1_chunk_1 (actual rows=0 loops=1) + Filter: (id = '-1'::integer) + Rows Removed by Filter: 1 + -> Seq Scan on _hyper_1_1_chunk (never executed) + Filter: (ts >= (InitPlan 2).col1) + -> Seq Scan on _hyper_1_2_chunk (never executed) + Filter: (ts >= (InitPlan 2).col1) +(18 rows) + +-- const null condition and some other condition +explain (analyze, costs off, summary off, timing off) +select * from metrics +where ts >= (select max(ts) from metrics where id = -1) + and id = 1; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on metrics (actual rows=0 loops=1) + Chunks excluded during runtime: 2 + InitPlan 2 + -> Result (actual rows=1 loops=1) + InitPlan 1 + -> Limit (actual rows=0 loops=1) + -> Custom Scan (ChunkAppend) on metrics metrics_1 (actual rows=0 loops=1) + Order: metrics_1.ts DESC + -> Index Scan using _hyper_1_2_chunk_metrics_ts_idx on _hyper_1_2_chunk _hyper_1_2_chunk_1 (actual rows=0 loops=1) + Filter: (id = '-1'::integer) + Rows Removed by Filter: 1 + -> Index Scan using _hyper_1_1_chunk_metrics_ts_idx on _hyper_1_1_chunk _hyper_1_1_chunk_1 (actual rows=0 loops=1) + Filter: (id = '-1'::integer) + Rows Removed by Filter: 1 + -> Seq Scan on _hyper_1_1_chunk (never executed) + Filter: ((ts >= (InitPlan 2).col1) AND (id = 1)) + -> Seq Scan on _hyper_1_2_chunk (never executed) + Filter: ((ts >= (InitPlan 2).col1) AND (id = 1)) +(18 rows) + diff --git a/test/expected/plan_expand_hypertable-17.out b/test/expected/plan_expand_hypertable-17.out index b7df1441569..a244501fb87 100644 --- a/test/expected/plan_expand_hypertable-17.out +++ b/test/expected/plan_expand_hypertable-17.out @@ -822,30 +822,30 @@ joins (7 rows) :PREFIX SELECT * FROM hyper_ts WHERE tag_id IN (SELECT id FROM tag WHERE tag.id=1) or (time < to_timestamp(10) and device_id = 'dev1') ORDER BY value; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Sort Sort Key: hyper_ts.value -> Custom Scan (ChunkAppend) on hyper_ts -> Seq Scan on _hyper_3_116_chunk - Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + Filter: ((ANY (tag_id = (hashed SubPlan 1).col1)) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) SubPlan 1 -> Seq Scan on tag Filter: (id = 1) -> Seq Scan on _hyper_3_117_chunk - Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + Filter: ((ANY (tag_id = (hashed SubPlan 1).col1)) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) -> Seq Scan on _hyper_3_118_chunk - Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + Filter: ((ANY (tag_id = (hashed SubPlan 1).col1)) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) -> Seq Scan on _hyper_3_119_chunk - Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + Filter: ((ANY (tag_id = (hashed SubPlan 1).col1)) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) -> Seq Scan on _hyper_3_120_chunk - Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + Filter: ((ANY (tag_id = (hashed SubPlan 1).col1)) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) -> Seq Scan on _hyper_3_121_chunk - Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + Filter: ((ANY (tag_id = (hashed SubPlan 1).col1)) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) -> Seq Scan on _hyper_3_122_chunk - Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + Filter: ((ANY (tag_id = (hashed SubPlan 1).col1)) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) -> Seq Scan on _hyper_3_123_chunk - Filter: ((hashed SubPlan 1) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) + Filter: ((ANY (tag_id = (hashed SubPlan 1).col1)) OR (("time" < 'Wed Dec 31 16:00:10 1969 PST'::timestamp with time zone) AND (device_id = 'dev1'::text))) (22 rows) :PREFIX SELECT * FROM hyper_ts WHERE tag_id IN (SELECT id FROM tag WHERE tag.name='tag1') and time < to_timestamp(10) and device_id = 'dev1' ORDER BY value; diff --git a/test/expected/rowsecurity-17.out b/test/expected/rowsecurity-17.out index e2ce06ccb2a..4587ee369b4 100644 --- a/test/expected/rowsecurity-17.out +++ b/test/expected/rowsecurity-17.out @@ -106,23 +106,23 @@ CREATE POLICY p2r ON document AS RESTRICTIVE TO regress_rls_dave CREATE POLICY p1r ON document AS RESTRICTIVE TO regress_rls_dave USING (cid <> 44); \dp - Access privileges - Schema | Name | Type | Access privileges | Column privileges | Policies ---------------------+----------+-------+---------------------------------------------+-------------------+-------------------------------------------- - regress_rls_schema | category | table | regress_rls_alice=arwdDxt/regress_rls_alice+| | - | | | =arwdDxt/regress_rls_alice | | - regress_rls_schema | document | table | regress_rls_alice=arwdDxt/regress_rls_alice+| | p1: + - | | | =arwdDxt/regress_rls_alice | | (u): (dlevel <= ( SELECT uaccount.seclv + - | | | | | FROM uaccount + - | | | | | WHERE (uaccount.pguser = CURRENT_USER)))+ - | | | | | p2r (RESTRICTIVE): + - | | | | | (u): ((cid <> 44) AND (cid < 50)) + - | | | | | to: regress_rls_dave + - | | | | | p1r (RESTRICTIVE): + - | | | | | (u): (cid <> 44) + - | | | | | to: regress_rls_dave - regress_rls_schema | uaccount | table | regress_rls_alice=arwdDxt/regress_rls_alice+| | - | | | =r/regress_rls_alice | | + Access privileges + Schema | Name | Type | Access privileges | Column privileges | Policies +--------------------+----------+-------+----------------------------------------------+-------------------+-------------------------------------------- + regress_rls_schema | category | table | regress_rls_alice=arwdDxtm/regress_rls_alice+| | + | | | =arwdDxtm/regress_rls_alice | | + regress_rls_schema | document | table | regress_rls_alice=arwdDxtm/regress_rls_alice+| | p1: + + | | | =arwdDxtm/regress_rls_alice | | (u): (dlevel <= ( SELECT uaccount.seclv + + | | | | | FROM uaccount + + | | | | | WHERE (uaccount.pguser = CURRENT_USER)))+ + | | | | | p2r (RESTRICTIVE): + + | | | | | (u): ((cid <> 44) AND (cid < 50)) + + | | | | | to: regress_rls_dave + + | | | | | p1r (RESTRICTIVE): + + | | | | | (u): (cid <> 44) + + | | | | | to: regress_rls_dave + regress_rls_schema | uaccount | table | regress_rls_alice=arwdDxtm/regress_rls_alice+| | + | | | =r/regress_rls_alice | | (3 rows) \d document @@ -263,53 +263,53 @@ SELECT * FROM document TABLESAMPLE BERNOULLI(50) REPEATABLE(0) (0 rows) EXPLAIN (COSTS OFF) SELECT * FROM document WHERE f_leak(dtitle); - QUERY PLAN ------------------------------------------------------ + QUERY PLAN +-------------------------------------------------------------------- Custom Scan (ChunkAppend) on document Chunks excluded during startup: 0 - InitPlan 1 (returns $0) + InitPlan 1 -> Index Scan using uaccount_pkey on uaccount Index Cond: (pguser = CURRENT_USER) -> Seq Scan on document document_1 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_1_1_chunk document_2 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_1_2_chunk document_3 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_1_3_chunk document_4 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_1_4_chunk document_5 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_1_5_chunk document_6 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_1_6_chunk document_7 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) (19 rows) EXPLAIN (COSTS OFF) SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle); - QUERY PLAN ------------------------------------------------------------ + QUERY PLAN +-------------------------------------------------------------------------- Hash Join Hash Cond: (document.cid = category.cid) - InitPlan 1 (returns $0) + InitPlan 1 -> Index Scan using uaccount_pkey on uaccount Index Cond: (pguser = CURRENT_USER) -> Custom Scan (ChunkAppend) on document Chunks excluded during startup: 0 -> Seq Scan on document document_1 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_1_1_chunk document_2 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_1_2_chunk document_3 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_1_3_chunk document_4 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_1_4_chunk document_5 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_1_5_chunk document_6 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_1_6_chunk document_7 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Hash -> Seq Scan on category (23 rows) @@ -355,35 +355,35 @@ NOTICE: f_leak => awesome technology book (7 rows) EXPLAIN (COSTS OFF) SELECT * FROM document WHERE f_leak(dtitle); - QUERY PLAN ----------------------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------------------------------- Custom Scan (ChunkAppend) on document Chunks excluded during startup: 0 - InitPlan 1 (returns $0) + InitPlan 1 -> Index Scan using uaccount_pkey on uaccount Index Cond: (pguser = CURRENT_USER) -> Seq Scan on document document_1 - Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_1_1_chunk document_2 - Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_1_2_chunk document_3 - Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_1_3_chunk document_4 - Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_1_4_chunk document_5 - Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_1_5_chunk document_6 - Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_1_6_chunk document_7 - Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) (19 rows) EXPLAIN (COSTS OFF) SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle); - QUERY PLAN ----------------------------------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------- Hash Join Hash Cond: (category.cid = document.cid) - InitPlan 1 (returns $0) + InitPlan 1 -> Index Scan using uaccount_pkey on uaccount Index Cond: (pguser = CURRENT_USER) -> Seq Scan on category @@ -391,19 +391,19 @@ EXPLAIN (COSTS OFF) SELECT * FROM document NATURAL JOIN category WHERE f_leak(dt -> Custom Scan (ChunkAppend) on document Chunks excluded during startup: 0 -> Seq Scan on document document_1 - Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_1_1_chunk document_2 - Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_1_2_chunk document_3 - Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_1_3_chunk document_4 - Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_1_4_chunk document_5 - Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_1_5_chunk document_6 - Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_1_6_chunk document_7 - Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= $0) AND f_leak(dtitle)) + Filter: ((cid <> 44) AND (cid <> 44) AND (cid < 50) AND (dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) (23 rows) -- 44 would technically fail for both p2r and p1r, but we should get an error @@ -542,8 +542,8 @@ SELECT * FROM document d FULL OUTER JOIN category c on d.cid = c.cid ORDER BY d. (6 rows) DELETE FROM category WHERE cid = 33; -- fails with FK violation -ERROR: update or delete on table "category" violates foreign key constraint "4_7_document_cid_fkey" on table "_hyper_1_4_chunk" -DETAIL: Key is still referenced from table "_hyper_1_4_chunk". +ERROR: update or delete on table "category" violates foreign key constraint "document_cid_fkey" on table "document" +DETAIL: Key is still referenced from table "document". -- can insert FK referencing invisible PK SET SESSION AUTHORIZATION regress_rls_carol; SELECT * FROM document d FULL OUTER JOIN category c on d.cid = c.cid ORDER BY d.did, c.cid; @@ -1076,27 +1076,27 @@ NOTICE: f_leak => awesome science fiction (4 rows) EXPLAIN (COSTS OFF) SELECT * FROM hyper_document WHERE f_leak(dtitle); - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------- Custom Scan (ChunkAppend) on hyper_document Chunks excluded during startup: 0 - InitPlan 1 (returns $0) + InitPlan 1 -> Index Scan using uaccount_pkey on uaccount Index Cond: (pguser = CURRENT_USER) -> Seq Scan on hyper_document hyper_document_1 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_9_chunk hyper_document_2 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_10_chunk hyper_document_3 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_11_chunk hyper_document_4 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_12_chunk hyper_document_5 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_13_chunk hyper_document_6 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_14_chunk hyper_document_7 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) (19 rows) -- viewpoint from regress_rls_carol @@ -1127,27 +1127,27 @@ NOTICE: f_leak => awesome technology book (10 rows) EXPLAIN (COSTS OFF) SELECT * FROM hyper_document WHERE f_leak(dtitle); - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------- Custom Scan (ChunkAppend) on hyper_document Chunks excluded during startup: 0 - InitPlan 1 (returns $0) + InitPlan 1 -> Index Scan using uaccount_pkey on uaccount Index Cond: (pguser = CURRENT_USER) -> Seq Scan on hyper_document hyper_document_1 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_9_chunk hyper_document_2 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_10_chunk hyper_document_3 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_11_chunk hyper_document_4 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_12_chunk hyper_document_5 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_13_chunk hyper_document_6 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_14_chunk hyper_document_7 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) (19 rows) -- viewpoint from regress_rls_dave @@ -1166,27 +1166,27 @@ NOTICE: f_leak => awesome science fiction (4 rows) EXPLAIN (COSTS OFF) SELECT * FROM hyper_document WHERE f_leak(dtitle); - QUERY PLAN --------------------------------------------------------------------- + QUERY PLAN +----------------------------------------------------------------------------------- Custom Scan (ChunkAppend) on hyper_document Chunks excluded during startup: 0 - InitPlan 1 (returns $0) + InitPlan 1 -> Index Scan using uaccount_pkey on uaccount Index Cond: (pguser = CURRENT_USER) -> Seq Scan on hyper_document hyper_document_1 - Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle)) + Filter: ((cid < 55) AND (dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_9_chunk hyper_document_2 - Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle)) + Filter: ((cid < 55) AND (dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_10_chunk hyper_document_3 - Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle)) + Filter: ((cid < 55) AND (dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_11_chunk hyper_document_4 - Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle)) + Filter: ((cid < 55) AND (dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_12_chunk hyper_document_5 - Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle)) + Filter: ((cid < 55) AND (dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_13_chunk hyper_document_6 - Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle)) + Filter: ((cid < 55) AND (dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_14_chunk hyper_document_7 - Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle)) + Filter: ((cid < 55) AND (dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) (19 rows) -- pp1 ERROR @@ -1256,27 +1256,27 @@ NOTICE: f_leak => awesome science fiction (4 rows) EXPLAIN (COSTS OFF) SELECT * FROM hyper_document WHERE f_leak(dtitle); - QUERY PLAN --------------------------------------------------------------------- + QUERY PLAN +----------------------------------------------------------------------------------- Custom Scan (ChunkAppend) on hyper_document Chunks excluded during startup: 0 - InitPlan 1 (returns $0) + InitPlan 1 -> Index Scan using uaccount_pkey on uaccount Index Cond: (pguser = CURRENT_USER) -> Seq Scan on hyper_document hyper_document_1 - Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle)) + Filter: ((cid < 55) AND (dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_9_chunk hyper_document_2 - Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle)) + Filter: ((cid < 55) AND (dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_10_chunk hyper_document_3 - Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle)) + Filter: ((cid < 55) AND (dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_11_chunk hyper_document_4 - Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle)) + Filter: ((cid < 55) AND (dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_12_chunk hyper_document_5 - Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle)) + Filter: ((cid < 55) AND (dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_13_chunk hyper_document_6 - Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle)) + Filter: ((cid < 55) AND (dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_14_chunk hyper_document_7 - Filter: ((cid < 55) AND (dlevel <= $0) AND f_leak(dtitle)) + Filter: ((cid < 55) AND (dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) (19 rows) -- viewpoint from regress_rls_carol @@ -1309,27 +1309,27 @@ NOTICE: f_leak => awesome technology book (11 rows) EXPLAIN (COSTS OFF) SELECT * FROM hyper_document WHERE f_leak(dtitle); - QUERY PLAN ------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------- Custom Scan (ChunkAppend) on hyper_document Chunks excluded during startup: 0 - InitPlan 1 (returns $0) + InitPlan 1 -> Index Scan using uaccount_pkey on uaccount Index Cond: (pguser = CURRENT_USER) -> Seq Scan on hyper_document hyper_document_1 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_9_chunk hyper_document_2 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_10_chunk hyper_document_3 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_11_chunk hyper_document_4 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_12_chunk hyper_document_5 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_13_chunk hyper_document_6 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) -> Seq Scan on _hyper_2_14_chunk hyper_document_7 - Filter: ((dlevel <= $0) AND f_leak(dtitle)) + Filter: ((dlevel <= (InitPlan 1).col1) AND f_leak(dtitle)) (19 rows) -- only owner can change policies @@ -1632,7 +1632,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM only s1 WHERE f_leak(b); QUERY PLAN ----------------------------------------------------------------- Seq Scan on s1 - Filter: ((hashed SubPlan 1) AND f_leak(b)) + Filter: ((ANY (a = (hashed SubPlan 1).col1)) AND f_leak(b)) SubPlan 1 -> Append -> Seq Scan on s2 s2_1 @@ -1671,7 +1671,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM s1 WHERE f_leak(b); Custom Scan (ChunkAppend) on s1 Chunks excluded during startup: 0 -> Seq Scan on s1 s1_1 - Filter: ((hashed SubPlan 1) AND f_leak(b)) + Filter: ((ANY (a = (hashed SubPlan 1).col1)) AND f_leak(b)) SubPlan 1 -> Append -> Seq Scan on s2 s2_1 @@ -1691,27 +1691,27 @@ EXPLAIN (COSTS OFF) SELECT * FROM s1 WHERE f_leak(b); -> Seq Scan on _hyper_7_33_chunk s2_8 Filter: (((x % 2) = 0) AND (y ~~ '%af%'::text)) -> Seq Scan on _hyper_6_16_chunk s1_2 - Filter: ((hashed SubPlan 1) AND f_leak(b)) + Filter: ((ANY (a = (hashed SubPlan 1).col1)) AND f_leak(b)) -> Seq Scan on _hyper_6_17_chunk s1_3 - Filter: ((hashed SubPlan 1) AND f_leak(b)) + Filter: ((ANY (a = (hashed SubPlan 1).col1)) AND f_leak(b)) -> Seq Scan on _hyper_6_18_chunk s1_4 - Filter: ((hashed SubPlan 1) AND f_leak(b)) + Filter: ((ANY (a = (hashed SubPlan 1).col1)) AND f_leak(b)) -> Seq Scan on _hyper_6_19_chunk s1_5 - Filter: ((hashed SubPlan 1) AND f_leak(b)) + Filter: ((ANY (a = (hashed SubPlan 1).col1)) AND f_leak(b)) -> Seq Scan on _hyper_6_20_chunk s1_6 - Filter: ((hashed SubPlan 1) AND f_leak(b)) + Filter: ((ANY (a = (hashed SubPlan 1).col1)) AND f_leak(b)) -> Seq Scan on _hyper_6_21_chunk s1_7 - Filter: ((hashed SubPlan 1) AND f_leak(b)) + Filter: ((ANY (a = (hashed SubPlan 1).col1)) AND f_leak(b)) -> Seq Scan on _hyper_6_22_chunk s1_8 - Filter: ((hashed SubPlan 1) AND f_leak(b)) + Filter: ((ANY (a = (hashed SubPlan 1).col1)) AND f_leak(b)) -> Seq Scan on _hyper_6_23_chunk s1_9 - Filter: ((hashed SubPlan 1) AND f_leak(b)) + Filter: ((ANY (a = (hashed SubPlan 1).col1)) AND f_leak(b)) -> Seq Scan on _hyper_6_24_chunk s1_10 - Filter: ((hashed SubPlan 1) AND f_leak(b)) + Filter: ((ANY (a = (hashed SubPlan 1).col1)) AND f_leak(b)) -> Seq Scan on _hyper_6_25_chunk s1_11 - Filter: ((hashed SubPlan 1) AND f_leak(b)) + Filter: ((ANY (a = (hashed SubPlan 1).col1)) AND f_leak(b)) -> Seq Scan on _hyper_6_26_chunk s1_12 - Filter: ((hashed SubPlan 1) AND f_leak(b)) + Filter: ((ANY (a = (hashed SubPlan 1).col1)) AND f_leak(b)) (44 rows) SELECT (SELECT x FROM s1 LIMIT 1) xx, * FROM s2 WHERE y like '%28%'; @@ -1748,7 +1748,7 @@ EXPLAIN (COSTS OFF) SELECT (SELECT x FROM s1 LIMIT 1) xx, * FROM s2 WHERE y like -> Result -> Custom Scan (ChunkAppend) on s1 -> Seq Scan on s1 s1_1 - Filter: (hashed SubPlan 1) + Filter: (ANY (a = (hashed SubPlan 1).col1)) SubPlan 1 -> Append -> Seq Scan on s2 s2_10 @@ -1768,27 +1768,27 @@ EXPLAIN (COSTS OFF) SELECT (SELECT x FROM s1 LIMIT 1) xx, * FROM s2 WHERE y like -> Seq Scan on _hyper_7_33_chunk s2_17 Filter: (((x % 2) = 0) AND (y ~~ '%af%'::text)) -> Seq Scan on _hyper_6_16_chunk s1_2 - Filter: (hashed SubPlan 1) + Filter: (ANY (a = (hashed SubPlan 1).col1)) -> Seq Scan on _hyper_6_17_chunk s1_3 - Filter: (hashed SubPlan 1) + Filter: (ANY (a = (hashed SubPlan 1).col1)) -> Seq Scan on _hyper_6_18_chunk s1_4 - Filter: (hashed SubPlan 1) + Filter: (ANY (a = (hashed SubPlan 1).col1)) -> Seq Scan on _hyper_6_19_chunk s1_5 - Filter: (hashed SubPlan 1) + Filter: (ANY (a = (hashed SubPlan 1).col1)) -> Seq Scan on _hyper_6_20_chunk s1_6 - Filter: (hashed SubPlan 1) + Filter: (ANY (a = (hashed SubPlan 1).col1)) -> Seq Scan on _hyper_6_21_chunk s1_7 - Filter: (hashed SubPlan 1) + Filter: (ANY (a = (hashed SubPlan 1).col1)) -> Seq Scan on _hyper_6_22_chunk s1_8 - Filter: (hashed SubPlan 1) + Filter: (ANY (a = (hashed SubPlan 1).col1)) -> Seq Scan on _hyper_6_23_chunk s1_9 - Filter: (hashed SubPlan 1) + Filter: (ANY (a = (hashed SubPlan 1).col1)) -> Seq Scan on _hyper_6_24_chunk s1_10 - Filter: (hashed SubPlan 1) + Filter: (ANY (a = (hashed SubPlan 1).col1)) -> Seq Scan on _hyper_6_25_chunk s1_11 - Filter: (hashed SubPlan 1) + Filter: (ANY (a = (hashed SubPlan 1).col1)) -> Seq Scan on _hyper_6_26_chunk s1_12 - Filter: (hashed SubPlan 1) + Filter: (ANY (a = (hashed SubPlan 1).col1)) (64 rows) SET SESSION AUTHORIZATION regress_rls_alice; @@ -4717,8 +4717,8 @@ ALTER TABLE r2 ENABLE ROW LEVEL SECURITY; ALTER TABLE r2 FORCE ROW LEVEL SECURITY; -- Errors due to rows in r2 DELETE FROM r1; -ERROR: update or delete on table "r1" violates foreign key constraint "113_23_r2_a_fkey" on table "_hyper_26_113_chunk" -DETAIL: Key (a)=(10) is still referenced from table "_hyper_26_113_chunk". +ERROR: update or delete on table "r1" violates foreign key constraint "r2_a_fkey" on table "r2" +DETAIL: Key (a)=(10) is still referenced from table "r2". -- Reset r2 to no-RLS DROP POLICY p1 ON r2; ALTER TABLE r2 NO FORCE ROW LEVEL SECURITY; @@ -4796,7 +4796,7 @@ ALTER TABLE r2 FORCE ROW LEVEL SECURITY; UPDATE r1 SET a = a+5; ERROR: new row for relation "_hyper_28_117_chunk" violates check constraint "constraint_117" DETAIL: Failing row contains (15). -CONTEXT: SQL statement "UPDATE ONLY "_timescaledb_internal"."_hyper_28_117_chunk" SET "a" = $1 WHERE $2 OPERATOR(pg_catalog.=) "a"" +CONTEXT: SQL statement "UPDATE ONLY "regress_rls_schema"."r2" SET "a" = $1 WHERE $2 OPERATOR(pg_catalog.=) "a"" -- Remove FORCE from r2 ALTER TABLE r2 NO FORCE ROW LEVEL SECURITY; -- As owner, we now bypass RLS diff --git a/test/sql/.gitignore b/test/sql/.gitignore index 5930690944a..88f47e52c29 100644 --- a/test/sql/.gitignore +++ b/test/sql/.gitignore @@ -1,12 +1,16 @@ +/agg_bookends-*.sql /alternate_users-*.sql /append-*.sql /cluster-*.sql /cursor-*.sql /ddl-*.sql /delete-*.sql +/drop_owned-*.sql +/grant_hypertable-*.sql /histogram_test-*.sql /insert-*.sql /insert_many-*.sql +/null_exclusion-*.sql /parallel-*.sql /partitioning-*.sql /partitionwise-*.sql diff --git a/test/sql/CMakeLists.txt b/test/sql/CMakeLists.txt index bc2c85c9577..e93caaf3583 100644 --- a/test/sql/CMakeLists.txt +++ b/test/sql/CMakeLists.txt @@ -1,7 +1,6 @@ include(GenerateTestSchedule) set(TEST_FILES - agg_bookends.sql alter.sql alternate_users.sql baserel_cache.sql @@ -23,13 +22,11 @@ set(TEST_FILES debug_utils.sql drop_extension.sql drop_hypertable.sql - drop_owned.sql drop_rename_hypertable.sql drop_schema.sql dump_meta.sql extension_scripts.sql generated_as_identity.sql - grant_hypertable.sql hash.sql index.sql information_views.sql @@ -37,7 +34,6 @@ set(TEST_FILES insert_single.sql insert_returning.sql lateral.sql - null_exclusion.sql partition.sql partitioning.sql pg_join.sql @@ -63,10 +59,14 @@ set(TEST_FILES license.sql) set(TEST_TEMPLATES + agg_bookends.sql.in append.sql.in cluster.sql.in + drop_owned.sql.in + grant_hypertable.sql.in histogram_test.sql.in insert.sql.in + null_exclusion.sql.in plan_hashagg.sql.in rowsecurity.sql.in parallel.sql.in diff --git a/test/sql/agg_bookends.sql b/test/sql/agg_bookends.sql.in similarity index 100% rename from test/sql/agg_bookends.sql rename to test/sql/agg_bookends.sql.in diff --git a/test/sql/alter.sql b/test/sql/alter.sql index 604983cd21c..49fc52e96ae 100644 --- a/test/sql/alter.sql +++ b/test/sql/alter.sql @@ -24,7 +24,7 @@ SELECT * FROM alter_before; -- Show that deleted column is marked as dropped and that attnums are -- now different for the root table and the chunk -- PG17 made attstattarget NULLABLE and changed the default from -1 to NULL -SELECT c.relname, a.attname, a.attnum, a.attoptions, CASE WHEN a.attstattarget = -1 THEN NULL ELSE a.attstattarget END attstattarget, a.attstorage FROM pg_attribute a, pg_class c +SELECT c.relname, a.attname, a.attnum, a.attoptions, CASE WHEN a.attstattarget = -1 OR (a.attisdropped AND a.attstattarget = 0) THEN NULL ELSE a.attstattarget END attstattarget, a.attstorage FROM pg_attribute a, pg_class c WHERE a.attrelid = c.oid AND (c.relname LIKE '_hyper_1%_chunk' OR c.relname = 'alter_before') AND a.attnum > 0 @@ -75,7 +75,7 @@ ALTER TABLE _timescaledb_internal._hyper_2_4_chunk ALTER COLUMN temp SET STATIS ALTER TABLE _timescaledb_internal._hyper_2_4_chunk ALTER COLUMN notes SET STORAGE EXTERNAL; -- PG17 made attstattarget NULLABLE and changed the default from -1 to NULL -SELECT c.relname, a.attname, a.attnum, a.attoptions, CASE WHEN a.attstattarget = -1 THEN NULL ELSE a.attstattarget END attstattarget, a.attstorage FROM pg_attribute a, pg_class c +SELECT c.relname, a.attname, a.attnum, a.attoptions, CASE WHEN a.attstattarget = -1 OR (a.attisdropped AND a.attstattarget = 0) THEN NULL ELSE a.attstattarget END attstattarget, a.attstorage FROM pg_attribute a, pg_class c WHERE a.attrelid = c.oid AND (c.relname LIKE '_hyper_2%_chunk' OR c.relname = 'alter_after') AND a.attnum > 0 diff --git a/test/sql/drop_owned.sql b/test/sql/drop_owned.sql.in similarity index 100% rename from test/sql/drop_owned.sql rename to test/sql/drop_owned.sql.in diff --git a/test/sql/grant_hypertable.sql b/test/sql/grant_hypertable.sql.in similarity index 100% rename from test/sql/grant_hypertable.sql rename to test/sql/grant_hypertable.sql.in diff --git a/test/sql/insert.sql.in b/test/sql/insert.sql.in index e50b0cc3fab..b50bcb98520 100644 --- a/test/sql/insert.sql.in +++ b/test/sql/insert.sql.in @@ -2,6 +2,8 @@ -- Please see the included NOTICE for copyright information and -- LICENSE-APACHE for a copy of the license. +SET enable_seqscan TO off; + \ir include/insert_two_partitions.sql SELECT * FROM test.show_columnsp('_timescaledb_internal.%_hyper%'); diff --git a/test/sql/null_exclusion.sql b/test/sql/null_exclusion.sql.in similarity index 100% rename from test/sql/null_exclusion.sql rename to test/sql/null_exclusion.sql.in diff --git a/test/temp_schedule.backup b/test/temp_schedule.backup new file mode 100644 index 00000000000..7c1d68a6343 --- /dev/null +++ b/test/temp_schedule.backup @@ -0,0 +1,27 @@ + +test: bgw_custom +test: bgw_scheduler_control +test: bgw_db_scheduler +test: bgw_job_stat_history_errors_permissions +test: bgw_job_stat_history_errors +test: bgw_job_stat_history +test: bgw_db_scheduler_fixed +test: bgw_reorder_drop_chunks +test: scheduler_fixed +test: compress_bgw_reorder_drop_chunks +test: compression_ddl +test: cagg_ddl-17 +test: cagg_dump +test: cagg_invalidation +test: move +test: reorder +test: telemetry_stats +test: agg_partials_pushdown bgw_job_ddl bgw_policy bgw_security bgw_telemetry cagg_bgw-17 cagg_bgw_drop_chunks cagg_deprecated_bucket_ng cagg_drop_chunks cagg_errors cagg_joins +test: cagg_migrate cagg_migrate_function-17 cagg_multi cagg_on_cagg cagg_on_cagg_joins cagg_permissions-17 cagg_policy cagg_policy_run cagg_query cagg_refresh cagg_repair-17 +test: cagg_tableam cagg_union_view-17 cagg_usage-17 cagg_utils cagg_watermark-17 chunk_api chunk_column_stats-17 chunk_merge chunk_utils_compression chunk_utils_internal compress_auto_sparse_index +test: compress_default compress_dml_copy compress_float8_corrupt compressed_collation compressed_detoaster compression compression_algos compression_bgw compression_conflicts compression_create_compressed_table compression_defaults +test: compression_errors-17 compression_fks compression_hypertable compression_indexscan compression_insert compression_merge compression_permissions-17 compression_policy compression_qualpushdown compression_segment_meta compression_settings +test: compression_sorted_merge-17 compression_sorted_merge_columns compression_sorted_merge_distinct compression_sorted_merge_filter compression_update_delete-17 continuous_aggs-17 decompress_index decompress_memory decompress_vector_qual exp_cagg_monthly exp_cagg_next_gen +test: exp_cagg_origin exp_cagg_timezone feature_flags fixed_schedules foreign_keys hypertable_generalization information_view_chunk_count insert_memory_usage jit license_tsl merge_append_partially_compressed-17 +test: merge_compress modify_exclusion-17 partialize_finalize plan_skip_scan-17 policy_generalization read_only recompress_chunk_segmentwise size_utils_tsl skip_scan transparent_decompression-17 transparent_decompression_join_index +test: transparent_decompression_ordered_index-17 transparent_decompression_queries tsl_tables vector_agg_default vector_agg_param vector_agg_segmentby vectorized_aggregation diff --git a/tsl/src/compression/compression_storage.c b/tsl/src/compression/compression_storage.c index cbaa1eb16d6..eb1d7741efc 100644 --- a/tsl/src/compression/compression_storage.c +++ b/tsl/src/compression/compression_storage.c @@ -174,22 +174,22 @@ set_statistics_on_compressed_chunk(Oid compressed_table_id) Relation table_rel = table_open(compressed_table_id, ShareUpdateExclusiveLock); Relation attrelation = table_open(AttributeRelationId, RowExclusiveLock); TupleDesc table_desc = RelationGetDescr(table_rel); -#if PG17_LT - /* see comments about PG17+ below */ Oid compressed_data_type = ts_custom_type_cache_get(CUSTOM_TYPE_COMPRESSED_DATA)->type_oid; -#endif for (int i = 0; i < table_desc->natts; i++) { Form_pg_attribute attrtuple; HeapTuple tuple; Form_pg_attribute col_attr = TupleDescAttr(table_desc, i); + Datum repl_val[Natts_pg_attribute] = { 0 }; + bool repl_null[Natts_pg_attribute] = { false }; + bool repl_repl[Natts_pg_attribute] = { false }; /* skip system columns */ if (col_attr->attnum <= 0) continue; - tuple = SearchSysCacheCopyAttName(compressed_table_id, NameStr(col_attr->attname)); + tuple = SearchSysCacheCopyAttName(RelationGetRelid(table_rel), NameStr(col_attr->attname)); if (!HeapTupleIsValid(tuple)) ereport(ERROR, @@ -200,22 +200,25 @@ set_statistics_on_compressed_chunk(Oid compressed_table_id) attrtuple = (Form_pg_attribute) GETSTRUCT(tuple); -#if PG17_LT /* The planner should never look at compressed column statistics because * it will not understand them. Statistics on the other columns, * segmentbys and metadata, are very important, so we increase their * target. - * - * There are no 'attstattarget' and 'attstattarget' fields in PG17+. */ if (col_attr->atttypid == compressed_data_type) - attrtuple->attstattarget = 0; + repl_val[AttrNumberGetAttrOffset(Anum_pg_attribute_attstattarget)] = Int16GetDatum(0); else - attrtuple->attstattarget = 1000; -#endif + repl_val[AttrNumberGetAttrOffset(Anum_pg_attribute_attstattarget)] = + Int16GetDatum(1000); + repl_repl[AttrNumberGetAttrOffset(Anum_pg_attribute_attstattarget)] = true; + + tuple = + heap_modify_tuple(tuple, RelationGetDescr(attrelation), repl_val, repl_null, repl_repl); CatalogTupleUpdate(attrelation, &tuple->t_self, tuple); - InvokeObjectPostAlterHook(RelationRelationId, compressed_table_id, attrtuple->attnum); + InvokeObjectPostAlterHook(RelationRelationId, + RelationGetRelid(table_rel), + attrtuple->attnum); heap_freetuple(tuple); } diff --git a/tsl/test/expected/cagg_ddl-17.out b/tsl/test/expected/cagg_ddl-17.out index 38de3c97af2..a3b81df0b94 100644 --- a/tsl/test/expected/cagg_ddl-17.out +++ b/tsl/test/expected/cagg_ddl-17.out @@ -31,6 +31,7 @@ CREATE TABLESPACE tablespace1 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLE CREATE TABLESPACE tablespace2 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE2_PATH; CREATE SCHEMA rename_schema; GRANT ALL ON SCHEMA rename_schema TO :ROLE_DEFAULT_PERM_USER; +CREATE SCHEMA test_schema AUTHORIZATION :ROLE_DEFAULT_PERM_USER; SET ROLE :ROLE_DEFAULT_PERM_USER; CREATE TABLE foo(time TIMESTAMPTZ NOT NULL, data INTEGER); SELECT create_hypertable('foo', 'time'); @@ -39,19 +40,29 @@ SELECT create_hypertable('foo', 'time'); (2,public,foo,t) (1 row) -CREATE MATERIALIZED VIEW rename_test +CREATE MATERIALIZED VIEW rename_test_old WITH ( timescaledb.continuous, timescaledb.materialized_only=true) AS SELECT time_bucket('1week', time), COUNT(data) FROM foo GROUP BY 1 WITH NO DATA; +SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name + FROM _timescaledb_catalog.continuous_agg; + user_view_schema | user_view_name | partial_view_schema | partial_view_name +------------------+-----------------+-----------------------+------------------- + public | rename_test_old | _timescaledb_internal | _partial_view_3 +(1 row) + +ALTER TABLE rename_test_old RENAME TO rename_test; +ALTER TABLE rename_test SET SCHEMA test_schema; SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name FROM _timescaledb_catalog.continuous_agg; user_view_schema | user_view_name | partial_view_schema | partial_view_name ------------------+----------------+-----------------------+------------------- - public | rename_test | _timescaledb_internal | _partial_view_3 + test_schema | rename_test | _timescaledb_internal | _partial_view_3 (1 row) -ALTER MATERIALIZED VIEW rename_test SET SCHEMA rename_schema; +ALTER MATERIALIZED VIEW test_schema.rename_test SET SCHEMA rename_schema; +DROP SCHEMA test_schema; SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name FROM _timescaledb_catalog.continuous_agg; user_view_schema | user_view_name | partial_view_schema | partial_view_name @@ -1793,7 +1804,7 @@ View definition: cashflow2 FROM _timescaledb_internal._materialized_hypertable_45; -SELECT * FROM cashflows; +SELECT * FROM cashflows ORDER BY cashflows; bucket | amount | cashflow | cashflow2 ------------------------------+--------+----------+----------- Sun Dec 31 16:00:00 2017 PST | 1 | 10 | 11 @@ -1811,36 +1822,29 @@ SELECT * FROM cashflows; -- 3. test named ts -- 4. test named bucket width -- named origin --- Currently not supported due to a bug in time_bucket (see comment in cagg_validate_query) -\set ON_ERROR_STOP 0 CREATE MATERIALIZED VIEW cagg_named_origin WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS SELECT time_bucket('1h', time, 'UTC', origin => '2001-01-03 01:23:45') AS bucket, avg(amount) as avg_amount FROM transactions GROUP BY 1 WITH NO DATA; -ERROR: cannot create continuous aggregate with variable-width bucket using offset or origin. -- named timezone CREATE MATERIALIZED VIEW cagg_named_tz_origin WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS SELECT time_bucket('1h', time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, avg(amount) as avg_amount FROM transactions GROUP BY 1 WITH NO DATA; -ERROR: cannot create continuous aggregate with variable-width bucket using offset or origin. -- named ts CREATE MATERIALIZED VIEW cagg_named_ts_tz_origin WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS SELECT time_bucket('1h', ts => time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, avg(amount) as avg_amount FROM transactions GROUP BY 1 WITH NO DATA; -ERROR: cannot create continuous aggregate with variable-width bucket using offset or origin. -- named bucket width CREATE MATERIALIZED VIEW cagg_named_all WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS SELECT time_bucket(bucket_width => '1h', ts => time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket, avg(amount) as avg_amount FROM transactions GROUP BY 1 WITH NO DATA; -ERROR: cannot create continuous aggregate with variable-width bucket using offset or origin. -\set ON_ERROR_STOP 1 -- Refreshing from the beginning (NULL) of a CAGG with variable time bucket and -- using an INTERVAL for the end timestamp (issue #5534) CREATE MATERIALIZED VIEW transactions_montly @@ -1923,11 +1927,11 @@ CREATE TABLE conditions ( SELECT create_hypertable('conditions', 'time'); create_hypertable -------------------------- - (48,public,conditions,t) + (52,public,conditions,t) (1 row) INSERT INTO conditions VALUES ( '2018-01-01 09:20:00-08', 'SFO', 55); -INSERT INTO conditions VALUES ( '2018-01-02 09:30:00-08', 'por', 100); +INSERT INTO conditions VALUES ( '2018-01-02 09:30:00-08', 'POR', 100); INSERT INTO conditions VALUES ( '2018-01-02 09:20:00-08', 'SFO', 65); INSERT INTO conditions VALUES ( '2018-01-02 09:10:00-08', 'NYC', 65); INSERT INTO conditions VALUES ( '2018-11-01 09:20:00-08', 'NYC', 45); @@ -1956,10 +1960,10 @@ View definition: SELECT location, bucket, avg - FROM _timescaledb_internal._materialized_hypertable_49; + FROM _timescaledb_internal._materialized_hypertable_53; -- Should return NO ROWS -SELECT * FROM conditions_daily ORDER BY bucket, avg; +SELECT * FROM conditions_daily ORDER BY bucket, location; location | bucket | avg ----------+--------+----- (0 rows) @@ -1973,27 +1977,27 @@ ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only=fals bucket | timestamp with time zone | | | | plain | avg | double precision | | | | plain | View definition: - SELECT _materialized_hypertable_49.location, - _materialized_hypertable_49.bucket, - _materialized_hypertable_49.avg - FROM _timescaledb_internal._materialized_hypertable_49 - WHERE _materialized_hypertable_49.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(49)), '-infinity'::timestamp with time zone) + SELECT _materialized_hypertable_53.location, + _materialized_hypertable_53.bucket, + _materialized_hypertable_53.avg + FROM _timescaledb_internal._materialized_hypertable_53 + WHERE _materialized_hypertable_53.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(53)), '-infinity'::timestamp with time zone) UNION ALL SELECT conditions.location, time_bucket('@ 1 day'::interval, conditions."time") AS bucket, avg(conditions.temperature) AS avg FROM conditions - WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(49)), '-infinity'::timestamp with time zone) + WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(53)), '-infinity'::timestamp with time zone) GROUP BY conditions.location, (time_bucket('@ 1 day'::interval, conditions."time")); -- Should return ROWS because now it is realtime -SELECT * FROM conditions_daily ORDER BY bucket, avg; +SELECT * FROM conditions_daily ORDER BY bucket, location; location | bucket | avg ----------+------------------------------+----- SFO | Sun Dec 31 16:00:00 2017 PST | 55 - SFO | Mon Jan 01 16:00:00 2018 PST | 65 NYC | Mon Jan 01 16:00:00 2018 PST | 65 - por | Mon Jan 01 16:00:00 2018 PST | 100 + POR | Mon Jan 01 16:00:00 2018 PST | 100 + SFO | Mon Jan 01 16:00:00 2018 PST | 65 NYC | Wed Oct 31 17:00:00 2018 PDT | 65 NYC | Thu Nov 01 17:00:00 2018 PDT | 15 (6 rows) @@ -2011,16 +2015,16 @@ View definition: SELECT location, bucket, avg - FROM _timescaledb_internal._materialized_hypertable_49; + FROM _timescaledb_internal._materialized_hypertable_53; CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL); -SELECT * FROM conditions_daily ORDER BY bucket, avg; +SELECT * FROM conditions_daily ORDER BY bucket, location; location | bucket | avg ----------+------------------------------+----- SFO | Sun Dec 31 16:00:00 2017 PST | 55 - SFO | Mon Jan 01 16:00:00 2018 PST | 65 NYC | Mon Jan 01 16:00:00 2018 PST | 65 - por | Mon Jan 01 16:00:00 2018 PST | 100 + POR | Mon Jan 01 16:00:00 2018 PST | 100 + SFO | Mon Jan 01 16:00:00 2018 PST | 65 NYC | Wed Oct 31 17:00:00 2018 PDT | 65 NYC | Thu Nov 01 17:00:00 2018 PDT | 15 (6 rows) @@ -2077,13 +2081,13 @@ SELECT _timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark (1 row) -- Should return ROWS because the watermark was reseted by the TRUNCATE -SELECT * FROM conditions_daily ORDER BY bucket, avg; +SELECT * FROM conditions_daily ORDER BY bucket, location; location | bucket | avg ----------+------------------------------+----- SFO | Sun Dec 31 16:00:00 2017 PST | 55 - SFO | Mon Jan 01 16:00:00 2018 PST | 65 NYC | Mon Jan 01 16:00:00 2018 PST | 65 - por | Mon Jan 01 16:00:00 2018 PST | 100 + POR | Mon Jan 01 16:00:00 2018 PST | 100 + SFO | Mon Jan 01 16:00:00 2018 PST | 65 NYC | Wed Oct 31 17:00:00 2018 PDT | 65 NYC | Thu Nov 01 17:00:00 2018 PDT | 15 (6 rows) @@ -2102,7 +2106,7 @@ NOTICE: refreshing continuous aggregate "cagg1" ALTER MATERIALIZED VIEW cagg1 SET (timescaledb.compress); NOTICE: defaulting compress_orderby to time_bucket WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes -NOTICE: default segment by for hypertable "_materialized_hypertable_52" is set to "" +NOTICE: default segment by for hypertable "_materialized_hypertable_56" is set to "" SELECT count(compress_chunk(ch)) FROM show_chunks('cagg1') ch; count ------- @@ -2110,7 +2114,7 @@ SELECT count(compress_chunk(ch)) FROM show_chunks('cagg1') ch; (1 row) DROP MATERIALIZED VIEW cagg1; -NOTICE: drop cascades to table _timescaledb_internal._hyper_52_70_chunk +NOTICE: drop cascades to table _timescaledb_internal._hyper_56_70_chunk SELECT * FROM _timescaledb_catalog.compression_settings; relid | segmentby | orderby | orderby_desc | orderby_nullsfirst -------+-----------+---------+--------------+-------------------- diff --git a/tsl/test/expected/cagg_migrate_function-17.out b/tsl/test/expected/cagg_migrate_function-17.out index 0fa824b7aef..a030ec05aeb 100644 --- a/tsl/test/expected/cagg_migrate_function-17.out +++ b/tsl/test/expected/cagg_migrate_function-17.out @@ -806,7 +806,7 @@ SELECT time_bucket('1 week', time, origin=>'2000-01-01 00:00:00+01'), sensor, av (6 rows) -- Even the CAgg now uses time_bucket, we should see buckets of the same alignment -SELECT * FROM cagg_temp_ng_1week; +SELECT * FROM cagg_temp_ng_1week ORDER BY time, sensor; time | sensor | avg ------------------------------+--------+----- Fri Dec 25 15:00:00 2009 PST | 1 | 101 @@ -861,7 +861,7 @@ SELECT timescaledb_experimental.time_bucket_ng('1 week', time, '2005-01-01 15:00 (4 rows) -- Even the CAgg now uses time_bucket, we should see buckets of the same alignment -SELECT * FROM cagg_temp_ng_1week_corgin; +SELECT * FROM cagg_temp_ng_1week_corgin ORDER BY time, sensor; time | sensor | avg ------------------------------+--------+----- Sat Dec 26 15:00:00 2009 PST | 1 | 101 @@ -944,7 +944,7 @@ SELECT time_bucket('1 week', time, origin=>'2000-01-01 00:00:00'), sensor, avg(v (4 rows) -- Even the CAgg now uses time_bucket, we should see buckets of the same alignment -SELECT * FROM cagg_temp_ng_1week_timestamp; +SELECT * FROM cagg_temp_ng_1week_timestamp ORDER BY time, sensor; time | sensor | avg --------------------------+--------+----- Sat Dec 26 00:00:00 2009 | 1 | 101 @@ -1027,7 +1027,7 @@ SELECT time_bucket('1 week', time, origin=>'2000-01-01 01:00:00+00'), sensor, av (4 rows) -- Even the CAgg now uses time_bucket, we should see buckets of the same alignment -SELECT * FROM cagg_temp_ng_1week_date; +SELECT * FROM cagg_temp_ng_1week_date ORDER BY time, sensor; time | sensor | avg ------------+--------+----- 12-26-2009 | 1 | 101 diff --git a/tsl/test/expected/cagg_permissions.out b/tsl/test/expected/cagg_permissions-14.out similarity index 100% rename from tsl/test/expected/cagg_permissions.out rename to tsl/test/expected/cagg_permissions-14.out diff --git a/tsl/test/expected/cagg_permissions-15.out b/tsl/test/expected/cagg_permissions-15.out new file mode 100644 index 00000000000..3b2f6f4bc1b --- /dev/null +++ b/tsl/test/expected/cagg_permissions-15.out @@ -0,0 +1,321 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- initialize the bgw mock state to prevent the materialization workers from running +\c :TEST_DBNAME :ROLE_SUPERUSER +-- remove any default jobs, e.g., telemetry so bgw_job isn't polluted +DELETE FROM _timescaledb_config.bgw_job WHERE TRUE; +CREATE VIEW cagg_info AS +WITH + caggs AS ( + SELECT format('%I.%I', user_view_schema, user_view_name)::regclass AS user_view, + format('%I.%I', direct_view_schema, direct_view_name)::regclass AS direct_view, + format('%I.%I', partial_view_schema, partial_view_name)::regclass AS partial_view, + format('%I.%I', ht.schema_name, ht.table_name)::regclass AS mat_relid + FROM _timescaledb_catalog.hypertable ht, + _timescaledb_catalog.continuous_agg cagg + WHERE ht.id = cagg.mat_hypertable_id + ) +SELECT user_view, + (SELECT relacl FROM pg_class WHERE oid = user_view) AS user_view_perm, + relname AS mat_table, + (relacl) AS mat_table_perm, + direct_view, + (SELECT relacl FROM pg_class WHERE oid = direct_view) AS direct_view_perm, + partial_view, + (SELECT relacl FROM pg_class WHERE oid = partial_view) AS partial_view_perm + FROM pg_class JOIN caggs ON pg_class.oid = caggs.mat_relid; +GRANT SELECT ON cagg_info TO PUBLIC; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE conditions ( + timec INT NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp double precision NULL, + highp double precision null, + allnull double precision null + ); +select table_name from create_hypertable( 'conditions', 'timec', chunk_time_interval=> 100); + table_name +------------ + conditions +(1 row) + +CREATE OR REPLACE FUNCTION integer_now_test1() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0) FROM conditions $$; +SELECT set_integer_now_func('conditions', 'integer_now_test1'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW mat_refresh_test +WITH (timescaledb.continuous, timescaledb.materialized_only=false) +as +select location, max(humidity) +from conditions +group by time_bucket(100, timec), location WITH NO DATA; +-- Manually create index on CAgg +CREATE INDEX cagg_idx on mat_refresh_test(location); +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE USER not_priv; +\c :TEST_DBNAME not_priv +-- A user with no ownership on the Cagg cannot create index on it. -- This should fail +\set ON_ERROR_STOP 0 +CREATE INDEX cagg_idx on mat_refresh_test(humidity); +ERROR: must be owner of hypertable "_materialized_hypertable_2" +\c :TEST_DBNAME :ROLE_SUPERUSER +DROP USER not_priv; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +SELECT add_continuous_aggregate_policy('mat_refresh_test', NULL, -200::integer, '12 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1000 +(1 row) + +insert into conditions +select generate_series(0, 50, 10), 'NYC', 55, 75, 40, 70, NULL; +CALL refresh_continuous_aggregate(' mat_refresh_test', NULL, NULL); +SELECT id as cagg_job_id FROM _timescaledb_config.bgw_job order by id desc limit 1 \gset +SELECT format('%I.%I', materialization_hypertable_schema, materialization_hypertable_name ) as materialization_hypertable +FROM timescaledb_information.continuous_aggregates +WHERE view_name = 'mat_refresh_test' \gset +SELECT mat_hypertable_id FROM _timescaledb_catalog.continuous_agg WHERE user_view_name = 'mat_refresh_test' \gset +SELECT schema_name as mat_chunk_schema, table_name as mat_chunk_table +FROM _timescaledb_catalog.chunk +WHERE hypertable_id = :mat_hypertable_id +ORDER BY id desc +LIMIT 1 \gset +CREATE TABLE conditions_for_perm_check ( + timec INT NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp double precision NULL, + highp double precision null, + allnull double precision null + ); +select table_name from create_hypertable('conditions_for_perm_check', 'timec', chunk_time_interval=> 100); + table_name +--------------------------- + conditions_for_perm_check +(1 row) + +CREATE OR REPLACE FUNCTION integer_now_test2() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0) FROM conditions_for_perm_check $$; +SELECT set_integer_now_func('conditions_for_perm_check', 'integer_now_test2'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE TABLE conditions_for_perm_check_w_grant ( + timec INT NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp double precision NULL, + highp double precision null, + allnull double precision null + ); +select table_name from create_hypertable('conditions_for_perm_check_w_grant', 'timec', chunk_time_interval=> 100); + table_name +----------------------------------- + conditions_for_perm_check_w_grant +(1 row) + +CREATE OR REPLACE FUNCTION integer_now_test3() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0) FROM conditions_for_perm_check_w_grant $$; +SELECT set_integer_now_func('conditions_for_perm_check_w_grant', 'integer_now_test3'); + set_integer_now_func +---------------------- + +(1 row) + +GRANT SELECT, TRIGGER ON conditions_for_perm_check_w_grant TO public; +insert into conditions_for_perm_check_w_grant +select generate_series(0, 30, 10), 'POR', 55, 75, 40, 70, NULL; +--need both select and trigger permissions to create a materialized view on top of it. +GRANT SELECT, TRIGGER ON conditions_for_perm_check_w_grant TO public; +\c :TEST_DBNAME :ROLE_SUPERUSER +create schema custom_schema; +CREATE FUNCTION get_constant() RETURNS INTEGER LANGUAGE SQL IMMUTABLE AS +$BODY$ + SELECT 10; +$BODY$; +REVOKE EXECUTE ON FUNCTION get_constant() FROM PUBLIC; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +\set ON_ERROR_STOP 0 +select from alter_job(:cagg_job_id, max_runtime => NULL); +ERROR: insufficient permissions to alter job 1000 +--make sure that commands fail +ALTER MATERIALIZED VIEW mat_refresh_test SET(timescaledb.materialized_only = true); +ERROR: must be owner of continuous aggregate "mat_refresh_test" +DROP MATERIALIZED VIEW mat_refresh_test; +ERROR: must be owner of view mat_refresh_test +CALL refresh_continuous_aggregate('mat_refresh_test', NULL, NULL); +ERROR: must be owner of view mat_refresh_test +SELECT * FROM mat_refresh_test; +ERROR: permission denied for view mat_refresh_test +-- Test permissions also when the watermark is not constified and the ACL checks +-- in ts_continuous_agg_watermark are executed +SET timescaledb.enable_cagg_watermark_constify = OFF; +SELECT * FROM mat_refresh_test; +ERROR: permission denied for materialized view mat_refresh_test +RESET timescaledb.enable_cagg_watermark_constify; +SELECT * FROM :materialization_hypertable; +ERROR: permission denied for table _materialized_hypertable_2 +SELECT * FROM :"mat_chunk_schema".:"mat_chunk_table"; +ERROR: permission denied for table _hyper_2_2_chunk +--cannot create a mat view without select and trigger grants +CREATE MATERIALIZED VIEW mat_perm_view_test +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +as +select location, max(humidity) +from conditions_for_perm_check +group by time_bucket(100, timec), location WITH NO DATA; +ERROR: permission denied for table conditions_for_perm_check +--cannot create mat view in a schema without create privileges +CREATE MATERIALIZED VIEW custom_schema.mat_perm_view_test +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +as +select location, max(humidity) +from conditions_for_perm_check_w_grant +group by time_bucket(100, timec), location WITH NO DATA; +ERROR: permission denied for schema custom_schema +--cannot use a function without EXECUTE privileges +--you can create a VIEW but cannot refresh it +CREATE MATERIALIZED VIEW mat_perm_view_test +WITH ( timescaledb.continuous, timescaledb.materialized_only=true) +as +select location, max(humidity), get_constant() +from conditions_for_perm_check_w_grant +group by time_bucket(100, timec), location WITH NO DATA; +--this should fail +CALL refresh_continuous_aggregate('mat_perm_view_test', NULL, NULL); +ERROR: permission denied for function get_constant +DROP MATERIALIZED VIEW mat_perm_view_test; +--can create a mat view on something with select and trigger grants +CREATE MATERIALIZED VIEW mat_perm_view_test +WITH ( timescaledb.continuous, timescaledb.materialized_only=true) +as +select location, max(humidity) +from conditions_for_perm_check_w_grant +group by time_bucket(100, timec), location WITH NO DATA; +CALL refresh_continuous_aggregate('mat_perm_view_test', NULL, NULL); +SELECT * FROM mat_perm_view_test; + location | max +----------+----- + POR | 75 +(1 row) + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +--revoke select permissions from role with mat view +REVOKE SELECT ON conditions_for_perm_check_w_grant FROM public; +insert into conditions_for_perm_check_w_grant +select generate_series(100, 130, 10), 'POR', 65, 85, 30, 90, NULL; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +--refresh mat view should now fail due to lack of permissions +CALL refresh_continuous_aggregate('mat_perm_view_test', NULL, NULL); +ERROR: permission denied for table conditions_for_perm_check_w_grant +--but the old data will still be there +SELECT * FROM mat_perm_view_test; + location | max +----------+----- + POR | 75 +(1 row) + +\set VERBOSITY default +-- Test that grants and revokes are propagated to the implementation +-- objects, that is, the user view, the partial view, the direct view, +-- and the materialization table. +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE devices ( + time TIMESTAMPTZ NOT NULL, + device INT, + temp DOUBLE PRECISION NULL, + PRIMARY KEY(time, device) +); +SELECT create_hypertable('devices', 'time'); + create_hypertable +---------------------- + (9,public,devices,t) +(1 row) + +GRANT SELECT, TRIGGER ON devices TO :ROLE_DEFAULT_PERM_USER_2; +INSERT INTO devices +SELECT time, (random() * 30)::int, random() * 80 +FROM generate_series('2020-02-01 00:00:00'::timestamptz, '2020-03-01 00:00:00', '1 hour') AS time; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +CREATE MATERIALIZED VIEW devices_summary +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +AS SELECT time_bucket('1 day', time) AS bucket, device, MAX(temp) +FROM devices GROUP BY bucket, device WITH NO DATA; +\x on +SELECT * FROM cagg_info WHERE user_view::text = 'devices_summary'; +-[ RECORD 1 ]-----+--------------------------------------- +user_view | devices_summary +user_view_perm | +mat_table | _materialized_hypertable_10 +mat_table_perm | +direct_view | _timescaledb_internal._direct_view_10 +direct_view_perm | +partial_view | _timescaledb_internal._partial_view_10 +partial_view_perm | + +GRANT ALL ON devices_summary TO :ROLE_DEFAULT_PERM_USER; +SELECT * FROM cagg_info WHERE user_view::text = 'devices_summary'; +-[ RECORD 1 ]-----+------------------------------------------------------------------------------------------------ +user_view | devices_summary +user_view_perm | {default_perm_user_2=arwdDxt/default_perm_user_2,default_perm_user=arwdDxt/default_perm_user_2} +mat_table | _materialized_hypertable_10 +mat_table_perm | {default_perm_user_2=arwdDxt/default_perm_user_2,default_perm_user=arwdDxt/default_perm_user_2} +direct_view | _timescaledb_internal._direct_view_10 +direct_view_perm | {default_perm_user_2=arwdDxt/default_perm_user_2,default_perm_user=arwdDxt/default_perm_user_2} +partial_view | _timescaledb_internal._partial_view_10 +partial_view_perm | {default_perm_user_2=arwdDxt/default_perm_user_2,default_perm_user=arwdDxt/default_perm_user_2} + +REVOKE SELECT, UPDATE ON devices_summary FROM :ROLE_DEFAULT_PERM_USER; +SELECT * FROM cagg_info WHERE user_view::text = 'devices_summary'; +-[ RECORD 1 ]-----+---------------------------------------------------------------------------------------------- +user_view | devices_summary +user_view_perm | {default_perm_user_2=arwdDxt/default_perm_user_2,default_perm_user=adDxt/default_perm_user_2} +mat_table | _materialized_hypertable_10 +mat_table_perm | {default_perm_user_2=arwdDxt/default_perm_user_2,default_perm_user=adDxt/default_perm_user_2} +direct_view | _timescaledb_internal._direct_view_10 +direct_view_perm | {default_perm_user_2=arwdDxt/default_perm_user_2,default_perm_user=adDxt/default_perm_user_2} +partial_view | _timescaledb_internal._partial_view_10 +partial_view_perm | {default_perm_user_2=arwdDxt/default_perm_user_2,default_perm_user=adDxt/default_perm_user_2} + +\x off +-- Check for default privilege permissions get propagated to the materialization hypertable +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE SCHEMA test_default_privileges; +GRANT USAGE ON SCHEMA "test_default_privileges" TO :ROLE_DEFAULT_PERM_USER; +ALTER DEFAULT PRIVILEGES IN SCHEMA "test_default_privileges" GRANT SELECT ON TABLES TO :ROLE_DEFAULT_PERM_USER; +CREATE TABLE test_default_privileges.devices ( + time TIMESTAMPTZ NOT NULL, + device INT, + temp DOUBLE PRECISION NULL, + PRIMARY KEY(time, device) +); +SELECT create_hypertable('test_default_privileges.devices', 'time'); + create_hypertable +---------------------------------------- + (11,test_default_privileges,devices,t) +(1 row) + +CREATE MATERIALIZED VIEW test_default_privileges.devices_summary +WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS +SELECT time_bucket('1 day', time) AS bucket, device, MAX(temp) +FROM test_default_privileges.devices +GROUP BY bucket, device +WITH NO DATA; +-- check if user view perms have been propagated to the mat-ht +SELECT user_view_perm IS NOT DISTINCT FROM mat_table_perm +FROM cagg_info +WHERE user_view = 'test_default_privileges.devices_summary'::regclass; + ?column? +---------- + t +(1 row) + diff --git a/tsl/test/expected/cagg_permissions-16.out b/tsl/test/expected/cagg_permissions-16.out new file mode 100644 index 00000000000..3b2f6f4bc1b --- /dev/null +++ b/tsl/test/expected/cagg_permissions-16.out @@ -0,0 +1,321 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- initialize the bgw mock state to prevent the materialization workers from running +\c :TEST_DBNAME :ROLE_SUPERUSER +-- remove any default jobs, e.g., telemetry so bgw_job isn't polluted +DELETE FROM _timescaledb_config.bgw_job WHERE TRUE; +CREATE VIEW cagg_info AS +WITH + caggs AS ( + SELECT format('%I.%I', user_view_schema, user_view_name)::regclass AS user_view, + format('%I.%I', direct_view_schema, direct_view_name)::regclass AS direct_view, + format('%I.%I', partial_view_schema, partial_view_name)::regclass AS partial_view, + format('%I.%I', ht.schema_name, ht.table_name)::regclass AS mat_relid + FROM _timescaledb_catalog.hypertable ht, + _timescaledb_catalog.continuous_agg cagg + WHERE ht.id = cagg.mat_hypertable_id + ) +SELECT user_view, + (SELECT relacl FROM pg_class WHERE oid = user_view) AS user_view_perm, + relname AS mat_table, + (relacl) AS mat_table_perm, + direct_view, + (SELECT relacl FROM pg_class WHERE oid = direct_view) AS direct_view_perm, + partial_view, + (SELECT relacl FROM pg_class WHERE oid = partial_view) AS partial_view_perm + FROM pg_class JOIN caggs ON pg_class.oid = caggs.mat_relid; +GRANT SELECT ON cagg_info TO PUBLIC; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE conditions ( + timec INT NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp double precision NULL, + highp double precision null, + allnull double precision null + ); +select table_name from create_hypertable( 'conditions', 'timec', chunk_time_interval=> 100); + table_name +------------ + conditions +(1 row) + +CREATE OR REPLACE FUNCTION integer_now_test1() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0) FROM conditions $$; +SELECT set_integer_now_func('conditions', 'integer_now_test1'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW mat_refresh_test +WITH (timescaledb.continuous, timescaledb.materialized_only=false) +as +select location, max(humidity) +from conditions +group by time_bucket(100, timec), location WITH NO DATA; +-- Manually create index on CAgg +CREATE INDEX cagg_idx on mat_refresh_test(location); +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE USER not_priv; +\c :TEST_DBNAME not_priv +-- A user with no ownership on the Cagg cannot create index on it. -- This should fail +\set ON_ERROR_STOP 0 +CREATE INDEX cagg_idx on mat_refresh_test(humidity); +ERROR: must be owner of hypertable "_materialized_hypertable_2" +\c :TEST_DBNAME :ROLE_SUPERUSER +DROP USER not_priv; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +SELECT add_continuous_aggregate_policy('mat_refresh_test', NULL, -200::integer, '12 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1000 +(1 row) + +insert into conditions +select generate_series(0, 50, 10), 'NYC', 55, 75, 40, 70, NULL; +CALL refresh_continuous_aggregate(' mat_refresh_test', NULL, NULL); +SELECT id as cagg_job_id FROM _timescaledb_config.bgw_job order by id desc limit 1 \gset +SELECT format('%I.%I', materialization_hypertable_schema, materialization_hypertable_name ) as materialization_hypertable +FROM timescaledb_information.continuous_aggregates +WHERE view_name = 'mat_refresh_test' \gset +SELECT mat_hypertable_id FROM _timescaledb_catalog.continuous_agg WHERE user_view_name = 'mat_refresh_test' \gset +SELECT schema_name as mat_chunk_schema, table_name as mat_chunk_table +FROM _timescaledb_catalog.chunk +WHERE hypertable_id = :mat_hypertable_id +ORDER BY id desc +LIMIT 1 \gset +CREATE TABLE conditions_for_perm_check ( + timec INT NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp double precision NULL, + highp double precision null, + allnull double precision null + ); +select table_name from create_hypertable('conditions_for_perm_check', 'timec', chunk_time_interval=> 100); + table_name +--------------------------- + conditions_for_perm_check +(1 row) + +CREATE OR REPLACE FUNCTION integer_now_test2() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0) FROM conditions_for_perm_check $$; +SELECT set_integer_now_func('conditions_for_perm_check', 'integer_now_test2'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE TABLE conditions_for_perm_check_w_grant ( + timec INT NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp double precision NULL, + highp double precision null, + allnull double precision null + ); +select table_name from create_hypertable('conditions_for_perm_check_w_grant', 'timec', chunk_time_interval=> 100); + table_name +----------------------------------- + conditions_for_perm_check_w_grant +(1 row) + +CREATE OR REPLACE FUNCTION integer_now_test3() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0) FROM conditions_for_perm_check_w_grant $$; +SELECT set_integer_now_func('conditions_for_perm_check_w_grant', 'integer_now_test3'); + set_integer_now_func +---------------------- + +(1 row) + +GRANT SELECT, TRIGGER ON conditions_for_perm_check_w_grant TO public; +insert into conditions_for_perm_check_w_grant +select generate_series(0, 30, 10), 'POR', 55, 75, 40, 70, NULL; +--need both select and trigger permissions to create a materialized view on top of it. +GRANT SELECT, TRIGGER ON conditions_for_perm_check_w_grant TO public; +\c :TEST_DBNAME :ROLE_SUPERUSER +create schema custom_schema; +CREATE FUNCTION get_constant() RETURNS INTEGER LANGUAGE SQL IMMUTABLE AS +$BODY$ + SELECT 10; +$BODY$; +REVOKE EXECUTE ON FUNCTION get_constant() FROM PUBLIC; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +\set ON_ERROR_STOP 0 +select from alter_job(:cagg_job_id, max_runtime => NULL); +ERROR: insufficient permissions to alter job 1000 +--make sure that commands fail +ALTER MATERIALIZED VIEW mat_refresh_test SET(timescaledb.materialized_only = true); +ERROR: must be owner of continuous aggregate "mat_refresh_test" +DROP MATERIALIZED VIEW mat_refresh_test; +ERROR: must be owner of view mat_refresh_test +CALL refresh_continuous_aggregate('mat_refresh_test', NULL, NULL); +ERROR: must be owner of view mat_refresh_test +SELECT * FROM mat_refresh_test; +ERROR: permission denied for view mat_refresh_test +-- Test permissions also when the watermark is not constified and the ACL checks +-- in ts_continuous_agg_watermark are executed +SET timescaledb.enable_cagg_watermark_constify = OFF; +SELECT * FROM mat_refresh_test; +ERROR: permission denied for materialized view mat_refresh_test +RESET timescaledb.enable_cagg_watermark_constify; +SELECT * FROM :materialization_hypertable; +ERROR: permission denied for table _materialized_hypertable_2 +SELECT * FROM :"mat_chunk_schema".:"mat_chunk_table"; +ERROR: permission denied for table _hyper_2_2_chunk +--cannot create a mat view without select and trigger grants +CREATE MATERIALIZED VIEW mat_perm_view_test +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +as +select location, max(humidity) +from conditions_for_perm_check +group by time_bucket(100, timec), location WITH NO DATA; +ERROR: permission denied for table conditions_for_perm_check +--cannot create mat view in a schema without create privileges +CREATE MATERIALIZED VIEW custom_schema.mat_perm_view_test +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +as +select location, max(humidity) +from conditions_for_perm_check_w_grant +group by time_bucket(100, timec), location WITH NO DATA; +ERROR: permission denied for schema custom_schema +--cannot use a function without EXECUTE privileges +--you can create a VIEW but cannot refresh it +CREATE MATERIALIZED VIEW mat_perm_view_test +WITH ( timescaledb.continuous, timescaledb.materialized_only=true) +as +select location, max(humidity), get_constant() +from conditions_for_perm_check_w_grant +group by time_bucket(100, timec), location WITH NO DATA; +--this should fail +CALL refresh_continuous_aggregate('mat_perm_view_test', NULL, NULL); +ERROR: permission denied for function get_constant +DROP MATERIALIZED VIEW mat_perm_view_test; +--can create a mat view on something with select and trigger grants +CREATE MATERIALIZED VIEW mat_perm_view_test +WITH ( timescaledb.continuous, timescaledb.materialized_only=true) +as +select location, max(humidity) +from conditions_for_perm_check_w_grant +group by time_bucket(100, timec), location WITH NO DATA; +CALL refresh_continuous_aggregate('mat_perm_view_test', NULL, NULL); +SELECT * FROM mat_perm_view_test; + location | max +----------+----- + POR | 75 +(1 row) + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +--revoke select permissions from role with mat view +REVOKE SELECT ON conditions_for_perm_check_w_grant FROM public; +insert into conditions_for_perm_check_w_grant +select generate_series(100, 130, 10), 'POR', 65, 85, 30, 90, NULL; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +--refresh mat view should now fail due to lack of permissions +CALL refresh_continuous_aggregate('mat_perm_view_test', NULL, NULL); +ERROR: permission denied for table conditions_for_perm_check_w_grant +--but the old data will still be there +SELECT * FROM mat_perm_view_test; + location | max +----------+----- + POR | 75 +(1 row) + +\set VERBOSITY default +-- Test that grants and revokes are propagated to the implementation +-- objects, that is, the user view, the partial view, the direct view, +-- and the materialization table. +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE devices ( + time TIMESTAMPTZ NOT NULL, + device INT, + temp DOUBLE PRECISION NULL, + PRIMARY KEY(time, device) +); +SELECT create_hypertable('devices', 'time'); + create_hypertable +---------------------- + (9,public,devices,t) +(1 row) + +GRANT SELECT, TRIGGER ON devices TO :ROLE_DEFAULT_PERM_USER_2; +INSERT INTO devices +SELECT time, (random() * 30)::int, random() * 80 +FROM generate_series('2020-02-01 00:00:00'::timestamptz, '2020-03-01 00:00:00', '1 hour') AS time; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +CREATE MATERIALIZED VIEW devices_summary +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +AS SELECT time_bucket('1 day', time) AS bucket, device, MAX(temp) +FROM devices GROUP BY bucket, device WITH NO DATA; +\x on +SELECT * FROM cagg_info WHERE user_view::text = 'devices_summary'; +-[ RECORD 1 ]-----+--------------------------------------- +user_view | devices_summary +user_view_perm | +mat_table | _materialized_hypertable_10 +mat_table_perm | +direct_view | _timescaledb_internal._direct_view_10 +direct_view_perm | +partial_view | _timescaledb_internal._partial_view_10 +partial_view_perm | + +GRANT ALL ON devices_summary TO :ROLE_DEFAULT_PERM_USER; +SELECT * FROM cagg_info WHERE user_view::text = 'devices_summary'; +-[ RECORD 1 ]-----+------------------------------------------------------------------------------------------------ +user_view | devices_summary +user_view_perm | {default_perm_user_2=arwdDxt/default_perm_user_2,default_perm_user=arwdDxt/default_perm_user_2} +mat_table | _materialized_hypertable_10 +mat_table_perm | {default_perm_user_2=arwdDxt/default_perm_user_2,default_perm_user=arwdDxt/default_perm_user_2} +direct_view | _timescaledb_internal._direct_view_10 +direct_view_perm | {default_perm_user_2=arwdDxt/default_perm_user_2,default_perm_user=arwdDxt/default_perm_user_2} +partial_view | _timescaledb_internal._partial_view_10 +partial_view_perm | {default_perm_user_2=arwdDxt/default_perm_user_2,default_perm_user=arwdDxt/default_perm_user_2} + +REVOKE SELECT, UPDATE ON devices_summary FROM :ROLE_DEFAULT_PERM_USER; +SELECT * FROM cagg_info WHERE user_view::text = 'devices_summary'; +-[ RECORD 1 ]-----+---------------------------------------------------------------------------------------------- +user_view | devices_summary +user_view_perm | {default_perm_user_2=arwdDxt/default_perm_user_2,default_perm_user=adDxt/default_perm_user_2} +mat_table | _materialized_hypertable_10 +mat_table_perm | {default_perm_user_2=arwdDxt/default_perm_user_2,default_perm_user=adDxt/default_perm_user_2} +direct_view | _timescaledb_internal._direct_view_10 +direct_view_perm | {default_perm_user_2=arwdDxt/default_perm_user_2,default_perm_user=adDxt/default_perm_user_2} +partial_view | _timescaledb_internal._partial_view_10 +partial_view_perm | {default_perm_user_2=arwdDxt/default_perm_user_2,default_perm_user=adDxt/default_perm_user_2} + +\x off +-- Check for default privilege permissions get propagated to the materialization hypertable +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE SCHEMA test_default_privileges; +GRANT USAGE ON SCHEMA "test_default_privileges" TO :ROLE_DEFAULT_PERM_USER; +ALTER DEFAULT PRIVILEGES IN SCHEMA "test_default_privileges" GRANT SELECT ON TABLES TO :ROLE_DEFAULT_PERM_USER; +CREATE TABLE test_default_privileges.devices ( + time TIMESTAMPTZ NOT NULL, + device INT, + temp DOUBLE PRECISION NULL, + PRIMARY KEY(time, device) +); +SELECT create_hypertable('test_default_privileges.devices', 'time'); + create_hypertable +---------------------------------------- + (11,test_default_privileges,devices,t) +(1 row) + +CREATE MATERIALIZED VIEW test_default_privileges.devices_summary +WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS +SELECT time_bucket('1 day', time) AS bucket, device, MAX(temp) +FROM test_default_privileges.devices +GROUP BY bucket, device +WITH NO DATA; +-- check if user view perms have been propagated to the mat-ht +SELECT user_view_perm IS NOT DISTINCT FROM mat_table_perm +FROM cagg_info +WHERE user_view = 'test_default_privileges.devices_summary'::regclass; + ?column? +---------- + t +(1 row) + diff --git a/tsl/test/expected/cagg_permissions-17.out b/tsl/test/expected/cagg_permissions-17.out new file mode 100644 index 00000000000..82b74755ed9 --- /dev/null +++ b/tsl/test/expected/cagg_permissions-17.out @@ -0,0 +1,321 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- initialize the bgw mock state to prevent the materialization workers from running +\c :TEST_DBNAME :ROLE_SUPERUSER +-- remove any default jobs, e.g., telemetry so bgw_job isn't polluted +DELETE FROM _timescaledb_config.bgw_job WHERE TRUE; +CREATE VIEW cagg_info AS +WITH + caggs AS ( + SELECT format('%I.%I', user_view_schema, user_view_name)::regclass AS user_view, + format('%I.%I', direct_view_schema, direct_view_name)::regclass AS direct_view, + format('%I.%I', partial_view_schema, partial_view_name)::regclass AS partial_view, + format('%I.%I', ht.schema_name, ht.table_name)::regclass AS mat_relid + FROM _timescaledb_catalog.hypertable ht, + _timescaledb_catalog.continuous_agg cagg + WHERE ht.id = cagg.mat_hypertable_id + ) +SELECT user_view, + (SELECT relacl FROM pg_class WHERE oid = user_view) AS user_view_perm, + relname AS mat_table, + (relacl) AS mat_table_perm, + direct_view, + (SELECT relacl FROM pg_class WHERE oid = direct_view) AS direct_view_perm, + partial_view, + (SELECT relacl FROM pg_class WHERE oid = partial_view) AS partial_view_perm + FROM pg_class JOIN caggs ON pg_class.oid = caggs.mat_relid; +GRANT SELECT ON cagg_info TO PUBLIC; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE conditions ( + timec INT NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp double precision NULL, + highp double precision null, + allnull double precision null + ); +select table_name from create_hypertable( 'conditions', 'timec', chunk_time_interval=> 100); + table_name +------------ + conditions +(1 row) + +CREATE OR REPLACE FUNCTION integer_now_test1() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0) FROM conditions $$; +SELECT set_integer_now_func('conditions', 'integer_now_test1'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE MATERIALIZED VIEW mat_refresh_test +WITH (timescaledb.continuous, timescaledb.materialized_only=false) +as +select location, max(humidity) +from conditions +group by time_bucket(100, timec), location WITH NO DATA; +-- Manually create index on CAgg +CREATE INDEX cagg_idx on mat_refresh_test(location); +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE USER not_priv; +\c :TEST_DBNAME not_priv +-- A user with no ownership on the Cagg cannot create index on it. -- This should fail +\set ON_ERROR_STOP 0 +CREATE INDEX cagg_idx on mat_refresh_test(humidity); +ERROR: must be owner of hypertable "_materialized_hypertable_2" +\c :TEST_DBNAME :ROLE_SUPERUSER +DROP USER not_priv; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +SELECT add_continuous_aggregate_policy('mat_refresh_test', NULL, -200::integer, '12 h'::interval); + add_continuous_aggregate_policy +--------------------------------- + 1000 +(1 row) + +insert into conditions +select generate_series(0, 50, 10), 'NYC', 55, 75, 40, 70, NULL; +CALL refresh_continuous_aggregate(' mat_refresh_test', NULL, NULL); +SELECT id as cagg_job_id FROM _timescaledb_config.bgw_job order by id desc limit 1 \gset +SELECT format('%I.%I', materialization_hypertable_schema, materialization_hypertable_name ) as materialization_hypertable +FROM timescaledb_information.continuous_aggregates +WHERE view_name = 'mat_refresh_test' \gset +SELECT mat_hypertable_id FROM _timescaledb_catalog.continuous_agg WHERE user_view_name = 'mat_refresh_test' \gset +SELECT schema_name as mat_chunk_schema, table_name as mat_chunk_table +FROM _timescaledb_catalog.chunk +WHERE hypertable_id = :mat_hypertable_id +ORDER BY id desc +LIMIT 1 \gset +CREATE TABLE conditions_for_perm_check ( + timec INT NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp double precision NULL, + highp double precision null, + allnull double precision null + ); +select table_name from create_hypertable('conditions_for_perm_check', 'timec', chunk_time_interval=> 100); + table_name +--------------------------- + conditions_for_perm_check +(1 row) + +CREATE OR REPLACE FUNCTION integer_now_test2() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0) FROM conditions_for_perm_check $$; +SELECT set_integer_now_func('conditions_for_perm_check', 'integer_now_test2'); + set_integer_now_func +---------------------- + +(1 row) + +CREATE TABLE conditions_for_perm_check_w_grant ( + timec INT NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL, + lowp double precision NULL, + highp double precision null, + allnull double precision null + ); +select table_name from create_hypertable('conditions_for_perm_check_w_grant', 'timec', chunk_time_interval=> 100); + table_name +----------------------------------- + conditions_for_perm_check_w_grant +(1 row) + +CREATE OR REPLACE FUNCTION integer_now_test3() returns int LANGUAGE SQL STABLE as $$ SELECT coalesce(max(timec), 0) FROM conditions_for_perm_check_w_grant $$; +SELECT set_integer_now_func('conditions_for_perm_check_w_grant', 'integer_now_test3'); + set_integer_now_func +---------------------- + +(1 row) + +GRANT SELECT, TRIGGER ON conditions_for_perm_check_w_grant TO public; +insert into conditions_for_perm_check_w_grant +select generate_series(0, 30, 10), 'POR', 55, 75, 40, 70, NULL; +--need both select and trigger permissions to create a materialized view on top of it. +GRANT SELECT, TRIGGER ON conditions_for_perm_check_w_grant TO public; +\c :TEST_DBNAME :ROLE_SUPERUSER +create schema custom_schema; +CREATE FUNCTION get_constant() RETURNS INTEGER LANGUAGE SQL IMMUTABLE AS +$BODY$ + SELECT 10; +$BODY$; +REVOKE EXECUTE ON FUNCTION get_constant() FROM PUBLIC; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +\set ON_ERROR_STOP 0 +select from alter_job(:cagg_job_id, max_runtime => NULL); +ERROR: insufficient permissions to alter job 1000 +--make sure that commands fail +ALTER MATERIALIZED VIEW mat_refresh_test SET(timescaledb.materialized_only = true); +ERROR: must be owner of continuous aggregate "mat_refresh_test" +DROP MATERIALIZED VIEW mat_refresh_test; +ERROR: must be owner of view mat_refresh_test +CALL refresh_continuous_aggregate('mat_refresh_test', NULL, NULL); +ERROR: must be owner of view mat_refresh_test +SELECT * FROM mat_refresh_test; +ERROR: permission denied for view mat_refresh_test +-- Test permissions also when the watermark is not constified and the ACL checks +-- in ts_continuous_agg_watermark are executed +SET timescaledb.enable_cagg_watermark_constify = OFF; +SELECT * FROM mat_refresh_test; +ERROR: permission denied for materialized view mat_refresh_test +RESET timescaledb.enable_cagg_watermark_constify; +SELECT * FROM :materialization_hypertable; +ERROR: permission denied for table _materialized_hypertable_2 +SELECT * FROM :"mat_chunk_schema".:"mat_chunk_table"; +ERROR: permission denied for table _hyper_2_2_chunk +--cannot create a mat view without select and trigger grants +CREATE MATERIALIZED VIEW mat_perm_view_test +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +as +select location, max(humidity) +from conditions_for_perm_check +group by time_bucket(100, timec), location WITH NO DATA; +ERROR: permission denied for table conditions_for_perm_check +--cannot create mat view in a schema without create privileges +CREATE MATERIALIZED VIEW custom_schema.mat_perm_view_test +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +as +select location, max(humidity) +from conditions_for_perm_check_w_grant +group by time_bucket(100, timec), location WITH NO DATA; +ERROR: permission denied for schema custom_schema +--cannot use a function without EXECUTE privileges +--you can create a VIEW but cannot refresh it +CREATE MATERIALIZED VIEW mat_perm_view_test +WITH ( timescaledb.continuous, timescaledb.materialized_only=true) +as +select location, max(humidity), get_constant() +from conditions_for_perm_check_w_grant +group by time_bucket(100, timec), location WITH NO DATA; +--this should fail +CALL refresh_continuous_aggregate('mat_perm_view_test', NULL, NULL); +ERROR: permission denied for function get_constant +DROP MATERIALIZED VIEW mat_perm_view_test; +--can create a mat view on something with select and trigger grants +CREATE MATERIALIZED VIEW mat_perm_view_test +WITH ( timescaledb.continuous, timescaledb.materialized_only=true) +as +select location, max(humidity) +from conditions_for_perm_check_w_grant +group by time_bucket(100, timec), location WITH NO DATA; +CALL refresh_continuous_aggregate('mat_perm_view_test', NULL, NULL); +SELECT * FROM mat_perm_view_test; + location | max +----------+----- + POR | 75 +(1 row) + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +--revoke select permissions from role with mat view +REVOKE SELECT ON conditions_for_perm_check_w_grant FROM public; +insert into conditions_for_perm_check_w_grant +select generate_series(100, 130, 10), 'POR', 65, 85, 30, 90, NULL; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +--refresh mat view should now fail due to lack of permissions +CALL refresh_continuous_aggregate('mat_perm_view_test', NULL, NULL); +ERROR: permission denied for table conditions_for_perm_check_w_grant +--but the old data will still be there +SELECT * FROM mat_perm_view_test; + location | max +----------+----- + POR | 75 +(1 row) + +\set VERBOSITY default +-- Test that grants and revokes are propagated to the implementation +-- objects, that is, the user view, the partial view, the direct view, +-- and the materialization table. +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +CREATE TABLE devices ( + time TIMESTAMPTZ NOT NULL, + device INT, + temp DOUBLE PRECISION NULL, + PRIMARY KEY(time, device) +); +SELECT create_hypertable('devices', 'time'); + create_hypertable +---------------------- + (9,public,devices,t) +(1 row) + +GRANT SELECT, TRIGGER ON devices TO :ROLE_DEFAULT_PERM_USER_2; +INSERT INTO devices +SELECT time, (random() * 30)::int, random() * 80 +FROM generate_series('2020-02-01 00:00:00'::timestamptz, '2020-03-01 00:00:00', '1 hour') AS time; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +CREATE MATERIALIZED VIEW devices_summary +WITH (timescaledb.continuous, timescaledb.materialized_only=true) +AS SELECT time_bucket('1 day', time) AS bucket, device, MAX(temp) +FROM devices GROUP BY bucket, device WITH NO DATA; +\x on +SELECT * FROM cagg_info WHERE user_view::text = 'devices_summary'; +-[ RECORD 1 ]-----+--------------------------------------- +user_view | devices_summary +user_view_perm | +mat_table | _materialized_hypertable_10 +mat_table_perm | +direct_view | _timescaledb_internal._direct_view_10 +direct_view_perm | +partial_view | _timescaledb_internal._partial_view_10 +partial_view_perm | + +GRANT ALL ON devices_summary TO :ROLE_DEFAULT_PERM_USER; +SELECT * FROM cagg_info WHERE user_view::text = 'devices_summary'; +-[ RECORD 1 ]-----+-------------------------------------------------------------------------------------------------- +user_view | devices_summary +user_view_perm | {default_perm_user_2=arwdDxtm/default_perm_user_2,default_perm_user=arwdDxtm/default_perm_user_2} +mat_table | _materialized_hypertable_10 +mat_table_perm | {default_perm_user_2=arwdDxtm/default_perm_user_2,default_perm_user=arwdDxtm/default_perm_user_2} +direct_view | _timescaledb_internal._direct_view_10 +direct_view_perm | {default_perm_user_2=arwdDxtm/default_perm_user_2,default_perm_user=arwdDxtm/default_perm_user_2} +partial_view | _timescaledb_internal._partial_view_10 +partial_view_perm | {default_perm_user_2=arwdDxtm/default_perm_user_2,default_perm_user=arwdDxtm/default_perm_user_2} + +REVOKE SELECT, UPDATE ON devices_summary FROM :ROLE_DEFAULT_PERM_USER; +SELECT * FROM cagg_info WHERE user_view::text = 'devices_summary'; +-[ RECORD 1 ]-----+------------------------------------------------------------------------------------------------ +user_view | devices_summary +user_view_perm | {default_perm_user_2=arwdDxtm/default_perm_user_2,default_perm_user=adDxtm/default_perm_user_2} +mat_table | _materialized_hypertable_10 +mat_table_perm | {default_perm_user_2=arwdDxtm/default_perm_user_2,default_perm_user=adDxtm/default_perm_user_2} +direct_view | _timescaledb_internal._direct_view_10 +direct_view_perm | {default_perm_user_2=arwdDxtm/default_perm_user_2,default_perm_user=adDxtm/default_perm_user_2} +partial_view | _timescaledb_internal._partial_view_10 +partial_view_perm | {default_perm_user_2=arwdDxtm/default_perm_user_2,default_perm_user=adDxtm/default_perm_user_2} + +\x off +-- Check for default privilege permissions get propagated to the materialization hypertable +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE SCHEMA test_default_privileges; +GRANT USAGE ON SCHEMA "test_default_privileges" TO :ROLE_DEFAULT_PERM_USER; +ALTER DEFAULT PRIVILEGES IN SCHEMA "test_default_privileges" GRANT SELECT ON TABLES TO :ROLE_DEFAULT_PERM_USER; +CREATE TABLE test_default_privileges.devices ( + time TIMESTAMPTZ NOT NULL, + device INT, + temp DOUBLE PRECISION NULL, + PRIMARY KEY(time, device) +); +SELECT create_hypertable('test_default_privileges.devices', 'time'); + create_hypertable +---------------------------------------- + (11,test_default_privileges,devices,t) +(1 row) + +CREATE MATERIALIZED VIEW test_default_privileges.devices_summary +WITH (timescaledb.continuous, timescaledb.materialized_only=false) +AS +SELECT time_bucket('1 day', time) AS bucket, device, MAX(temp) +FROM test_default_privileges.devices +GROUP BY bucket, device +WITH NO DATA; +-- check if user view perms have been propagated to the mat-ht +SELECT user_view_perm IS NOT DISTINCT FROM mat_table_perm +FROM cagg_info +WHERE user_view = 'test_default_privileges.devices_summary'::regclass; + ?column? +---------- + t +(1 row) + diff --git a/tsl/test/expected/cagg_repair-17.out b/tsl/test/expected/cagg_repair-17.out index ffb4db7beb1..164a0fc8201 100644 --- a/tsl/test/expected/cagg_repair-17.out +++ b/tsl/test/expected/cagg_repair-17.out @@ -76,7 +76,7 @@ View definition: min, max, sum - FROM _timescaledb_internal._materialized_hypertable_2 _materialized_hypertable_2; + FROM _timescaledb_internal._materialized_hypertable_2; CALL refresh_continuous_aggregate('conditions_summary', NULL, '2021-06-22 00:00:00-00'); SELECT * FROM conditions_summary ORDER BY bucket, device_name; @@ -106,7 +106,7 @@ View definition: min, max, sum - FROM _timescaledb_internal._materialized_hypertable_2 _materialized_hypertable_2; + FROM _timescaledb_internal._materialized_hypertable_2; SELECT * FROM conditions_summary ORDER BY bucket, device_name; bucket | device_name | min | max | sum @@ -134,7 +134,7 @@ View definition: min, max, sum - FROM _timescaledb_internal._materialized_hypertable_2 _materialized_hypertable_2; + FROM _timescaledb_internal._materialized_hypertable_2; SELECT * FROM conditions_summary ORDER BY bucket, device_name; bucket | device_name | min | max | sum @@ -162,7 +162,7 @@ View definition: _materialized_hypertable_2.min, _materialized_hypertable_2.max, _materialized_hypertable_2.sum - FROM _timescaledb_internal._materialized_hypertable_2 _materialized_hypertable_2 + FROM _timescaledb_internal._materialized_hypertable_2 WHERE _materialized_hypertable_2.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone) UNION ALL SELECT time_bucket('@ 7 days'::interval, conditions."time") AS bucket, @@ -205,7 +205,7 @@ View definition: _materialized_hypertable_2.min, _materialized_hypertable_2.max, _materialized_hypertable_2.sum - FROM _timescaledb_internal._materialized_hypertable_2 _materialized_hypertable_2 + FROM _timescaledb_internal._materialized_hypertable_2 WHERE _materialized_hypertable_2.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone) UNION ALL SELECT time_bucket('@ 7 days'::interval, conditions."time") AS bucket, @@ -247,7 +247,7 @@ View definition: _materialized_hypertable_2.min, _materialized_hypertable_2.max, _materialized_hypertable_2.sum - FROM _timescaledb_internal._materialized_hypertable_2 _materialized_hypertable_2 + FROM _timescaledb_internal._materialized_hypertable_2 WHERE _materialized_hypertable_2.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(2)), '-infinity'::timestamp with time zone) UNION ALL SELECT time_bucket('@ 7 days'::interval, conditions."time") AS bucket, diff --git a/tsl/test/expected/cagg_usage-17.out b/tsl/test/expected/cagg_usage-17.out index 8f214e56f31..1ad0d6c7ef4 100644 --- a/tsl/test/expected/cagg_usage-17.out +++ b/tsl/test/expected/cagg_usage-17.out @@ -444,10 +444,12 @@ SELECT current_setting('timezone'); -- should be blocked because non-immutable expression \set ON_ERROR_STOP 0 -CREATE MATERIALIZED VIEW cagg1 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 day', time, current_setting('timezone')) FROM metrics GROUP BY 1; +CREATE MATERIALIZED VIEW cagg1 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS +SELECT time_bucket('1 day', time, current_setting('timezone')) FROM metrics GROUP BY 1; ERROR: only immutable expressions allowed in time bucket function \set ON_ERROR_STOP 1 -CREATE MATERIALIZED VIEW cagg1 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 day', time, 'PST8PDT') FROM metrics GROUP BY 1; +CREATE MATERIALIZED VIEW cagg1 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS +SELECT time_bucket('1 day', time, 'PST8PDT') FROM metrics GROUP BY 1; NOTICE: refreshing continuous aggregate "cagg1" SELECT * FROM cagg1; time_bucket @@ -458,7 +460,8 @@ SELECT * FROM cagg1; Tue Jan 25 00:00:00 2000 PST (4 rows) -CREATE MATERIALIZED VIEW cagg2 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 month', time, 'PST8PDT') FROM metrics GROUP BY 1; +CREATE MATERIALIZED VIEW cagg2 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS +SELECT time_bucket('1 month', time, 'PST8PDT') FROM metrics GROUP BY 1; NOTICE: refreshing continuous aggregate "cagg2" SELECT * FROM cagg2; time_bucket @@ -467,14 +470,28 @@ SELECT * FROM cagg2; Sat Jan 01 00:00:00 2000 PST (2 rows) --- custom origin - not supported due to variable size -\set ON_ERROR_STOP 0 -CREATE MATERIALIZED VIEW cagg3 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 month', time, 'PST8PDT', '2000-01-01'::timestamptz) FROM metrics GROUP BY 1; -ERROR: cannot create continuous aggregate with variable-width bucket using offset or origin. --- offset - not supported due to variable size -CREATE MATERIALIZED VIEW cagg4 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 month', time, 'PST8PDT', "offset":= INTERVAL '15 day') FROM metrics GROUP BY 1; -ERROR: cannot create continuous aggregate with variable-width bucket using offset or origin. -\set ON_ERROR_STOP 1 +-- custom origin with variable size +CREATE MATERIALIZED VIEW cagg3 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS +SELECT time_bucket('1 month', time, 'PST8PDT', '2000-01-01'::timestamptz) FROM metrics GROUP BY 1; +NOTICE: refreshing continuous aggregate "cagg3" +SELECT * FROM cagg3; + time_bucket +------------------------------ + Wed Dec 01 00:00:00 1999 PST + Sat Jan 01 00:00:00 2000 PST +(2 rows) + +-- offset with variable size +CREATE MATERIALIZED VIEW cagg4 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS +SELECT time_bucket('1 month', time, 'PST8PDT', "offset":= INTERVAL '15 day') FROM metrics GROUP BY 1; +NOTICE: refreshing continuous aggregate "cagg4" +SELECT * FROM cagg4; + time_bucket +------------------------------ + Thu Dec 16 00:00:00 1999 PST + Sun Jan 16 00:00:00 2000 PST +(2 rows) + -- -- drop chunks tests -- @@ -494,12 +511,14 @@ ORDER BY 1; (4 rows) -- all caggs in the new format (finalized=true) -SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1; +SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3', 'cagg4') ORDER BY 1; user_view_name | finalized ----------------+----------- cagg1 | t cagg2 | t -(2 rows) + cagg3 | t + cagg4 | t +(4 rows) -- dropping chunk should also remove the catalog data SELECT drop_chunks('metrics', older_than => '2000-01-01 00:00:00-02'::timestamptz); @@ -527,12 +546,14 @@ ORDER BY 1; UPDATE _timescaledb_catalog.continuous_agg SET finalized=FALSE WHERE user_view_name = 'cagg1'; \c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER -- cagg1 now is a fake old format (finalized=false) -SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1; +SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3', 'cagg4') ORDER BY 1; user_view_name | finalized ----------------+----------- cagg1 | f cagg2 | t -(2 rows) + cagg3 | t + cagg4 | t +(4 rows) -- cagg1 now is in the old format (finalized=false) -- dropping chunk should NOT remove the catalog data @@ -560,11 +581,13 @@ ORDER BY 1; DROP MATERIALIZED VIEW cagg1; NOTICE: drop cascades to table _timescaledb_internal._hyper_12_21_chunk -- no more old format caggs (finalized=false) -SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1; +SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3', 'cagg4') ORDER BY 1; user_view_name | finalized ----------------+----------- cagg2 | t -(1 row) + cagg3 | t + cagg4 | t +(3 rows) -- dropping chunk should remove the catalog data SELECT drop_chunks('metrics', older_than => '2000-01-25 00:00:00-02'::timestamptz); diff --git a/tsl/test/expected/chunk_column_stats-17.out b/tsl/test/expected/chunk_column_stats-17.out new file mode 100644 index 00000000000..a688e79a21a --- /dev/null +++ b/tsl/test/expected/chunk_column_stats-17.out @@ -0,0 +1,544 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set PREFIX 'EXPLAIN (costs off, timing off, summary off)' +CREATE OR REPLACE VIEW compressed_chunk_info_view AS +SELECT + h.schema_name AS hypertable_schema, + h.table_name AS hypertable_name, + c.schema_name || '.' || c.table_name as chunk_name, + c.status as chunk_status +FROM + _timescaledb_catalog.hypertable h JOIN + _timescaledb_catalog.chunk c ON h.id = c.hypertable_id + LEFT JOIN _timescaledb_catalog.chunk comp +ON comp.id = c.compressed_chunk_id +; +CREATE TABLE sample_table ( + time TIMESTAMP WITH TIME ZONE NOT NULL, + sensor_id INTEGER NOT NULL, + cpu double precision null, + temperature double precision null, + name varchar(100) default 'this is a default string value' +); +CREATE INDEX sense_idx ON sample_table (sensor_id); +SELECT * FROM create_hypertable('sample_table', 'time', + chunk_time_interval => INTERVAL '2 months'); +WARNING: column type "character varying" used for "name" does not follow best practices + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 1 | public | sample_table | t +(1 row) + +\set start_date '2022-01-28 01:09:53.583252+05:30' +-- insert into chunk1 +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 1, 21.98, 33.123, 'new row1'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 2, 17.66, 13.875, 'new row1'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 3, 21.98, 33.123, 'new row2'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 8, 21.98, 33.123, 'new row2'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 4, 21.98, 33.123, 'new row2'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 5, 0.988, 33.123, 'new row3'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 6, 4.6554, 47, 'new row3'); +\set start_date '2023-03-17 17:51:11.322998+05:30' +-- insert into new chunks +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 12, 121.98, 33.123, 'new row1'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 12, 117.66, 13.875, 'new row1'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 13, 121.98, 33.123, 'new row2'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 9, 121.98, 33.123, 'new row2'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 14, 121.98, 33.123, 'new row2'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 15, 10.988, 33.123, 'new row3'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 16, 14.6554, 47, 'new row3'); +-- Non-int, date, timestamp cannot be specified as a min/max ranges for now +-- We could expand to FLOATs, NUMERICs later +\set ON_ERROR_STOP 0 +SELECT * FROM enable_chunk_skipping('sample_table', 'name'); +ERROR: data type "character varying" unsupported for range calculation +SELECT * FROM enable_chunk_skipping('sample_table', NULL); +ERROR: column name cannot be NULL +SELECT * FROM enable_chunk_skipping(NULL, 'name'); +ERROR: hypertable cannot be NULL +SELECT * FROM enable_chunk_skipping('sample_table1', 'name'); +ERROR: relation "sample_table1" does not exist at character 37 +CREATE TABLE plain_table(like sample_table); +SELECT * FROM enable_chunk_skipping('plain_table', 'sensor_id'); +ERROR: table "plain_table" is not a hypertable +DROP TABLE plain_table; +\set ON_ERROR_STOP 1 +-- Specify tracking of min/max ranges for a column +SELECT * FROM enable_chunk_skipping('sample_table', 'sensor_id'); + column_stats_id | enabled +-----------------+--------- + 1 | t +(1 row) + +-- The above should add an entry with MIN/MAX int64 entries for invalid chunk id +-- to indicate that ranges on this column should be calculated for chunks +SELECT * from _timescaledb_catalog.chunk_column_stats; + id | hypertable_id | chunk_id | column_name | range_start | range_end | valid +----+---------------+----------+-------------+----------------------+---------------------+------- + 1 | 1 | 0 | sensor_id | -9223372036854775808 | 9223372036854775807 | t + 2 | 1 | 1 | sensor_id | -9223372036854775808 | 9223372036854775807 | t + 3 | 1 | 2 | sensor_id | -9223372036854775808 | 9223372036854775807 | t +(3 rows) + +-- Skipping should work +SELECT * FROM enable_chunk_skipping('sample_table', 'sensor_id', true); +NOTICE: already enabled for column "sensor_id", skipping + column_stats_id | enabled +-----------------+--------- + 1 | t +(1 row) + +-- A query using a WHERE clause on "sensor_id" column will scan all the chunks +:PREFIX SELECT * FROM sample_table WHERE sensor_id > 9; + QUERY PLAN +----------------------------------------------------------------------- + Append + -> Index Scan using _hyper_1_1_chunk_sense_idx on _hyper_1_1_chunk + Index Cond: (sensor_id > 9) + -> Index Scan using _hyper_1_2_chunk_sense_idx on _hyper_1_2_chunk + Index Cond: (sensor_id > 9) +(5 rows) + +-- For the purposes of min/max range tracking, a compressed chunk is considered as a +-- completed chunk. +-- enable compression +ALTER TABLE sample_table SET ( + timescaledb.compress, + timescaledb.compress_orderby = 'time' +); +WARNING: there was some uncertainty picking the default segment by for the hypertable: Please make sure sensor_id is not a unique column and appropriate for a segment by +NOTICE: default segment by for hypertable "sample_table" is set to "sensor_id" +-- +-- compress one chunk +SELECT show_chunks('sample_table') AS "CH_NAME" order by 1 limit 1 \gset +SELECT compress_chunk(:'CH_NAME'); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +SELECT id AS "CHUNK_ID" from _timescaledb_catalog.chunk WHERE table_name = '_hyper_1_1_chunk' \gset +-- There should be an entry with min/max range computed for this chunk for this +-- "sensor_id" column. +SELECT * from _timescaledb_catalog.chunk_column_stats where chunk_id = :'CHUNK_ID'; + id | hypertable_id | chunk_id | column_name | range_start | range_end | valid +----+---------------+----------+-------------+-------------+-----------+------- + 2 | 1 | 1 | sensor_id | 1 | 9 | t +(1 row) + +-- check chunk compression status +SELECT chunk_status +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' AND chunk_name = :'CH_NAME'; + chunk_status +-------------- + 1 +(1 row) + +:PREFIX SELECT * FROM sample_table WHERE sensor_id > 9; + QUERY PLAN +----------------------------------------------------------------- + Index Scan using _hyper_1_2_chunk_sense_idx on _hyper_1_2_chunk + Index Cond: (sensor_id > 9) +(2 rows) + +-- do update, this will change the status of the chunk +UPDATE sample_table SET name = 'updated row' WHERE cpu = 21.98 AND temperature = 33.123; +-- check chunk compression status +SELECT chunk_status +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' AND chunk_name = :'CH_NAME'; + chunk_status +-------------- + 9 +(1 row) + +-- There should be an entry with "valid" set to false for this chunk +SELECT * from _timescaledb_catalog.chunk_column_stats WHERE chunk_id = :'CHUNK_ID'; + id | hypertable_id | chunk_id | column_name | range_start | range_end | valid +----+---------------+----------+-------------+-------------+-----------+------- + 2 | 1 | 1 | sensor_id | 1 | 9 | f +(1 row) + +-- A query using a WHERE clause on "sensor_id" column will go back to scanning all the chunks +-- along with an expensive DECOMPRESS on the first chunk +:PREFIX SELECT * FROM sample_table WHERE sensor_id > 9; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------- + Append + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk + Filter: (sensor_id > 9) + -> Index Scan using compress_hyper_2_3_chunk_sensor_id__ts_meta_sequence_num_idx on compress_hyper_2_3_chunk + Index Cond: (sensor_id > 9) + -> Index Scan using _hyper_1_1_chunk_sense_idx on _hyper_1_1_chunk + Index Cond: (sensor_id > 9) + -> Index Scan using _hyper_1_2_chunk_sense_idx on _hyper_1_2_chunk + Index Cond: (sensor_id > 9) +(9 rows) + +-- Remove the index to check the sequential min/max calculation code as well +DROP INDEX sense_idx; +-- recompress the partial chunk +SELECT compress_chunk(:'CH_NAME'); +WARNING: no index on "sensor_id" found for column range on chunk "_hyper_1_1_chunk" + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +-- check chunk compression status +SELECT chunk_status +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' AND chunk_name = :'CH_NAME'; + chunk_status +-------------- + 1 +(1 row) + +-- The chunk entry should become "valid" again +SELECT * from _timescaledb_catalog.chunk_column_stats WHERE chunk_id = :'CHUNK_ID'; + id | hypertable_id | chunk_id | column_name | range_start | range_end | valid +----+---------------+----------+-------------+-------------+-----------+------- + 2 | 1 | 1 | sensor_id | 1 | 9 | t +(1 row) + +-- A query using a WHERE clause on "sensor_id" column will scan the proper chunk +-- due to chunk exclusion using min/max ranges calculated above +:PREFIX SELECT * FROM sample_table WHERE sensor_id > 9; + QUERY PLAN +------------------------------ + Seq Scan on _hyper_1_2_chunk + Filter: (sensor_id > 9) +(2 rows) + +:PREFIX SELECT * FROM sample_table WHERE sensor_id = 10; + QUERY PLAN +------------------------------ + Seq Scan on _hyper_1_2_chunk + Filter: (sensor_id = 10) +(2 rows) + +:PREFIX SELECT * FROM sample_table WHERE sensor_id < 11; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------- + Append + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk + -> Index Scan using compress_hyper_2_3_chunk_sensor_id__ts_meta_sequence_num_idx on compress_hyper_2_3_chunk + Index Cond: (sensor_id < 11) + -> Seq Scan on _hyper_1_2_chunk + Filter: (sensor_id < 11) +(6 rows) + +:PREFIX SELECT * FROM sample_table WHERE sensor_id > 9 and sensor_id < 20; + QUERY PLAN +-------------------------------------------------- + Seq Scan on _hyper_1_2_chunk + Filter: ((sensor_id > 9) AND (sensor_id < 20)) +(2 rows) + +:PREFIX SELECT * FROM sample_table WHERE sensor_id > 1 and sensor_id < 7; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------- + Append + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk + -> Index Scan using compress_hyper_2_3_chunk_sensor_id__ts_meta_sequence_num_idx on compress_hyper_2_3_chunk + Index Cond: ((sensor_id > 1) AND (sensor_id < 7)) + -> Seq Scan on _hyper_1_2_chunk + Filter: ((sensor_id > 1) AND (sensor_id < 7)) +(6 rows) + +-- Executor startup time exclusion will also use these ranges appropriately +:PREFIX UPDATE sample_table set sensor_id = 10 WHERE sensor_id > length(substring(version(),1,9)); + QUERY PLAN +-------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Update on sample_table + Update on _hyper_1_1_chunk sample_table + Update on _hyper_1_2_chunk sample_table_1 + -> Result + -> Custom Scan (ChunkAppend) on sample_table + Chunks excluded during startup: 1 + -> Seq Scan on _hyper_1_2_chunk sample_table_1 + Filter: (sensor_id > length("substring"(version(), 1, 9))) +(9 rows) + +:PREFIX DELETE FROM sample_table WHERE sensor_id > length(substring(version(),1,10)); + QUERY PLAN +--------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Delete on sample_table + Delete on _hyper_1_1_chunk sample_table + Delete on _hyper_1_2_chunk sample_table_1 + -> Custom Scan (ChunkAppend) on sample_table + Chunks excluded during startup: 1 + -> Seq Scan on _hyper_1_2_chunk sample_table_1 + Filter: (sensor_id > length("substring"(version(), 1, 10))) +(8 rows) + +:PREFIX SELECT * FROM sample_table WHERE sensor_id > length(substring(version(),1,11)); + QUERY PLAN +--------------------------------------------------------------------- + Custom Scan (ChunkAppend) on sample_table + Chunks excluded during startup: 1 + -> Seq Scan on _hyper_1_2_chunk + Filter: (sensor_id > length("substring"(version(), 1, 11))) +(4 rows) + +-- Scan both chunks +:PREFIX SELECT * FROM sample_table WHERE sensor_id > length(substring(version(),1,6)); + QUERY PLAN +-------------------------------------------------------------------- + Custom Scan (ChunkAppend) on sample_table + Chunks excluded during startup: 0 + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk + Filter: (sensor_id > length("substring"(version(), 1, 6))) + -> Seq Scan on compress_hyper_2_3_chunk + -> Seq Scan on _hyper_1_2_chunk + Filter: (sensor_id > length("substring"(version(), 1, 6))) +(7 rows) + +-- IN/ANY is not supported for now +:PREFIX SELECT * FROM sample_table WHERE sensor_id IN (9, 10, 11); + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------- + Append + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk + -> Index Scan using compress_hyper_2_3_chunk_sensor_id__ts_meta_sequence_num_idx on compress_hyper_2_3_chunk + Index Cond: (sensor_id = ANY ('{9,10,11}'::integer[])) + -> Seq Scan on _hyper_1_2_chunk + Filter: (sensor_id = ANY ('{9,10,11}'::integer[])) +(6 rows) + +:PREFIX SELECT * FROM sample_table WHERE sensor_id = ANY(ARRAY[9, 10, 11]); + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------- + Append + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk + -> Index Scan using compress_hyper_2_3_chunk_sensor_id__ts_meta_sequence_num_idx on compress_hyper_2_3_chunk + Index Cond: (sensor_id = ANY ('{9,10,11}'::integer[])) + -> Seq Scan on _hyper_1_2_chunk + Filter: (sensor_id = ANY ('{9,10,11}'::integer[])) +(6 rows) + +-- Newly added chunks should also have MIN/MAX entry +\set start_date '2024-01-28 01:09:51.583252+05:30' +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 1, 9, 78.999, 'new row4'); +SELECT * from _timescaledb_catalog.chunk_column_stats; + id | hypertable_id | chunk_id | column_name | range_start | range_end | valid +----+---------------+----------+-------------+----------------------+---------------------+------- + 1 | 1 | 0 | sensor_id | -9223372036854775808 | 9223372036854775807 | t + 3 | 1 | 2 | sensor_id | -9223372036854775808 | 9223372036854775807 | t + 2 | 1 | 1 | sensor_id | 1 | 9 | t + 4 | 1 | 4 | sensor_id | -9223372036854775808 | 9223372036854775807 | t +(4 rows) + +-- Check that a RENAME COLUMN works ok +ALTER TABLE sample_table RENAME COLUMN sensor_id TO sense_id; +-- use the disable_chunk_skipping API to remove the min/max range entries +SELECT * from disable_chunk_skipping('sample_table', 'sense_id'); + hypertable_id | column_name | disabled +---------------+-------------+---------- + 1 | sense_id | t +(1 row) + +SELECT * from _timescaledb_catalog.chunk_column_stats; + id | hypertable_id | chunk_id | column_name | range_start | range_end | valid +----+---------------+----------+-------------+-------------+-----------+------- +(0 rows) + +SELECT * from disable_chunk_skipping('sample_table', 'sense_id', true); +NOTICE: statistics not enabled for column "sense_id", skipping + hypertable_id | column_name | disabled +---------------+-------------+---------- + 1 | sense_id | f +(1 row) + +ALTER TABLE sample_table RENAME COLUMN sense_id TO sensor_id; +\set ON_ERROR_STOP 0 +SELECT * from disable_chunk_skipping('sample_table', 'sensor_id'); +ERROR: statistics not enabled for column "sensor_id" +SELECT * from disable_chunk_skipping('sample_table', NULL); +ERROR: column name cannot be NULL +SELECT * from disable_chunk_skipping(NULL, 'sensor_id'); +ERROR: hypertable cannot be NULL +SELECT * from disable_chunk_skipping('sample_table1', 'sensor_id'); +ERROR: relation "sample_table1" does not exist at character 38 +-- should only work on columns that have been enabled for tracking +SELECT * from disable_chunk_skipping('sample_table', 'time'); +ERROR: statistics not enabled for column "time" +SELECT * from disable_chunk_skipping('sample_table', 'cpu'); +ERROR: statistics not enabled for column "cpu" +\set ON_ERROR_STOP 1 +SELECT * FROM enable_chunk_skipping('sample_table', 'sensor_id'); + column_stats_id | enabled +-----------------+--------- + 5 | t +(1 row) + +-- Chunk was already compressed before we enabled stats. It will +-- point to min/max entries till the ranges get refreshed later. +SELECT decompress_chunk(:'CH_NAME'); + decompress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +SELECT * from _timescaledb_catalog.chunk_column_stats; + id | hypertable_id | chunk_id | column_name | range_start | range_end | valid +----+---------------+----------+-------------+----------------------+---------------------+------- + 5 | 1 | 0 | sensor_id | -9223372036854775808 | 9223372036854775807 | t + 6 | 1 | 2 | sensor_id | -9223372036854775808 | 9223372036854775807 | t + 8 | 1 | 4 | sensor_id | -9223372036854775808 | 9223372036854775807 | t + 7 | 1 | 1 | sensor_id | -9223372036854775808 | 9223372036854775807 | t +(4 rows) + +-- Compressing a chunk again should calculate proper ranges +SELECT compress_chunk(:'CH_NAME'); +WARNING: no index on "sensor_id" found for column range on chunk "_hyper_1_1_chunk" + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +SELECT * from _timescaledb_catalog.chunk_column_stats; + id | hypertable_id | chunk_id | column_name | range_start | range_end | valid +----+---------------+----------+-------------+----------------------+---------------------+------- + 5 | 1 | 0 | sensor_id | -9223372036854775808 | 9223372036854775807 | t + 6 | 1 | 2 | sensor_id | -9223372036854775808 | 9223372036854775807 | t + 8 | 1 | 4 | sensor_id | -9223372036854775808 | 9223372036854775807 | t + 7 | 1 | 1 | sensor_id | 1 | 9 | t +(4 rows) + +SELECT decompress_chunk(:'CH_NAME'); + decompress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +-- Entry should be reset for this chunk now +SELECT * from _timescaledb_catalog.chunk_column_stats; + id | hypertable_id | chunk_id | column_name | range_start | range_end | valid +----+---------------+----------+-------------+----------------------+---------------------+------- + 5 | 1 | 0 | sensor_id | -9223372036854775808 | 9223372036854775807 | t + 6 | 1 | 2 | sensor_id | -9223372036854775808 | 9223372036854775807 | t + 8 | 1 | 4 | sensor_id | -9223372036854775808 | 9223372036854775807 | t + 7 | 1 | 1 | sensor_id | -9223372036854775808 | 9223372036854775807 | t +(4 rows) + +-- Check that truncate resets the entry in the catalog +SELECT compress_chunk(:'CH_NAME'); +WARNING: no index on "sensor_id" found for column range on chunk "_hyper_1_1_chunk" + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +SELECT * from _timescaledb_catalog.chunk_column_stats; + id | hypertable_id | chunk_id | column_name | range_start | range_end | valid +----+---------------+----------+-------------+----------------------+---------------------+------- + 5 | 1 | 0 | sensor_id | -9223372036854775808 | 9223372036854775807 | t + 6 | 1 | 2 | sensor_id | -9223372036854775808 | 9223372036854775807 | t + 8 | 1 | 4 | sensor_id | -9223372036854775808 | 9223372036854775807 | t + 7 | 1 | 1 | sensor_id | 1 | 9 | t +(4 rows) + +TRUNCATE :CH_NAME; +SELECT * from _timescaledb_catalog.chunk_column_stats; + id | hypertable_id | chunk_id | column_name | range_start | range_end | valid +----+---------------+----------+-------------+----------------------+---------------------+------- + 5 | 1 | 0 | sensor_id | -9223372036854775808 | 9223372036854775807 | t + 6 | 1 | 2 | sensor_id | -9223372036854775808 | 9223372036854775807 | t + 8 | 1 | 4 | sensor_id | -9223372036854775808 | 9223372036854775807 | t + 7 | 1 | 1 | sensor_id | -9223372036854775808 | 9223372036854775807 | t +(4 rows) + +-- Check that drop chunk also removes entries from the catalog +SELECT drop_chunks('sample_table', older_than => '2022-02-28'); + drop_chunks +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +-- Entry should be removed for this chunk now +SELECT * from _timescaledb_catalog.chunk_column_stats; + id | hypertable_id | chunk_id | column_name | range_start | range_end | valid +----+---------------+----------+-------------+----------------------+---------------------+------- + 5 | 1 | 0 | sensor_id | -9223372036854775808 | 9223372036854775807 | t + 6 | 1 | 2 | sensor_id | -9223372036854775808 | 9223372036854775807 | t + 8 | 1 | 4 | sensor_id | -9223372036854775808 | 9223372036854775807 | t +(3 rows) + +-- disable compression to allow dropping of the column +ALTER TABLE sample_table SET ( + timescaledb.compress = FALSE +); +-- Check that a DROP COLUMN removes entries from catalogs as well +ALTER TABLE sample_table DROP COLUMN sensor_id; +SELECT * from _timescaledb_catalog.chunk_column_stats; + id | hypertable_id | chunk_id | column_name | range_start | range_end | valid +----+---------------+----------+-------------+-------------+-----------+------- +(0 rows) + +DROP TABLE sample_table; +SELECT * from _timescaledb_catalog.chunk_column_stats; + id | hypertable_id | chunk_id | column_name | range_start | range_end | valid +----+---------------+----------+-------------+-------------+-----------+------- +(0 rows) + +-- Check that empty hypertables can have enable_chunk_skipping and +-- that new chunks get entries in the catalog as they get added +CREATE TABLE sample_table1 ( + time TIMESTAMP WITH TIME ZONE NOT NULL, + sensor_id INTEGER NOT NULL, + cpu double precision null, + temperature double precision null, + name varchar(100) default 'this is a default string value' +); +CREATE INDEX sense_idx ON sample_table1 (sensor_id); +SELECT * FROM create_hypertable('sample_table1', 'time', + chunk_time_interval => INTERVAL '2 months'); +WARNING: column type "character varying" used for "name" does not follow best practices + hypertable_id | schema_name | table_name | created +---------------+-------------+---------------+--------- + 3 | public | sample_table1 | t +(1 row) + +SELECT * FROM enable_chunk_skipping('sample_table1', 'sensor_id'); + column_stats_id | enabled +-----------------+--------- + 9 | t +(1 row) + +SELECT * from _timescaledb_catalog.chunk_column_stats; + id | hypertable_id | chunk_id | column_name | range_start | range_end | valid +----+---------------+----------+-------------+----------------------+---------------------+------- + 9 | 3 | 0 | sensor_id | -9223372036854775808 | 9223372036854775807 | t +(1 row) + +\set start_date '2023-03-17 17:51:11.322998+05:30' +-- insert into new chunks +INSERT INTO sample_table1 VALUES (:'start_date'::timestamptz, 12, 21, 33.123, 'new row1'); +SELECT * from _timescaledb_catalog.chunk_column_stats; + id | hypertable_id | chunk_id | column_name | range_start | range_end | valid +----+---------------+----------+-------------+----------------------+---------------------+------- + 9 | 3 | 0 | sensor_id | -9223372036854775808 | 9223372036854775807 | t + 10 | 3 | 7 | sensor_id | -9223372036854775808 | 9223372036854775807 | t +(2 rows) + +-- Check that ALTER TYPE for a column on which stats are enabled only works +-- in a few sub types +ALTER TABLE sample_table1 ALTER COLUMN sensor_id TYPE BIGINT; +\set ON_ERROR_STOP 0 +ALTER TABLE sample_table1 ALTER COLUMN sensor_id TYPE TIMESTAMPTZ; +ERROR: column "sensor_id" cannot be cast automatically to type timestamp with time zone +ALTER TABLE sample_table1 ALTER COLUMN sensor_id TYPE TEXT; +ERROR: data type "text" unsupported for statistics calculation +\set ON_ERROR_STOP 1 +SELECT * FROM disable_chunk_skipping('sample_table1', 'sensor_id'); + hypertable_id | column_name | disabled +---------------+-------------+---------- + 3 | sensor_id | t +(1 row) + +ALTER TABLE sample_table1 ALTER COLUMN sensor_id TYPE TEXT; diff --git a/tsl/test/expected/chunk_utils_internal.out b/tsl/test/expected/chunk_utils_internal.out index ee7024e1fc9..bcaead5d1eb 100644 --- a/tsl/test/expected/chunk_utils_internal.out +++ b/tsl/test/expected/chunk_utils_internal.out @@ -8,6 +8,7 @@ -- * drop_chunk -- * attach_foreign_table_chunk -- * hypertable_osm_range_update +\set EXPLAIN 'EXPLAIN (COSTS OFF)' CREATE OR REPLACE VIEW chunk_view AS SELECT ht.table_name AS hypertable_name, @@ -641,13 +642,13 @@ SELECT * from ht_try WHERE timec > '2020-01-01 01:00' ORDER BY 1; -- test ordered append BEGIN; -- before updating the ranges -EXPLAIN SELECT * FROM ht_try ORDER BY 1; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------- - Custom Scan (ChunkAppend) on ht_try (cost=100.00..202.29 rows=3276 width=24) +:EXPLAIN SELECT * FROM ht_try ORDER BY 1; + QUERY PLAN +----------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on ht_try Order: ht_try.timec - -> Foreign Scan on child_fdw_table (cost=100.00..166.59 rows=1706 width=24) - -> Index Scan Backward using _hyper_5_10_chunk_ht_try_timec_idx on _hyper_5_10_chunk (cost=0.15..35.70 rows=1570 width=24) + -> Foreign Scan on child_fdw_table + -> Index Scan Backward using _hyper_5_10_chunk_ht_try_timec_idx on _hyper_5_10_chunk (4 rows) -- range before update @@ -681,13 +682,13 @@ WHERE c.table_name = 'child_fdw_table' AND cc.chunk_id = c.id AND ds.id = cc.dim (1 row) -- should be ordered append now -EXPLAIN SELECT * FROM ht_try ORDER BY 1; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------- - Custom Scan (ChunkAppend) on ht_try (cost=100.00..202.29 rows=3276 width=24) +:EXPLAIN SELECT * FROM ht_try ORDER BY 1; + QUERY PLAN +----------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on ht_try Order: ht_try.timec - -> Foreign Scan on child_fdw_table (cost=100.00..166.59 rows=1706 width=24) - -> Index Scan Backward using _hyper_5_10_chunk_ht_try_timec_idx on _hyper_5_10_chunk (cost=0.15..35.70 rows=1570 width=24) + -> Foreign Scan on child_fdw_table + -> Index Scan Backward using _hyper_5_10_chunk_ht_try_timec_idx on _hyper_5_10_chunk (4 rows) SELECT * FROM ht_try ORDER BY 1; @@ -704,13 +705,13 @@ SELECT _timescaledb_functions.hypertable_osm_range_update('ht_try'); f (1 row) -EXPLAIN SELECT * from ht_try ORDER BY 1; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------- - Merge Append (cost=100.16..235.06 rows=3276 width=24) +:EXPLAIN SELECT * from ht_try ORDER BY 1; + QUERY PLAN +----------------------------------------------------------------------------------------- + Merge Append Sort Key: _hyper_5_10_chunk.timec - -> Index Scan Backward using _hyper_5_10_chunk_ht_try_timec_idx on _hyper_5_10_chunk (cost=0.15..35.70 rows=1570 width=24) - -> Foreign Scan on child_fdw_table (cost=100.00..166.59 rows=1706 width=24) + -> Index Scan Backward using _hyper_5_10_chunk_ht_try_timec_idx on _hyper_5_10_chunk + -> Foreign Scan on child_fdw_table (4 rows) SELECT * from ht_try ORDER BY 1; @@ -738,20 +739,20 @@ SELECT * FROM hypertable_approximate_size('ht_try'); --TEST GUC variable to enable/disable OSM chunk SET timescaledb.enable_tiered_reads=false; -EXPLAIN (COSTS OFF) SELECT * from ht_try; +:EXPLAIN SELECT * from ht_try; QUERY PLAN ------------------------------- Seq Scan on _hyper_5_10_chunk (1 row) -EXPLAIN (COSTS OFF) SELECT * from ht_try WHERE timec > '2022-01-01 01:00'; +:EXPLAIN SELECT * from ht_try WHERE timec > '2022-01-01 01:00'; QUERY PLAN ---------------------------------------------------------------------------------- Index Scan using _hyper_5_10_chunk_ht_try_timec_idx on _hyper_5_10_chunk Index Cond: (timec > 'Sat Jan 01 01:00:00 2022 PST'::timestamp with time zone) (2 rows) -EXPLAIN (COSTS OFF) SELECT * from ht_try WHERE timec < '2023-01-01 01:00'; +:EXPLAIN SELECT * from ht_try WHERE timec < '2023-01-01 01:00'; QUERY PLAN ---------------------------------------------------------------------------------- Index Scan using _hyper_5_10_chunk_ht_try_timec_idx on _hyper_5_10_chunk @@ -759,7 +760,7 @@ EXPLAIN (COSTS OFF) SELECT * from ht_try WHERE timec < '2023-01-01 01:00'; (2 rows) SET timescaledb.enable_tiered_reads=true; -EXPLAIN (COSTS OFF) SELECT * from ht_try; +:EXPLAIN SELECT * from ht_try; QUERY PLAN --------------------------------------- Append @@ -768,14 +769,14 @@ EXPLAIN (COSTS OFF) SELECT * from ht_try; (3 rows) -- foreign chunk contains data from Jan 2020, so it is skipped during planning -EXPLAIN (COSTS OFF) SELECT * from ht_try WHERE timec > '2022-01-01 01:00'; +:EXPLAIN SELECT * from ht_try WHERE timec > '2022-01-01 01:00'; QUERY PLAN ---------------------------------------------------------------------------------- Index Scan using _hyper_5_10_chunk_ht_try_timec_idx on _hyper_5_10_chunk Index Cond: (timec > 'Sat Jan 01 01:00:00 2022 PST'::timestamp with time zone) (2 rows) -EXPLAIN (COSTS OFF) SELECT * from ht_try WHERE timec < '2023-01-01 01:00'; +:EXPLAIN SELECT * from ht_try WHERE timec < '2023-01-01 01:00'; QUERY PLAN ---------------------------------------------------------------------------------------- Append @@ -804,7 +805,7 @@ DO $$ DECLARE r RECORD; BEGIN - EXPLAIN UPDATE ht_try SET value = 2 + EXPLAIN (COSTS OFF) UPDATE ht_try SET value = 2 WHERE acq_id = 10 AND timec > now() - '15 years'::interval INTO r; END $$ LANGUAGE plpgsql; @@ -1414,14 +1415,14 @@ WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_sl (3 rows) -- ordered append should be possible as ranges do not overlap -EXPLAIN SELECT * FROM test_chunkapp ORDER BY 1; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------- - Custom Scan (ChunkAppend) on test_chunkapp (cost=0.15..270.31 rows=6355 width=12) +:EXPLAIN SELECT * FROM test_chunkapp ORDER BY 1; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on test_chunkapp Order: test_chunkapp."time" - -> Index Scan Backward using _hyper_16_26_chunk_test_chunkapp_time_idx on _hyper_16_26_chunk (cost=0.15..42.75 rows=2040 width=12) - -> Index Scan Backward using _hyper_16_27_chunk_test_chunkapp_time_idx on _hyper_16_27_chunk (cost=0.15..42.75 rows=2040 width=12) - -> Foreign Scan on test_chunkapp_fdw_child (cost=100.00..184.80 rows=2275 width=12) + -> Index Scan Backward using _hyper_16_26_chunk_test_chunkapp_time_idx on _hyper_16_26_chunk + -> Index Scan Backward using _hyper_16_27_chunk_test_chunkapp_time_idx on _hyper_16_27_chunk + -> Foreign Scan on test_chunkapp_fdw_child (5 rows) SELECT * FROM test_chunkapp ORDER BY 1; @@ -1457,14 +1458,14 @@ SELECT _timescaledb_functions.hypertable_osm_range_update('test_chunkapp',empty: (1 row) -- ordered append not possible because range is invalid and empty was not specified -EXPLAIN SELECT * FROM test_chunkapp ORDER BY 1; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------- - Merge Append (cost=100.33..352.47 rows=6355 width=12) +:EXPLAIN SELECT * FROM test_chunkapp ORDER BY 1; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Merge Append Sort Key: _hyper_16_26_chunk."time" - -> Index Scan Backward using _hyper_16_26_chunk_test_chunkapp_time_idx on _hyper_16_26_chunk (cost=0.15..42.75 rows=2040 width=12) - -> Index Scan Backward using _hyper_16_27_chunk_test_chunkapp_time_idx on _hyper_16_27_chunk (cost=0.15..42.75 rows=2040 width=12) - -> Foreign Scan on test_chunkapp_fdw_child (cost=100.00..184.80 rows=2275 width=12) + -> Index Scan Backward using _hyper_16_26_chunk_test_chunkapp_time_idx on _hyper_16_26_chunk + -> Index Scan Backward using _hyper_16_27_chunk_test_chunkapp_time_idx on _hyper_16_27_chunk + -> Foreign Scan on test_chunkapp_fdw_child (5 rows) SELECT * FROM test_chunkapp ORDER BY 1; @@ -1486,16 +1487,16 @@ WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_sl (3 rows) -- but also, OSM chunk should be included in the scan, since range is invalid and chunk is not empty -EXPLAIN SELECT * FROM test_chunkapp WHERE time < '2023-01-01' ORDER BY 1; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------- - Merge Append (cost=100.33..234.79 rows=2118 width=12) +:EXPLAIN SELECT * FROM test_chunkapp WHERE time < '2023-01-01' ORDER BY 1; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Merge Append Sort Key: _hyper_16_26_chunk."time" - -> Index Scan Backward using _hyper_16_26_chunk_test_chunkapp_time_idx on _hyper_16_26_chunk (cost=0.15..23.05 rows=680 width=12) + -> Index Scan Backward using _hyper_16_26_chunk_test_chunkapp_time_idx on _hyper_16_26_chunk Index Cond: ("time" < 'Sun Jan 01 00:00:00 2023 PST'::timestamp with time zone) - -> Index Scan Backward using _hyper_16_27_chunk_test_chunkapp_time_idx on _hyper_16_27_chunk (cost=0.15..23.05 rows=680 width=12) + -> Index Scan Backward using _hyper_16_27_chunk_test_chunkapp_time_idx on _hyper_16_27_chunk Index Cond: ("time" < 'Sun Jan 01 00:00:00 2023 PST'::timestamp with time zone) - -> Foreign Scan on test_chunkapp_fdw_child (cost=100.00..161.29 rows=758 width=12) + -> Foreign Scan on test_chunkapp_fdw_child (7 rows) SELECT * FROM test_chunkapp WHERE time < '2023-01-01' ORDER BY 1; @@ -1516,14 +1517,14 @@ SELECT _timescaledb_functions.hypertable_osm_range_update('test_chunkapp', NULL: f (1 row) -EXPLAIN SELECT * FROM test_chunkapp ORDER BY 1; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------- - Custom Scan (ChunkAppend) on test_chunkapp (cost=0.15..270.31 rows=6355 width=12) +:EXPLAIN SELECT * FROM test_chunkapp ORDER BY 1; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on test_chunkapp Order: test_chunkapp."time" - -> Index Scan Backward using _hyper_16_26_chunk_test_chunkapp_time_idx on _hyper_16_26_chunk (cost=0.15..42.75 rows=2040 width=12) - -> Index Scan Backward using _hyper_16_27_chunk_test_chunkapp_time_idx on _hyper_16_27_chunk (cost=0.15..42.75 rows=2040 width=12) - -> Foreign Scan on test_chunkapp_fdw_child (cost=100.00..184.80 rows=2275 width=12) + -> Index Scan Backward using _hyper_16_26_chunk_test_chunkapp_time_idx on _hyper_16_26_chunk + -> Index Scan Backward using _hyper_16_27_chunk_test_chunkapp_time_idx on _hyper_16_27_chunk + -> Foreign Scan on test_chunkapp_fdw_child (5 rows) SELECT * FROM test_chunkapp ORDER BY 1; @@ -1534,14 +1535,14 @@ SELECT * FROM test_chunkapp ORDER BY 1; (2 rows) -- should exclude the OSM chunk this time since it is empty -EXPLAIN SELECT * FROM test_chunkapp WHERE time < '2023-01-01' ORDER BY 1; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------- - Custom Scan (ChunkAppend) on test_chunkapp (cost=0.15..46.11 rows=1360 width=12) +:EXPLAIN SELECT * FROM test_chunkapp WHERE time < '2023-01-01' ORDER BY 1; + QUERY PLAN +------------------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on test_chunkapp Order: test_chunkapp."time" - -> Index Scan Backward using _hyper_16_26_chunk_test_chunkapp_time_idx on _hyper_16_26_chunk (cost=0.15..23.05 rows=680 width=12) + -> Index Scan Backward using _hyper_16_26_chunk_test_chunkapp_time_idx on _hyper_16_26_chunk Index Cond: ("time" < 'Sun Jan 01 00:00:00 2023 PST'::timestamp with time zone) - -> Index Scan Backward using _hyper_16_27_chunk_test_chunkapp_time_idx on _hyper_16_27_chunk (cost=0.15..23.05 rows=680 width=12) + -> Index Scan Backward using _hyper_16_27_chunk_test_chunkapp_time_idx on _hyper_16_27_chunk Index Cond: ("time" < 'Sun Jan 01 00:00:00 2023 PST'::timestamp with time zone) (6 rows) diff --git a/tsl/test/expected/compression.out b/tsl/test/expected/compression.out index b61f0f1c30d..10d7848b814 100644 --- a/tsl/test/expected/compression.out +++ b/tsl/test/expected/compression.out @@ -1258,7 +1258,8 @@ SELECT create_hypertable('stattest', 'time'); INSERT INTO stattest SELECT '2020/02/20 01:00'::TIMESTAMPTZ + ('1 hour'::interval * v), 250 * v FROM generate_series(0,25) v; SELECT table_name INTO TEMPORARY temptable FROM _timescaledb_catalog.chunk WHERE hypertable_id = (SELECT id FROM _timescaledb_catalog.hypertable WHERE table_name = 'stattest'); \set statchunk '(select table_name from temptable)' -SELECT * FROM pg_stats WHERE tablename = :statchunk; +SELECT schemaname, tablename, attname, inherited, null_frac, avg_width, n_distinct, most_common_vals, most_common_freqs, histogram_bounds, correlation, most_common_elems, most_common_elem_freqs, elem_count_histogram +FROM pg_stats WHERE tablename = :statchunk; schemaname | tablename | attname | inherited | null_frac | avg_width | n_distinct | most_common_vals | most_common_freqs | histogram_bounds | correlation | most_common_elems | most_common_elem_freqs | elem_count_histogram ------------+-----------+---------+-----------+-----------+-----------+------------+------------------+-------------------+------------------+-------------+-------------------+------------------------+---------------------- (0 rows) diff --git a/tsl/test/expected/compression_errors-17.out b/tsl/test/expected/compression_errors-17.out index 9ad6955df69..29ec64940dc 100644 --- a/tsl/test/expected/compression_errors-17.out +++ b/tsl/test/expected/compression_errors-17.out @@ -356,10 +356,6 @@ BEGIN; ALTER TABLE table_constr set (timescaledb.compress, timescaledb.compress_orderby = 'timec', timescaledb.compress_segmentby = 'device_id'); WARNING: column "location" should be used for segmenting or ordering ROLLBACK; -alter table table_constr add constraint table_constr_fk FOREIGN KEY(d) REFERENCES fortable(col) on delete cascade; -ALTER TABLE table_constr set (timescaledb.compress, timescaledb.compress_orderby = 'timec', timescaledb.compress_segmentby = 'device_id, location'); -ERROR: column "d" must be used for segmenting -DETAIL: The foreign key constraint "table_constr_fk" cannot be enforced with the given compression configuration. --exclusion constraints not allowed alter table table_constr add constraint table_constr_exclu exclude using btree (timec with = ); ALTER TABLE table_constr set (timescaledb.compress, timescaledb.compress_orderby = 'timec', timescaledb.compress_segmentby = 'device_id, location, d'); @@ -452,10 +448,7 @@ INSERT INTO fortable VALUES( 99 ); INSERT INTO table_constr2 VALUES( 1000, 10, 5, 99); ALTER TABLE table_constr2 SET (timescaledb.compress, timescaledb.compress_segmentby = 'device_id'); NOTICE: default order by for hypertable "table_constr2" is set to "timec DESC" -ERROR: column "d" must be used for segmenting -DETAIL: The foreign key constraint "table_constr2_d_fkey" cannot be enforced with the given compression configuration. ALTER TABLE table_constr2 SET (timescaledb.compress, timescaledb.compress_segmentby = 'device_id, d'); -NOTICE: default order by for hypertable "table_constr2" is set to "timec DESC" --compress a chunk and try to disable compression, it should fail -- SELECT ch1.schema_name|| '.' || ch1.table_name AS "CHUNK_NAME" FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht diff --git a/tsl/test/expected/compression_permissions.out b/tsl/test/expected/compression_permissions-14.out similarity index 100% rename from tsl/test/expected/compression_permissions.out rename to tsl/test/expected/compression_permissions-14.out diff --git a/tsl/test/expected/compression_permissions-15.out b/tsl/test/expected/compression_permissions-15.out new file mode 100644 index 00000000000..b893da7f95f --- /dev/null +++ b/tsl/test/expected/compression_permissions-15.out @@ -0,0 +1,374 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set ON_ERROR_STOP 0 +CREATE VIEW hypertable_details AS +WITH + names AS (SELECT * FROM pg_class cl JOIN pg_namespace ns ON ns.oid = relnamespace) +SELECT (SELECT format('%I.%I', ht.schema_name, ht.table_name) FROM names + WHERE ht.schema_name = nspname AND ht.table_name = relname) AS hypertable, + (SELECT relacl FROM names + WHERE ht.schema_name = nspname AND ht.table_name = relname) AS hypertable_acl, + (SELECT format('%I.%I', ct.schema_name, ct.table_name) FROM names + WHERE ct.schema_name = nspname AND ct.table_name = relname) AS compressed, + (SELECT relacl FROM names + WHERE ct.schema_name = nspname AND ct.table_name = relname) AS compressed_acl + FROM _timescaledb_catalog.hypertable ht + LEFT JOIN _timescaledb_catalog.hypertable ct ON ht.compressed_hypertable_id = ct.id; +CREATE VIEW chunk_details AS +WITH + names AS (SELECT * FROM pg_class cl JOIN pg_namespace ns ON ns.oid = relnamespace) +SELECT (SELECT format('%I.%I', ht.schema_name, ht.table_name) FROM names + WHERE ht.schema_name = nspname AND ht.table_name = relname) AS hypertable, + (SELECT relacl FROM names + WHERE ht.schema_name = nspname AND ht.table_name = relname) AS hypertable_acl, + (SELECT format('%I.%I', ch.schema_name, ch.table_name) FROM names + WHERE ch.schema_name = nspname AND ch.table_name = relname) AS chunk, + (SELECT relacl FROM names + WHERE ch.schema_name = nspname AND ch.table_name = relname) AS chunk_acl + FROM _timescaledb_catalog.hypertable ht + JOIN _timescaledb_catalog.chunk ch ON ch.hypertable_id = ht.id; +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + location2 char(10) NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL + ); +select table_name from create_hypertable( 'conditions', 'timec', chunk_time_interval=>'1month'::interval); + table_name +------------ + conditions +(1 row) + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +alter table conditions set (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'timec'); +ERROR: must be owner of table conditions +insert into conditions +select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 'klick', 55, 75; +ERROR: permission denied for table conditions +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +--now owner tries and succeeds -- +alter table conditions set (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'timec'); +insert into conditions +select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 'klick', 55, 75; +--try modifying compress properties -- +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +alter table conditions set (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'humidity'); +ERROR: must be owner of table conditions +--- compress_chunks and decompress_chunks fail without correct perm -- +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +SELECT compress_chunk(show_chunks('conditions')); +ERROR: must be owner of hypertable "conditions" +SELECT decompress_chunk(show_chunks('conditions')); +ERROR: must be owner of hypertable "conditions" +select add_compression_policy('conditions', '1day'::interval); +ERROR: must be owner of hypertable "conditions" +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +select add_compression_policy('conditions', '1day'::interval); + add_compression_policy +------------------------ + 1000 +(1 row) + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +--try dropping policy +select remove_compression_policy('conditions', true); +ERROR: must be owner of hypertable "conditions" +--Tests for GRANTS. +-- as owner grant select , compress chunk and check SELECT works +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +GRANT SELECT on conditions to :ROLE_DEFAULT_PERM_USER_2; +SELECT count(compress_chunk(ch)) FROM show_chunks('conditions') ch; + count +------- + 2 +(1 row) + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +select count(*) from conditions; + count +------- + 31 +(1 row) + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +GRANT INSERT on conditions to :ROLE_DEFAULT_PERM_USER_2; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +insert into conditions +select '2019-04-01 00:00+0'::timestamp with time zone, 'NYC', 'klick', 55, 75; +select count(*) from conditions; + count +------- + 32 +(1 row) + +update conditions +set location = 'SFO' +where timec = '2019-04-01 00:00+0'::timestamp with time zone; +ERROR: permission denied for table conditions +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +GRANT UPDATE, DELETE on conditions to :ROLE_DEFAULT_PERM_USER_2; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +select location from conditions where timec = '2019-04-01 00:00+0'; + location +---------- + NYC +(1 row) + +--NOTE constraint exclusion excludes the other chunks here +--otherwise update would fail +update conditions +set location = 'SFO' +where timec = '2019-04-01 00:00+0'::timestamp with time zone; +select location from conditions where timec = '2019-04-01 00:00+0'; + location +---------- + SFO +(1 row) + +--update expected to fail as executor touches all chunks +update conditions +set location = 'PNC' +where location = 'SFO'; +delete from conditions +where timec = '2019-04-01 00:00+0'::timestamp with time zone; +select location from conditions where timec = '2019-04-01 00:00+0'; + location +---------- +(0 rows) + +CREATE VIEW v2 as select * from conditions; +select count(*) from v2; + count +------- + 31 +(1 row) + +--should fail after revoking permissions +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +REVOKE SELECT on conditions FROM :ROLE_DEFAULT_PERM_USER_2; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +select count(*) from v2; +ERROR: permission denied for table conditions +\c :TEST_DBNAME :ROLE_SUPERUSER +DROP TABLE conditions CASCADE; +NOTICE: drop cascades to view v2 +-- Testing that permissions propagate to compressed hypertables and to +-- compressed chunks. +-- Table is created by superuser +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL + ); +SELECT table_name FROM create_hypertable( 'conditions', 'timec', + chunk_time_interval => '1 week'::interval); + table_name +------------ + conditions +(1 row) + +ALTER TABLE conditions SET ( + timescaledb.compress, + timescaledb.compress_segmentby = 'location', + timescaledb.compress_orderby = 'timec' +); +INSERT INTO conditions +SELECT generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 55, 75; +SELECT compress_chunk(show_chunks('conditions')); + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_3_6_chunk + _timescaledb_internal._hyper_3_7_chunk + _timescaledb_internal._hyper_3_8_chunk + _timescaledb_internal._hyper_3_9_chunk + _timescaledb_internal._hyper_3_10_chunk +(5 rows) + +-- Check that ACL propagates to compressed hypertable. We could prune +-- the listing by only selecting chunks where the ACL does not match +-- the hypertable ACL, but for now we list all to make debugging easy. +\x on +SELECT * FROM hypertable_details WHERE hypertable = 'public.conditions'; +-[ RECORD 1 ]--+----------------------------------------------- +hypertable | public.conditions +hypertable_acl | +compressed | _timescaledb_internal._compressed_hypertable_4 +compressed_acl | + +SELECT htd.hypertable, htd.hypertable_acl, chunk, chunk_acl + FROM chunk_details chd JOIN hypertable_details htd ON chd.hypertable = htd.compressed +ORDER BY hypertable, chunk; +-[ RECORD 1 ]--+------------------------------------------------ +hypertable | public.conditions +hypertable_acl | +chunk | _timescaledb_internal.compress_hyper_4_11_chunk +chunk_acl | +-[ RECORD 2 ]--+------------------------------------------------ +hypertable | public.conditions +hypertable_acl | +chunk | _timescaledb_internal.compress_hyper_4_12_chunk +chunk_acl | +-[ RECORD 3 ]--+------------------------------------------------ +hypertable | public.conditions +hypertable_acl | +chunk | _timescaledb_internal.compress_hyper_4_13_chunk +chunk_acl | +-[ RECORD 4 ]--+------------------------------------------------ +hypertable | public.conditions +hypertable_acl | +chunk | _timescaledb_internal.compress_hyper_4_14_chunk +chunk_acl | +-[ RECORD 5 ]--+------------------------------------------------ +hypertable | public.conditions +hypertable_acl | +chunk | _timescaledb_internal.compress_hyper_4_15_chunk +chunk_acl | + +GRANT SELECT ON conditions TO :ROLE_DEFAULT_PERM_USER; +SELECT * FROM hypertable_details WHERE hypertable = 'public.conditions'; +-[ RECORD 1 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +compressed | _timescaledb_internal._compressed_hypertable_4 +compressed_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} + +SELECT htd.hypertable, htd.hypertable_acl, chunk, chunk_acl + FROM chunk_details chd JOIN hypertable_details htd ON chd.hypertable = htd.compressed +ORDER BY hypertable, chunk; +-[ RECORD 1 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_11_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +-[ RECORD 2 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_12_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +-[ RECORD 3 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_13_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +-[ RECORD 4 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_14_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +-[ RECORD 5 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_15_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} + +-- Add some new data and compress the chunks. The chunks should get +-- the permissions of the hypertable. We pick a start date to make +-- sure that we are not inserting into an already compressed chunk. +INSERT INTO conditions +SELECT generate_series('2019-01-07 00:00'::timestamp, '2019-02-07 00:00'::timestamp, '1 day'), 'XYZ', 47, 11; +SELECT compress_chunk(show_chunks('conditions', newer_than => '2019-01-01')); +-[ RECORD 1 ]--+---------------------------------------- +compress_chunk | _timescaledb_internal._hyper_3_16_chunk +-[ RECORD 2 ]--+---------------------------------------- +compress_chunk | _timescaledb_internal._hyper_3_17_chunk +-[ RECORD 3 ]--+---------------------------------------- +compress_chunk | _timescaledb_internal._hyper_3_18_chunk +-[ RECORD 4 ]--+---------------------------------------- +compress_chunk | _timescaledb_internal._hyper_3_19_chunk +-[ RECORD 5 ]--+---------------------------------------- +compress_chunk | _timescaledb_internal._hyper_3_20_chunk +-[ RECORD 6 ]--+---------------------------------------- +compress_chunk | _timescaledb_internal._hyper_3_21_chunk + +SELECT htd.hypertable, htd.hypertable_acl, chunk, chunk_acl + FROM chunk_details chd JOIN hypertable_details htd ON chd.hypertable = htd.compressed +ORDER BY hypertable, chunk; +-[ RECORD 1 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_11_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +-[ RECORD 2 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_12_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +-[ RECORD 3 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_13_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +-[ RECORD 4 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_14_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +-[ RECORD 5 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_15_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +-[ RECORD 6 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_22_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +-[ RECORD 7 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_23_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +-[ RECORD 8 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_24_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +-[ RECORD 9 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_25_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +-[ RECORD 10 ]-+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_26_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +-[ RECORD 11 ]-+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_27_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} + +\x off +--TEST user that has insert permission can insert into a compressed chunk +GRANT INSERT ON conditions TO :ROLE_DEFAULT_PERM_USER; +SELECT count(*) FROM conditions; + count +------- + 63 +(1 row) + +SELECT count(*) FROM ( SELECT show_chunks('conditions'))q; + count +------- + 11 +(1 row) + +SET ROLE :ROLE_DEFAULT_PERM_USER; +--insert into a compressed chunk -- +INSERT INTO conditions VALUES( '2018-12-02 00:00'::timestamp, 'NYC', 75, 95); +SELECT count(*) FROM conditions; + count +------- + 64 +(1 row) + +SELECT count(*) FROM ( SELECT show_chunks('conditions'))q; + count +------- + 11 +(1 row) + diff --git a/tsl/test/expected/compression_permissions-16.out b/tsl/test/expected/compression_permissions-16.out new file mode 100644 index 00000000000..b893da7f95f --- /dev/null +++ b/tsl/test/expected/compression_permissions-16.out @@ -0,0 +1,374 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set ON_ERROR_STOP 0 +CREATE VIEW hypertable_details AS +WITH + names AS (SELECT * FROM pg_class cl JOIN pg_namespace ns ON ns.oid = relnamespace) +SELECT (SELECT format('%I.%I', ht.schema_name, ht.table_name) FROM names + WHERE ht.schema_name = nspname AND ht.table_name = relname) AS hypertable, + (SELECT relacl FROM names + WHERE ht.schema_name = nspname AND ht.table_name = relname) AS hypertable_acl, + (SELECT format('%I.%I', ct.schema_name, ct.table_name) FROM names + WHERE ct.schema_name = nspname AND ct.table_name = relname) AS compressed, + (SELECT relacl FROM names + WHERE ct.schema_name = nspname AND ct.table_name = relname) AS compressed_acl + FROM _timescaledb_catalog.hypertable ht + LEFT JOIN _timescaledb_catalog.hypertable ct ON ht.compressed_hypertable_id = ct.id; +CREATE VIEW chunk_details AS +WITH + names AS (SELECT * FROM pg_class cl JOIN pg_namespace ns ON ns.oid = relnamespace) +SELECT (SELECT format('%I.%I', ht.schema_name, ht.table_name) FROM names + WHERE ht.schema_name = nspname AND ht.table_name = relname) AS hypertable, + (SELECT relacl FROM names + WHERE ht.schema_name = nspname AND ht.table_name = relname) AS hypertable_acl, + (SELECT format('%I.%I', ch.schema_name, ch.table_name) FROM names + WHERE ch.schema_name = nspname AND ch.table_name = relname) AS chunk, + (SELECT relacl FROM names + WHERE ch.schema_name = nspname AND ch.table_name = relname) AS chunk_acl + FROM _timescaledb_catalog.hypertable ht + JOIN _timescaledb_catalog.chunk ch ON ch.hypertable_id = ht.id; +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + location2 char(10) NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL + ); +select table_name from create_hypertable( 'conditions', 'timec', chunk_time_interval=>'1month'::interval); + table_name +------------ + conditions +(1 row) + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +alter table conditions set (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'timec'); +ERROR: must be owner of table conditions +insert into conditions +select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 'klick', 55, 75; +ERROR: permission denied for table conditions +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +--now owner tries and succeeds -- +alter table conditions set (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'timec'); +insert into conditions +select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 'klick', 55, 75; +--try modifying compress properties -- +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +alter table conditions set (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'humidity'); +ERROR: must be owner of table conditions +--- compress_chunks and decompress_chunks fail without correct perm -- +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +SELECT compress_chunk(show_chunks('conditions')); +ERROR: must be owner of hypertable "conditions" +SELECT decompress_chunk(show_chunks('conditions')); +ERROR: must be owner of hypertable "conditions" +select add_compression_policy('conditions', '1day'::interval); +ERROR: must be owner of hypertable "conditions" +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +select add_compression_policy('conditions', '1day'::interval); + add_compression_policy +------------------------ + 1000 +(1 row) + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +--try dropping policy +select remove_compression_policy('conditions', true); +ERROR: must be owner of hypertable "conditions" +--Tests for GRANTS. +-- as owner grant select , compress chunk and check SELECT works +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +GRANT SELECT on conditions to :ROLE_DEFAULT_PERM_USER_2; +SELECT count(compress_chunk(ch)) FROM show_chunks('conditions') ch; + count +------- + 2 +(1 row) + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +select count(*) from conditions; + count +------- + 31 +(1 row) + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +GRANT INSERT on conditions to :ROLE_DEFAULT_PERM_USER_2; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +insert into conditions +select '2019-04-01 00:00+0'::timestamp with time zone, 'NYC', 'klick', 55, 75; +select count(*) from conditions; + count +------- + 32 +(1 row) + +update conditions +set location = 'SFO' +where timec = '2019-04-01 00:00+0'::timestamp with time zone; +ERROR: permission denied for table conditions +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +GRANT UPDATE, DELETE on conditions to :ROLE_DEFAULT_PERM_USER_2; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +select location from conditions where timec = '2019-04-01 00:00+0'; + location +---------- + NYC +(1 row) + +--NOTE constraint exclusion excludes the other chunks here +--otherwise update would fail +update conditions +set location = 'SFO' +where timec = '2019-04-01 00:00+0'::timestamp with time zone; +select location from conditions where timec = '2019-04-01 00:00+0'; + location +---------- + SFO +(1 row) + +--update expected to fail as executor touches all chunks +update conditions +set location = 'PNC' +where location = 'SFO'; +delete from conditions +where timec = '2019-04-01 00:00+0'::timestamp with time zone; +select location from conditions where timec = '2019-04-01 00:00+0'; + location +---------- +(0 rows) + +CREATE VIEW v2 as select * from conditions; +select count(*) from v2; + count +------- + 31 +(1 row) + +--should fail after revoking permissions +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +REVOKE SELECT on conditions FROM :ROLE_DEFAULT_PERM_USER_2; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +select count(*) from v2; +ERROR: permission denied for table conditions +\c :TEST_DBNAME :ROLE_SUPERUSER +DROP TABLE conditions CASCADE; +NOTICE: drop cascades to view v2 +-- Testing that permissions propagate to compressed hypertables and to +-- compressed chunks. +-- Table is created by superuser +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL + ); +SELECT table_name FROM create_hypertable( 'conditions', 'timec', + chunk_time_interval => '1 week'::interval); + table_name +------------ + conditions +(1 row) + +ALTER TABLE conditions SET ( + timescaledb.compress, + timescaledb.compress_segmentby = 'location', + timescaledb.compress_orderby = 'timec' +); +INSERT INTO conditions +SELECT generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 55, 75; +SELECT compress_chunk(show_chunks('conditions')); + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_3_6_chunk + _timescaledb_internal._hyper_3_7_chunk + _timescaledb_internal._hyper_3_8_chunk + _timescaledb_internal._hyper_3_9_chunk + _timescaledb_internal._hyper_3_10_chunk +(5 rows) + +-- Check that ACL propagates to compressed hypertable. We could prune +-- the listing by only selecting chunks where the ACL does not match +-- the hypertable ACL, but for now we list all to make debugging easy. +\x on +SELECT * FROM hypertable_details WHERE hypertable = 'public.conditions'; +-[ RECORD 1 ]--+----------------------------------------------- +hypertable | public.conditions +hypertable_acl | +compressed | _timescaledb_internal._compressed_hypertable_4 +compressed_acl | + +SELECT htd.hypertable, htd.hypertable_acl, chunk, chunk_acl + FROM chunk_details chd JOIN hypertable_details htd ON chd.hypertable = htd.compressed +ORDER BY hypertable, chunk; +-[ RECORD 1 ]--+------------------------------------------------ +hypertable | public.conditions +hypertable_acl | +chunk | _timescaledb_internal.compress_hyper_4_11_chunk +chunk_acl | +-[ RECORD 2 ]--+------------------------------------------------ +hypertable | public.conditions +hypertable_acl | +chunk | _timescaledb_internal.compress_hyper_4_12_chunk +chunk_acl | +-[ RECORD 3 ]--+------------------------------------------------ +hypertable | public.conditions +hypertable_acl | +chunk | _timescaledb_internal.compress_hyper_4_13_chunk +chunk_acl | +-[ RECORD 4 ]--+------------------------------------------------ +hypertable | public.conditions +hypertable_acl | +chunk | _timescaledb_internal.compress_hyper_4_14_chunk +chunk_acl | +-[ RECORD 5 ]--+------------------------------------------------ +hypertable | public.conditions +hypertable_acl | +chunk | _timescaledb_internal.compress_hyper_4_15_chunk +chunk_acl | + +GRANT SELECT ON conditions TO :ROLE_DEFAULT_PERM_USER; +SELECT * FROM hypertable_details WHERE hypertable = 'public.conditions'; +-[ RECORD 1 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +compressed | _timescaledb_internal._compressed_hypertable_4 +compressed_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} + +SELECT htd.hypertable, htd.hypertable_acl, chunk, chunk_acl + FROM chunk_details chd JOIN hypertable_details htd ON chd.hypertable = htd.compressed +ORDER BY hypertable, chunk; +-[ RECORD 1 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_11_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +-[ RECORD 2 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_12_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +-[ RECORD 3 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_13_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +-[ RECORD 4 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_14_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +-[ RECORD 5 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_15_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} + +-- Add some new data and compress the chunks. The chunks should get +-- the permissions of the hypertable. We pick a start date to make +-- sure that we are not inserting into an already compressed chunk. +INSERT INTO conditions +SELECT generate_series('2019-01-07 00:00'::timestamp, '2019-02-07 00:00'::timestamp, '1 day'), 'XYZ', 47, 11; +SELECT compress_chunk(show_chunks('conditions', newer_than => '2019-01-01')); +-[ RECORD 1 ]--+---------------------------------------- +compress_chunk | _timescaledb_internal._hyper_3_16_chunk +-[ RECORD 2 ]--+---------------------------------------- +compress_chunk | _timescaledb_internal._hyper_3_17_chunk +-[ RECORD 3 ]--+---------------------------------------- +compress_chunk | _timescaledb_internal._hyper_3_18_chunk +-[ RECORD 4 ]--+---------------------------------------- +compress_chunk | _timescaledb_internal._hyper_3_19_chunk +-[ RECORD 5 ]--+---------------------------------------- +compress_chunk | _timescaledb_internal._hyper_3_20_chunk +-[ RECORD 6 ]--+---------------------------------------- +compress_chunk | _timescaledb_internal._hyper_3_21_chunk + +SELECT htd.hypertable, htd.hypertable_acl, chunk, chunk_acl + FROM chunk_details chd JOIN hypertable_details htd ON chd.hypertable = htd.compressed +ORDER BY hypertable, chunk; +-[ RECORD 1 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_11_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +-[ RECORD 2 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_12_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +-[ RECORD 3 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_13_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +-[ RECORD 4 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_14_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +-[ RECORD 5 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_15_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +-[ RECORD 6 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_22_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +-[ RECORD 7 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_23_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +-[ RECORD 8 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_24_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +-[ RECORD 9 ]--+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_25_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +-[ RECORD 10 ]-+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_26_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +-[ RECORD 11 ]-+--------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_27_chunk +chunk_acl | {super_user=arwdDxt/super_user,default_perm_user=r/super_user} + +\x off +--TEST user that has insert permission can insert into a compressed chunk +GRANT INSERT ON conditions TO :ROLE_DEFAULT_PERM_USER; +SELECT count(*) FROM conditions; + count +------- + 63 +(1 row) + +SELECT count(*) FROM ( SELECT show_chunks('conditions'))q; + count +------- + 11 +(1 row) + +SET ROLE :ROLE_DEFAULT_PERM_USER; +--insert into a compressed chunk -- +INSERT INTO conditions VALUES( '2018-12-02 00:00'::timestamp, 'NYC', 75, 95); +SELECT count(*) FROM conditions; + count +------- + 64 +(1 row) + +SELECT count(*) FROM ( SELECT show_chunks('conditions'))q; + count +------- + 11 +(1 row) + diff --git a/tsl/test/expected/compression_permissions-17.out b/tsl/test/expected/compression_permissions-17.out new file mode 100644 index 00000000000..b01ac9a5ddd --- /dev/null +++ b/tsl/test/expected/compression_permissions-17.out @@ -0,0 +1,374 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set ON_ERROR_STOP 0 +CREATE VIEW hypertable_details AS +WITH + names AS (SELECT * FROM pg_class cl JOIN pg_namespace ns ON ns.oid = relnamespace) +SELECT (SELECT format('%I.%I', ht.schema_name, ht.table_name) FROM names + WHERE ht.schema_name = nspname AND ht.table_name = relname) AS hypertable, + (SELECT relacl FROM names + WHERE ht.schema_name = nspname AND ht.table_name = relname) AS hypertable_acl, + (SELECT format('%I.%I', ct.schema_name, ct.table_name) FROM names + WHERE ct.schema_name = nspname AND ct.table_name = relname) AS compressed, + (SELECT relacl FROM names + WHERE ct.schema_name = nspname AND ct.table_name = relname) AS compressed_acl + FROM _timescaledb_catalog.hypertable ht + LEFT JOIN _timescaledb_catalog.hypertable ct ON ht.compressed_hypertable_id = ct.id; +CREATE VIEW chunk_details AS +WITH + names AS (SELECT * FROM pg_class cl JOIN pg_namespace ns ON ns.oid = relnamespace) +SELECT (SELECT format('%I.%I', ht.schema_name, ht.table_name) FROM names + WHERE ht.schema_name = nspname AND ht.table_name = relname) AS hypertable, + (SELECT relacl FROM names + WHERE ht.schema_name = nspname AND ht.table_name = relname) AS hypertable_acl, + (SELECT format('%I.%I', ch.schema_name, ch.table_name) FROM names + WHERE ch.schema_name = nspname AND ch.table_name = relname) AS chunk, + (SELECT relacl FROM names + WHERE ch.schema_name = nspname AND ch.table_name = relname) AS chunk_acl + FROM _timescaledb_catalog.hypertable ht + JOIN _timescaledb_catalog.chunk ch ON ch.hypertable_id = ht.id; +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + location2 char(10) NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL + ); +select table_name from create_hypertable( 'conditions', 'timec', chunk_time_interval=>'1month'::interval); + table_name +------------ + conditions +(1 row) + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +alter table conditions set (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'timec'); +ERROR: must be owner of table conditions +insert into conditions +select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 'klick', 55, 75; +ERROR: permission denied for table conditions +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +--now owner tries and succeeds -- +alter table conditions set (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'timec'); +insert into conditions +select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 'klick', 55, 75; +--try modifying compress properties -- +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +alter table conditions set (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'humidity'); +ERROR: must be owner of table conditions +--- compress_chunks and decompress_chunks fail without correct perm -- +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +SELECT compress_chunk(show_chunks('conditions')); +ERROR: must be owner of hypertable "conditions" +SELECT decompress_chunk(show_chunks('conditions')); +ERROR: must be owner of hypertable "conditions" +select add_compression_policy('conditions', '1day'::interval); +ERROR: must be owner of hypertable "conditions" +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +select add_compression_policy('conditions', '1day'::interval); + add_compression_policy +------------------------ + 1000 +(1 row) + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +--try dropping policy +select remove_compression_policy('conditions', true); +ERROR: must be owner of hypertable "conditions" +--Tests for GRANTS. +-- as owner grant select , compress chunk and check SELECT works +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +GRANT SELECT on conditions to :ROLE_DEFAULT_PERM_USER_2; +SELECT count(compress_chunk(ch)) FROM show_chunks('conditions') ch; + count +------- + 2 +(1 row) + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +select count(*) from conditions; + count +------- + 31 +(1 row) + +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +GRANT INSERT on conditions to :ROLE_DEFAULT_PERM_USER_2; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +insert into conditions +select '2019-04-01 00:00+0'::timestamp with time zone, 'NYC', 'klick', 55, 75; +select count(*) from conditions; + count +------- + 32 +(1 row) + +update conditions +set location = 'SFO' +where timec = '2019-04-01 00:00+0'::timestamp with time zone; +ERROR: permission denied for table conditions +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +GRANT UPDATE, DELETE on conditions to :ROLE_DEFAULT_PERM_USER_2; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +select location from conditions where timec = '2019-04-01 00:00+0'; + location +---------- + NYC +(1 row) + +--NOTE constraint exclusion excludes the other chunks here +--otherwise update would fail +update conditions +set location = 'SFO' +where timec = '2019-04-01 00:00+0'::timestamp with time zone; +select location from conditions where timec = '2019-04-01 00:00+0'; + location +---------- + SFO +(1 row) + +--update expected to fail as executor touches all chunks +update conditions +set location = 'PNC' +where location = 'SFO'; +delete from conditions +where timec = '2019-04-01 00:00+0'::timestamp with time zone; +select location from conditions where timec = '2019-04-01 00:00+0'; + location +---------- +(0 rows) + +CREATE VIEW v2 as select * from conditions; +select count(*) from v2; + count +------- + 31 +(1 row) + +--should fail after revoking permissions +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER +REVOKE SELECT on conditions FROM :ROLE_DEFAULT_PERM_USER_2; +\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2 +select count(*) from v2; +ERROR: permission denied for table conditions +\c :TEST_DBNAME :ROLE_SUPERUSER +DROP TABLE conditions CASCADE; +NOTICE: drop cascades to view v2 +-- Testing that permissions propagate to compressed hypertables and to +-- compressed chunks. +-- Table is created by superuser +CREATE TABLE conditions ( + timec TIMESTAMPTZ NOT NULL, + location TEXT NOT NULL, + temperature DOUBLE PRECISION NULL, + humidity DOUBLE PRECISION NULL + ); +SELECT table_name FROM create_hypertable( 'conditions', 'timec', + chunk_time_interval => '1 week'::interval); + table_name +------------ + conditions +(1 row) + +ALTER TABLE conditions SET ( + timescaledb.compress, + timescaledb.compress_segmentby = 'location', + timescaledb.compress_orderby = 'timec' +); +INSERT INTO conditions +SELECT generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 55, 75; +SELECT compress_chunk(show_chunks('conditions')); + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_3_6_chunk + _timescaledb_internal._hyper_3_7_chunk + _timescaledb_internal._hyper_3_8_chunk + _timescaledb_internal._hyper_3_9_chunk + _timescaledb_internal._hyper_3_10_chunk +(5 rows) + +-- Check that ACL propagates to compressed hypertable. We could prune +-- the listing by only selecting chunks where the ACL does not match +-- the hypertable ACL, but for now we list all to make debugging easy. +\x on +SELECT * FROM hypertable_details WHERE hypertable = 'public.conditions'; +-[ RECORD 1 ]--+----------------------------------------------- +hypertable | public.conditions +hypertable_acl | +compressed | _timescaledb_internal._compressed_hypertable_4 +compressed_acl | + +SELECT htd.hypertable, htd.hypertable_acl, chunk, chunk_acl + FROM chunk_details chd JOIN hypertable_details htd ON chd.hypertable = htd.compressed +ORDER BY hypertable, chunk; +-[ RECORD 1 ]--+------------------------------------------------ +hypertable | public.conditions +hypertable_acl | +chunk | _timescaledb_internal.compress_hyper_4_11_chunk +chunk_acl | +-[ RECORD 2 ]--+------------------------------------------------ +hypertable | public.conditions +hypertable_acl | +chunk | _timescaledb_internal.compress_hyper_4_12_chunk +chunk_acl | +-[ RECORD 3 ]--+------------------------------------------------ +hypertable | public.conditions +hypertable_acl | +chunk | _timescaledb_internal.compress_hyper_4_13_chunk +chunk_acl | +-[ RECORD 4 ]--+------------------------------------------------ +hypertable | public.conditions +hypertable_acl | +chunk | _timescaledb_internal.compress_hyper_4_14_chunk +chunk_acl | +-[ RECORD 5 ]--+------------------------------------------------ +hypertable | public.conditions +hypertable_acl | +chunk | _timescaledb_internal.compress_hyper_4_15_chunk +chunk_acl | + +GRANT SELECT ON conditions TO :ROLE_DEFAULT_PERM_USER; +SELECT * FROM hypertable_details WHERE hypertable = 'public.conditions'; +-[ RECORD 1 ]--+---------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +compressed | _timescaledb_internal._compressed_hypertable_4 +compressed_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} + +SELECT htd.hypertable, htd.hypertable_acl, chunk, chunk_acl + FROM chunk_details chd JOIN hypertable_details htd ON chd.hypertable = htd.compressed +ORDER BY hypertable, chunk; +-[ RECORD 1 ]--+---------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_11_chunk +chunk_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +-[ RECORD 2 ]--+---------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_12_chunk +chunk_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +-[ RECORD 3 ]--+---------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_13_chunk +chunk_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +-[ RECORD 4 ]--+---------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_14_chunk +chunk_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +-[ RECORD 5 ]--+---------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_15_chunk +chunk_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} + +-- Add some new data and compress the chunks. The chunks should get +-- the permissions of the hypertable. We pick a start date to make +-- sure that we are not inserting into an already compressed chunk. +INSERT INTO conditions +SELECT generate_series('2019-01-07 00:00'::timestamp, '2019-02-07 00:00'::timestamp, '1 day'), 'XYZ', 47, 11; +SELECT compress_chunk(show_chunks('conditions', newer_than => '2019-01-01')); +-[ RECORD 1 ]--+---------------------------------------- +compress_chunk | _timescaledb_internal._hyper_3_16_chunk +-[ RECORD 2 ]--+---------------------------------------- +compress_chunk | _timescaledb_internal._hyper_3_17_chunk +-[ RECORD 3 ]--+---------------------------------------- +compress_chunk | _timescaledb_internal._hyper_3_18_chunk +-[ RECORD 4 ]--+---------------------------------------- +compress_chunk | _timescaledb_internal._hyper_3_19_chunk +-[ RECORD 5 ]--+---------------------------------------- +compress_chunk | _timescaledb_internal._hyper_3_20_chunk +-[ RECORD 6 ]--+---------------------------------------- +compress_chunk | _timescaledb_internal._hyper_3_21_chunk + +SELECT htd.hypertable, htd.hypertable_acl, chunk, chunk_acl + FROM chunk_details chd JOIN hypertable_details htd ON chd.hypertable = htd.compressed +ORDER BY hypertable, chunk; +-[ RECORD 1 ]--+---------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_11_chunk +chunk_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +-[ RECORD 2 ]--+---------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_12_chunk +chunk_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +-[ RECORD 3 ]--+---------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_13_chunk +chunk_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +-[ RECORD 4 ]--+---------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_14_chunk +chunk_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +-[ RECORD 5 ]--+---------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_15_chunk +chunk_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +-[ RECORD 6 ]--+---------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_22_chunk +chunk_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +-[ RECORD 7 ]--+---------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_23_chunk +chunk_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +-[ RECORD 8 ]--+---------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_24_chunk +chunk_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +-[ RECORD 9 ]--+---------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_25_chunk +chunk_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +-[ RECORD 10 ]-+---------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_26_chunk +chunk_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +-[ RECORD 11 ]-+---------------------------------------------------------------- +hypertable | public.conditions +hypertable_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} +chunk | _timescaledb_internal.compress_hyper_4_27_chunk +chunk_acl | {super_user=arwdDxtm/super_user,default_perm_user=r/super_user} + +\x off +--TEST user that has insert permission can insert into a compressed chunk +GRANT INSERT ON conditions TO :ROLE_DEFAULT_PERM_USER; +SELECT count(*) FROM conditions; + count +------- + 63 +(1 row) + +SELECT count(*) FROM ( SELECT show_chunks('conditions'))q; + count +------- + 11 +(1 row) + +SET ROLE :ROLE_DEFAULT_PERM_USER; +--insert into a compressed chunk -- +INSERT INTO conditions VALUES( '2018-12-02 00:00'::timestamp, 'NYC', 75, 95); +SELECT count(*) FROM conditions; + count +------- + 64 +(1 row) + +SELECT count(*) FROM ( SELECT show_chunks('conditions'))q; + count +------- + 11 +(1 row) + diff --git a/tsl/test/expected/compression_update_delete.out b/tsl/test/expected/compression_update_delete-14.out similarity index 100% rename from tsl/test/expected/compression_update_delete.out rename to tsl/test/expected/compression_update_delete-14.out diff --git a/tsl/test/expected/compression_update_delete-15.out b/tsl/test/expected/compression_update_delete-15.out new file mode 100644 index 00000000000..b827ee5819a --- /dev/null +++ b/tsl/test/expected/compression_update_delete-15.out @@ -0,0 +1,3405 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set EXPLAIN 'EXPLAIN (costs off, timing off, summary off, analyze)' +CREATE OR REPLACE VIEW compressed_chunk_info_view AS +SELECT + h.schema_name AS hypertable_schema, + h.table_name AS hypertable_name, + c.schema_name as chunk_schema, + c.table_name as chunk_name, + c.status as chunk_status, + comp.schema_name as compressed_chunk_schema, + comp.table_name as compressed_chunk_name +FROM + _timescaledb_catalog.hypertable h JOIN + _timescaledb_catalog.chunk c ON h.id = c.hypertable_id + LEFT JOIN _timescaledb_catalog.chunk comp +ON comp.id = c.compressed_chunk_id +; +CREATE TABLE sample_table ( + time TIMESTAMP WITH TIME ZONE NOT NULL, + sensor_id INTEGER NOT NULL, + cpu double precision null, + temperature double precision null, + name varchar(100) default 'this is a default string value' +); +SELECT * FROM create_hypertable('sample_table', 'time', + chunk_time_interval => INTERVAL '2 months'); +WARNING: column type "character varying" used for "name" does not follow best practices + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 1 | public | sample_table | t +(1 row) + +\set start_date '2022-01-28 01:09:53.583252+05:30' +INSERT INTO sample_table + SELECT + time + (INTERVAL '1 minute' * random()) AS time, + sensor_id, + random() AS cpu, + random()* 100 AS temperature + FROM + generate_series(:'start_date'::timestamptz - INTERVAL '1 months', + :'start_date'::timestamptz - INTERVAL '1 week', + INTERVAL '1 hour') AS g1(time), + generate_series(1, 8, 1 ) AS g2(sensor_id) + ORDER BY + time; +\set start_date '2023-03-17 17:51:11.322998+05:30' +-- insert into new chunks +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 12, 21.98, 33.123, 'new row1'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 12, 17.66, 13.875, 'new row1'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 13, 21.98, 33.123, 'new row2'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 1, 21.98, 33.123, 'new row2'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 4, 21.98, 33.123, 'new row2'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 5, 0.988, 33.123, 'new row3'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 6, 4.6554, 47, 'new row3'); +-- enable compression +ALTER TABLE sample_table SET ( + timescaledb.compress, + timescaledb.compress_segmentby = 'sensor_id' +); +NOTICE: default order by for hypertable "sample_table" is set to ""time" DESC" +-- compress all chunks +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_2_chunk +(2 rows) + +-- check chunk compression status +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------ + 1 | _hyper_1_1_chunk + 1 | _hyper_1_2_chunk +(2 rows) + +-- test rows visibility +BEGIN; +-- report 0 rows +SELECT count(*) FROM sample_table WHERE name = 'updated row'; + count +------- + 0 +(1 row) + +-- update 4 rows +UPDATE sample_table SET name = 'updated row' WHERE cpu = 21.98 AND temperature = 33.123; +-- report 0 rows +SELECT count(*) FROM sample_table WHERE name = 'updated row'; + count +------- + 4 +(1 row) + +ROLLBACK; +-- get count of affected rows +SELECT count(*) FROM sample_table WHERE cpu = 21.98 AND temperature = 33.123; + count +------- + 4 +(1 row) + +-- do update +UPDATE sample_table SET name = 'updated row' WHERE cpu = 21.98 AND temperature = 33.123; +-- get count of updated rows +SELECT count(*) FROM sample_table WHERE name = 'updated row'; + count +------- + 4 +(1 row) + +-- check chunk compression status +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------ + 1 | _hyper_1_1_chunk + 9 | _hyper_1_2_chunk +(2 rows) + +-- recompress the partial chunks +SELECT compress_chunk('_timescaledb_internal._hyper_1_1_chunk'); +NOTICE: chunk "_hyper_1_1_chunk" is already compressed + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +SELECT compress_chunk('_timescaledb_internal._hyper_1_2_chunk'); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_2_chunk +(1 row) + +-- check chunk compression status +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------ + 1 | _hyper_1_1_chunk + 1 | _hyper_1_2_chunk +(2 rows) + +-- get count of affected rows +SELECT count(*) FROM sample_table WHERE name = 'updated row'; + count +------- + 4 +(1 row) + +-- do delete +DELETE FROM sample_table WHERE name = 'updated row'; +-- get count of updated rows +SELECT count(*) FROM sample_table WHERE name = 'updated row'; + count +------- + 0 +(1 row) + +-- check chunk compression status +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------ + 1 | _hyper_1_1_chunk + 9 | _hyper_1_2_chunk +(2 rows) + +-- recompress the paritial chunks +SELECT compress_chunk('_timescaledb_internal._hyper_1_1_chunk'); +NOTICE: chunk "_hyper_1_1_chunk" is already compressed + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +SELECT compress_chunk('_timescaledb_internal._hyper_1_2_chunk'); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_2_chunk +(1 row) + +-- test for IS NULL checks +-- should not UPDATE any rows +UPDATE sample_table SET temperature = 34.21 WHERE sensor_id IS NULL; +-- check chunk compression status +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------ + 1 | _hyper_1_1_chunk + 1 | _hyper_1_2_chunk +(2 rows) + +-- test for IS NOT NULL checks +-- should UPDATE all rows +UPDATE sample_table SET temperature = 34.21 WHERE sensor_id IS NOT NULL; +-- check chunk compression status +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------ + 9 | _hyper_1_1_chunk + 9 | _hyper_1_2_chunk +(2 rows) + +-- recompress the paritial chunks +SELECT compress_chunk('_timescaledb_internal._hyper_1_1_chunk'); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +SELECT compress_chunk('_timescaledb_internal._hyper_1_2_chunk'); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_2_chunk +(1 row) + +-- check chunk compression status +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------ + 1 | _hyper_1_1_chunk + 1 | _hyper_1_2_chunk +(2 rows) + +-- report 0 rows +SELECT COUNT(*) FROM sample_table WHERE name = 'updated row based on < OR > comparison'; + count +------- + 0 +(1 row) + +-- get total count of rows which satifies the condition +SELECT COUNT(*) as "total_affected_rows" FROM sample_table WHERE + time > '2022-01-20 19:10:00.101514+05:30' and + time < '2022-01-20 21:10:43.855297+05:30' \gset +-- perform UPDATE with < and > comparison on SEGMENTBY column +UPDATE sample_table SET name = 'updated row based on < OR > comparison' WHERE + time > '2022-01-20 19:10:00.101514+05:30' and time < '2022-01-20 21:10:43.855297+05:30'; +-- check chunk compression status after UPDATE +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------ + 9 | _hyper_1_1_chunk + 1 | _hyper_1_2_chunk +(2 rows) + +-- count should be same +SELECT COUNT(*) = (:total_affected_rows) FROM sample_table WHERE name = 'updated row based on < OR > comparison'; + ?column? +---------- + t +(1 row) + +DROP TABLE sample_table; +-- test to ensure that only required rows from compressed chunks +-- are extracted if SEGMENTBY column is used in WHERE condition +CREATE TABLE sample_table( + time INT NOT NULL, + device_id INT, + val INT); +SELECT * FROM create_hypertable('sample_table', 'time', chunk_time_interval => 10); + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 3 | public | sample_table | t +(1 row) + +ALTER TABLE sample_table SET ( + timescaledb.compress, + timescaledb.compress_orderby = 'time', + timescaledb.compress_segmentby = 'device_id'); +INSERT INTO sample_table VALUES (1, 1, 1), (2, 2, 1), (3, 3, 1), (10, 3, 2), (11, 4, 2), (11, 1, 2); +-- compress all chunks +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_3_5_chunk + _timescaledb_internal._hyper_3_6_chunk +(2 rows) + +-- get FIRST compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id LIMIT 1 \gset +-- get SECOND compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_2" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id DESC LIMIT 1 \gset +-- there should 2 rows matching the conditions coming from 2 chunks +SELECT * FROM sample_table WHERE device_id = 3 ORDER BY time, device_id; + time | device_id | val +------+-----------+----- + 3 | 3 | 1 + 10 | 3 | 2 +(2 rows) + +-- get total rowcount from compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 3 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 3 +(1 row) + +-- get rowcount from compressed chunks where device_id = 3 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE device_id = 3; + count +------- + 1 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE device_id = 3; + count +------- + 1 +(1 row) + +-- delete rows with device_id = 3 +DELETE FROM sample_table WHERE device_id = 3; +-- get total rowcount from compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 2 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 2 +(1 row) + +-- get rowcount from compressed chunks where device_id = 3 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE device_id = 3; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE device_id = 3; + count +------- + 0 +(1 row) + +-- there should be no rows +SELECT * FROM sample_table WHERE device_id = 3 ORDER BY time, device_id; + time | device_id | val +------+-----------+----- +(0 rows) + +-- there should 2 rows matching the conditions coming from 2 chunks +SELECT val FROM sample_table WHERE 1 = device_id ORDER BY time, device_id; + val +----- + 1 + 2 +(2 rows) + +-- get total rowcount from compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 2 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 2 +(1 row) + +-- get rowcount from compressed chunks where device_id = 1 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE 1 = device_id; + count +------- + 1 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE 1 = device_id; + count +------- + 1 +(1 row) + +-- update rows with device_id = 1 +UPDATE sample_table SET val = 200 WHERE 1 = device_id; +-- get total rowcount from compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 1 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 1 +(1 row) + +-- get rowcount from compressed chunks where device_id = 1 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE 1 = device_id; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE 1 = device_id; + count +------- + 0 +(1 row) + +-- there should be 2 rows +SELECT val FROM sample_table WHERE 1 = device_id ORDER BY time, device_id; + val +----- + 200 + 200 +(2 rows) + +DROP TABLE sample_table; +CREATE TABLE sample_table( + time INT NOT NULL, + device_id INT, + val INT); +SELECT * FROM create_hypertable('sample_table', 'time', chunk_time_interval => 10); + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 5 | public | sample_table | t +(1 row) + +ALTER TABLE sample_table SET ( + timescaledb.compress, + timescaledb.compress_segmentby = 'time, val'); +NOTICE: default order by for hypertable "sample_table" is set to "" +INSERT INTO sample_table VALUES (1, 1, 1), (2, 2, 1), (3, 3, 1), (1, 3, 2), (11, 4, 2), (1, 1, 2); +-- compress all chunks +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_5_9_chunk + _timescaledb_internal._hyper_5_10_chunk +(2 rows) + +-- get FIRST compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id LIMIT 1 \gset +-- get SECOND compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_2" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id DESC LIMIT 1 \gset +-- there should 2 rows matching the conditions coming from 2 chunks +SELECT * FROM sample_table WHERE time = 1 AND val = 2 ORDER BY time, device_id; + time | device_id | val +------+-----------+----- + 1 | 1 | 2 + 1 | 3 | 2 +(2 rows) + +-- get total rowcount from compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 4 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 1 +(1 row) + +-- get rowcount from compressed chunks where time = 1 AND val = 2 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE time = 1 AND val = 2; + count +------- + 1 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE time = 1 AND val = 2; + count +------- + 0 +(1 row) + +-- delete rows with time = 1 AND val = 2 +EXPLAIN (costs off, verbose) DELETE FROM sample_table WHERE time = 1 AND 2 = val; + QUERY PLAN +---------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Delete on public.sample_table + Delete on _timescaledb_internal._hyper_5_9_chunk sample_table_1 + -> Seq Scan on _timescaledb_internal._hyper_5_9_chunk sample_table_1 + Output: sample_table_1.tableoid, sample_table_1.ctid + Filter: ((sample_table_1."time" = 1) AND (2 = sample_table_1.val)) +(6 rows) + +-- should delete rows from 1 of the compressed chunks +DELETE FROM sample_table WHERE time = 1 AND 2 = val; +-- get total rowcount from compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 3 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 1 +(1 row) + +-- get rowcount from compressed chunks wheretime = 1 AND val = 2 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE time = 1 AND val = 2; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE time = 1 AND val = 2; + count +------- + 0 +(1 row) + +-- there should be no rows +SELECT * FROM sample_table WHERE time = 1 AND val = 2 ORDER BY time, device_id; + time | device_id | val +------+-----------+----- +(0 rows) + +DROP TABLE sample_table; +-- Test chunk compile time startup exclusion +CREATE OR REPLACE FUNCTION now_s() +RETURNS timestamptz LANGUAGE PLPGSQL STABLE AS +$BODY$ +BEGIN + RETURN '2017-08-22T10:00:00'::timestamptz; +END; +$BODY$; +CREATE TABLE sample_table(time timestamptz NOT NULL, temp float, colorid integer, attr jsonb); +SELECT create_hypertable('sample_table', 'time', chunk_time_interval => 2628000000000); + create_hypertable +--------------------------- + (7,public,sample_table,t) +(1 row) + +-- create three chunks +INSERT INTO sample_table VALUES ('2017-03-22T09:18:22', 23.5, 1, '{"a": 1, "b": 2}'), + ('2017-03-22T09:18:23', 21.5, 1, '{"a": 1, "b": 2}'), + ('2017-05-22T09:18:22', 36.2, 2, '{"c": 3, "b": 2}'), + ('2017-05-22T09:18:23', 15.2, 2, '{"c": 3}'), + ('2017-08-22T09:18:22', 34.1, 3, '{"c": 4}'); +ALTER TABLE sample_table SET (timescaledb.compress, + timescaledb.compress_segmentby = 'time'); +NOTICE: default order by for hypertable "sample_table" is set to "" +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_7_13_chunk + _timescaledb_internal._hyper_7_14_chunk + _timescaledb_internal._hyper_7_15_chunk +(3 rows) + +-- ensure all chunks are compressed +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------- + 1 | _hyper_7_13_chunk + 1 | _hyper_7_14_chunk + 1 | _hyper_7_15_chunk +(3 rows) + +-- report 0 rows +SELECT * FROM sample_table WHERE time > now_s() + '-1 month' AND colorid = 4; + time | temp | colorid | attr +------+------+---------+------ +(0 rows) + +-- update 1 row +UPDATE sample_table SET colorid = 4 WHERE time > now_s() + '-1 month'; +-- report 1 row +SELECT * FROM sample_table WHERE time > now_s() + '-1 month' AND colorid = 4; + time | temp | colorid | attr +------------------------------+------+---------+---------- + Tue Aug 22 09:18:22 2017 PDT | 34.1 | 4 | {"c": 4} +(1 row) + +-- ensure that 1 chunk is partially compressed +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------- + 1 | _hyper_7_13_chunk + 1 | _hyper_7_14_chunk + 9 | _hyper_7_15_chunk +(3 rows) + +DROP TABLE sample_table; +-- test for NULL values in SEGMENTBY column +CREATE TABLE sample_table( + time INT, + device_id INT, + val INT); +SELECT * FROM create_hypertable('sample_table', 'time', chunk_time_interval => 10); +NOTICE: adding not-null constraint to column "time" + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 9 | public | sample_table | t +(1 row) + +ALTER TABLE sample_table SET ( + timescaledb.compress, + timescaledb.compress_orderby = 'time', + timescaledb.compress_segmentby = 'device_id'); +INSERT INTO sample_table VALUES (1, 1, 1), (2, NULL, 1), (3, NULL, 1), (10, NULL, 2), (11, NULL, 2), (11, 1, 2); +-- compress all chunks +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_9_19_chunk + _timescaledb_internal._hyper_9_20_chunk +(2 rows) + +-- get FIRST compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id LIMIT 1 \gset +-- get SECOND compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_2" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id DESC LIMIT 1 \gset +-- get total rowcount from compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 2 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 2 +(1 row) + +-- get rowcount from compressed chunks where device_id IS NULL +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE device_id IS NULL; + count +------- + 1 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE device_id IS NULL; + count +------- + 1 +(1 row) + +-- get total count of SEGMENTBY column with NULL values +SELECT COUNT(*) FROM sample_table WHERE device_id IS NULL; + count +------- + 4 +(1 row) + +-- delete NULL values in SEGMENTBY column +DELETE FROM sample_table WHERE device_id IS NULL; +-- ensure that not all rows are moved to staging area +-- should have few compressed rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 1 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 1 +(1 row) + +-- get rowcount from compressed chunks where device_id IS NULL +-- should report 0 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE device_id IS NULL; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE device_id IS NULL; + count +------- + 0 +(1 row) + +-- check chunk compression status after DELETE +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------- + 9 | _hyper_9_19_chunk + 9 | _hyper_9_20_chunk +(2 rows) + +DROP TABLE sample_table; +-- test for IS NOT NULL values in SEGMENTBY column +CREATE TABLE sample_table( + time INT, + device_id INT, + val INT); +SELECT * FROM create_hypertable('sample_table', 'time', chunk_time_interval => 10); +NOTICE: adding not-null constraint to column "time" + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 11 | public | sample_table | t +(1 row) + +ALTER TABLE sample_table SET ( + timescaledb.compress, + timescaledb.compress_orderby = 'time', + timescaledb.compress_segmentby = 'device_id'); +INSERT INTO sample_table VALUES (1, NULL, 1), (2, NULL, 1), (3, NULL, 1), (10, 3, 2), (11, 2, 2), (11, 1, 2); +-- compress all chunks +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_11_23_chunk + _timescaledb_internal._hyper_11_24_chunk +(2 rows) + +-- get FIRST compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id LIMIT 1 \gset +-- get SECOND compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_2" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id DESC LIMIT 1 \gset +SELECT COUNT(*) FROM sample_table WHERE val = 1234; + count +------- + 0 +(1 row) + +-- UPDATE based on IS NOT NULL condition on SEGMENTBY column +UPDATE sample_table SET val = 1234 WHERE device_id IS NOT NULL; +-- get total count of SEGMENTBY column with NULL values +SELECT COUNT(*) FROM sample_table WHERE val = 1234; + count +------- + 3 +(1 row) + +-- check chunk compression status after DELETE +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+-------------------- + 1 | _hyper_11_23_chunk + 9 | _hyper_11_24_chunk +(2 rows) + +DROP TABLE sample_table; +-- test to for <= AND >= on SEGMENTBY column +CREATE TABLE sample_table( + time INT, + device_id INT, + val INT); +SELECT * FROM create_hypertable('sample_table', 'time', chunk_time_interval => 10); +NOTICE: adding not-null constraint to column "time" + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 13 | public | sample_table | t +(1 row) + +ALTER TABLE sample_table SET ( + timescaledb.compress, + timescaledb.compress_orderby = 'time', + timescaledb.compress_segmentby = 'device_id, val'); +INSERT INTO sample_table VALUES (1, 1, 1), (2, NULL, 1), (3, 4, 1), (10, NULL, 2), (11, NULL, 2), (11, 1, 2), (13, 5, 3); +INSERT INTO sample_table VALUES (4, 3, NULL), (6, NULL, NULL), (12, NULL, NULL), (13, 4, NULL); +-- compress all chunks +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_13_27_chunk + _timescaledb_internal._hyper_13_28_chunk +(2 rows) + +-- test will multiple NULL/NOT NULL columns +BEGIN; +-- report 0 row +SELECT * FROM sample_table WHERE device_id IS NULL AND val = 987; + time | device_id | val +------+-----------+----- +(0 rows) + +-- these 3 rows will be affected by below UPDATE +SELECT * FROM sample_table WHERE device_id IS NULL AND val IS NOT NULL ORDER BY 1; + time | device_id | val +------+-----------+----- + 2 | | 1 + 10 | | 2 + 11 | | 2 +(3 rows) + +-- update 3 rows +UPDATE sample_table SET val = 987 WHERE device_id IS NULL AND val IS NOT NULL; +-- report 3 row +SELECT * FROM sample_table WHERE device_id IS NULL AND val = 987; + time | device_id | val +------+-----------+----- + 2 | | 987 + 10 | | 987 + 11 | | 987 +(3 rows) + +ROLLBACK; +-- test will multiple columns +BEGIN; +-- report 2 rows +SELECT * FROM sample_table WHERE device_id IS NULL AND val = 2; + time | device_id | val +------+-----------+----- + 10 | | 2 + 11 | | 2 +(2 rows) + +-- delete 2 rows +DELETE from sample_table WHERE device_id IS NULL AND val = 2; +-- report 0 rows +SELECT * FROM sample_table WHERE device_id IS NULL AND val = 2; + time | device_id | val +------+-----------+----- +(0 rows) + +ROLLBACK; +BEGIN; +-- report 1 row +SELECT * FROM sample_table WHERE device_id = 3 AND val IS NULL; + time | device_id | val +------+-----------+----- + 4 | 3 | +(1 row) + +-- delete 1 rows +DELETE from sample_table WHERE device_id = 3 AND val IS NULL; +-- report 0 rows +SELECT * FROM sample_table WHERE device_id = 3 AND val IS NULL; + time | device_id | val +------+-----------+----- +(0 rows) + +ROLLBACK; +-- get FIRST compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id LIMIT 1 \gset +-- get SECOND compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_2" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id DESC LIMIT 1 \gset +-- get total rowcount from compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 5 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 5 +(1 row) + +-- get rowcount from compressed chunks where device_id >= 4 AND val <= 1 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE device_id >= 4 AND val <= 1; + count +------- + 1 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE device_id >= 4 AND val <= 1; + count +------- + 0 +(1 row) + +-- get total count of SEGMENTBY column with device_id >= 4 AND val <= 1 +SELECT COUNT(*) FROM sample_table WHERE device_id >= 4 AND val <= 1; + count +------- + 1 +(1 row) + +-- delete NULL values in SEGMENTBY column +DELETE FROM sample_table WHERE device_id >= 4 AND val <= 1; +-- ensure that not all rows are moved to staging area +-- should have few compressed rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 4 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 5 +(1 row) + +-- get rowcount from compressed chunks where device_id IS NULL +-- should report 0 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE device_id >= 4 AND val <= 1; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE device_id >= 4 AND val <= 1; + count +------- + 0 +(1 row) + +-- check chunk compression status after DELETE +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+-------------------- + 1 | _hyper_13_27_chunk + 1 | _hyper_13_28_chunk +(2 rows) + +-- added tests for code coverage +UPDATE sample_table SET time = 21 WHERE (device_id) in ( 30, 51, 72, 53); +UPDATE sample_table SET time = 21 WHERE device_id + 365 = 8765; +DROP TABLE sample_table; +-- test with different physical layout +CREATE TABLE sample_table( + time INT, + device_id INT, + val INT default 8, + a INT default 10, + b INT default 11, + c INT default 12, + d INT, + e INT default 13); +SELECT * FROM create_hypertable('sample_table', 'time', chunk_time_interval => 5); +NOTICE: adding not-null constraint to column "time" + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 15 | public | sample_table | t +(1 row) + +INSERT INTO sample_table (time, device_id, d) VALUES (1, 1, 1), (2, NULL, 1), (3, 4, 1), (10, NULL, 2), (11, NULL, 2), (11, 1, 2), (13, 5, 3); +ALTER TABLE sample_table DROP COLUMN c; +ALTER TABLE sample_table SET ( + timescaledb.compress, + timescaledb.compress_segmentby = 'device_id, d'); +NOTICE: default order by for hypertable "sample_table" is set to ""time" DESC" +-- compress all chunks +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_15_31_chunk + _timescaledb_internal._hyper_15_32_chunk +(2 rows) + +ALTER TABLE sample_table ADD COLUMN c int default 23; +-- check chunk compression status +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+-------------------- + 1 | _hyper_15_31_chunk + 1 | _hyper_15_32_chunk +(2 rows) + +-- get FIRST uncompressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "UNCOMPRESS_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE '_hyper_%' +ORDER BY ch1.id LIMIT 1 \gset +-- get SECOND uncompressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "UNCOMPRESS_CHUNK_2" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE '_hyper_%' +ORDER BY ch1.id DESC LIMIT 1 \gset +-- get FIRST compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id LIMIT 1 \gset +-- get SECOND compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_2" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id DESC LIMIT 1 \gset +-- ensure segment by column index position in compressed and uncompressed +-- chunk is different +SELECT attname, attnum +FROM pg_attribute +WHERE attrelid IN (:'COMPRESS_CHUNK_1'::regclass, :'UNCOMPRESS_CHUNK_1'::regclass) AND attname = 'd' +ORDER BY attnum; + attname | attnum +---------+-------- + d | 4 + d | 7 +(2 rows) + +SELECT attname, attnum +FROM pg_attribute +WHERE attrelid IN (:'COMPRESS_CHUNK_2'::regclass, :'UNCOMPRESS_CHUNK_2'::regclass) AND attname = 'd' +ORDER BY attnum; + attname | attnum +---------+-------- + d | 4 + d | 7 +(2 rows) + +-- get total rowcount from compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 3 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 3 +(1 row) + +-- get rowcount from compressed chunks where d = 3 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE d = 3; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE d = 3; + count +------- + 1 +(1 row) + +-- get total count of SEGMENTBY column with d = 3 +SELECT COUNT(*) FROM sample_table WHERE d = 3; + count +------- + 1 +(1 row) + +-- delete based on SEGMENTBY column +DELETE FROM sample_table WHERE d = 3; +-- ensure that not all rows are moved to staging area +-- should have few compressed rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 3 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 2 +(1 row) + +-- get rowcount from compressed chunks where d = 3 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE d = 3; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE d = 3; + count +------- + 0 +(1 row) + +-- check chunk compression status after DELETE +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+-------------------- + 1 | _hyper_15_31_chunk + 1 | _hyper_15_32_chunk +(2 rows) + +-- get rowcount from compressed chunks where device_id IS NULL +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE device_id IS NULL; + count +------- + 1 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE device_id IS NULL; + count +------- + 1 +(1 row) + +BEGIN; +-- report 0 row +SELECT * FROM sample_table WHERE a = 247; + time | device_id | val | a | b | d | e | c +------+-----------+-----+---+---+---+---+--- +(0 rows) + +-- delete 1 row +UPDATE sample_table SET a = 247 WHERE device_id IS NULL; +-- ensure rows are visible +SELECT * FROM sample_table WHERE a = 247; + time | device_id | val | a | b | d | e | c +------+-----------+-----+-----+----+---+----+---- + 2 | | 8 | 247 | 11 | 1 | 13 | 23 + 11 | | 8 | 247 | 11 | 2 | 13 | 23 + 10 | | 8 | 247 | 11 | 2 | 13 | 23 +(3 rows) + +ROLLBACK; +-- report 0 rows +SELECT COUNT(*) FROM sample_table WHERE a = 247; + count +------- + 0 +(1 row) + +-- UPDATE based on NULL values in SEGMENTBY column +UPDATE sample_table SET a = 247 WHERE device_id IS NULL; +-- report 3 rows +SELECT COUNT(*) FROM sample_table WHERE a = 247; + count +------- + 3 +(1 row) + +-- ensure that not all rows are moved to staging area +-- should have few compressed rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 2 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 1 +(1 row) + +-- get rowcount from compressed chunks where device_id IS NULL +-- should report 0 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE device_id IS NULL; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE device_id IS NULL; + count +------- + 0 +(1 row) + +DROP TABLE sample_table; +-- test with different physical layout +CREATE TABLE sample_table(time timestamptz, c1 text, c2 text, c3 text); +SELECT create_hypertable('sample_table','time'); +NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------- + (17,public,sample_table,t) +(1 row) + +INSERT INTO sample_table SELECT '2000-01-01'; +ALTER TABLE sample_table DROP column c3; +ALTER TABLE sample_table ADD column c4 text; +INSERT INTO sample_table SELECT '2000-01-01', '1', '2', '3'; +ALTER TABLE sample_table SET (timescaledb.compress,timescaledb.compress_segmentby='c4'); +NOTICE: default order by for hypertable "sample_table" is set to ""time" DESC" +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_17_35_chunk +(1 row) + +BEGIN; +-- report 1 row +SELECT * FROM sample_table WHERE c4 IS NULL; + time | c1 | c2 | c4 +------------------------------+----+----+---- + Sat Jan 01 00:00:00 2000 PST | | | +(1 row) + +-- delete 1 row +DELETE FROM sample_table WHERE c4 IS NULL; +-- report 0 rows +SELECT * FROM sample_table WHERE c4 IS NULL; + time | c1 | c2 | c4 +------+----+----+---- +(0 rows) + +ROLLBACK; +-- get FIRST compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id \gset +-- report 2 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 2 +(1 row) + +-- report 1 row +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 IS NULL; + count +------- + 1 +(1 row) + +-- report 1 row +SELECT * FROM sample_table WHERE c4 IS NULL; + time | c1 | c2 | c4 +------------------------------+----+----+---- + Sat Jan 01 00:00:00 2000 PST | | | +(1 row) + +-- delete 1 row +DELETE FROM sample_table WHERE c4 IS NULL; +-- report 0 row +SELECT * FROM sample_table WHERE c4 IS NULL; + time | c1 | c2 | c4 +------+----+----+---- +(0 rows) + +-- report 1 row which ensure that only required row is moved and deleted +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 1 +(1 row) + +-- report 0 row +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 IS NULL; + count +------- + 0 +(1 row) + +DROP TABLE sample_table; +-- test filtering with ORDER BY columns +CREATE TABLE sample_table(time timestamptz, c1 int, c2 int, c3 int, c4 int); +SELECT create_hypertable('sample_table','time',chunk_time_interval=>'1 day'::interval); +NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------- + (19,public,sample_table,t) +(1 row) + +ALTER TABLE sample_table SET (timescaledb.compress,timescaledb.compress_segmentby='c4', timescaledb.compress_orderby='c1,c2,time'); +INSERT INTO sample_table +SELECT t, c1, c2, c3, c4 +FROM generate_series(:'start_date'::timestamptz - INTERVAL '9 hours', + :'start_date'::timestamptz, + INTERVAL '1 hour') t, + generate_series(0,9,1) c1, + generate_series(0,9,1) c2, + generate_series(0,9,1) c3, + generate_series(0,9,1) c4; +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_19_37_chunk +(1 row) + +-- get FIRST chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE '_hyper_%' +ORDER BY ch1.id LIMIT 1 \gset +-- get FIRST compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id LIMIT 1 \gset +-- check that you uncompress and delete only for exact SEGMENTBY value +SET timescaledb.debug_compression_path_info TO true; +BEGIN; +-- report 10 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 = 5; + count +------- + 10 +(1 row) + +-- report 10k rows +SELECT COUNT(*) FROM sample_table WHERE c4 = 5; + count +------- + 10000 +(1 row) + +-- fetch total and number of affected rows +SELECT COUNT(*) AS "total_rows" FROM sample_table \gset +SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c4 = 5 \gset +-- delete 10k rows +DELETE FROM sample_table WHERE c4 = 5; +INFO: Index "compress_hyper_20_38_chunk_c4__ts_meta_sequence_num_idx" is used for scan. +INFO: Number of compressed rows fetched from index: 10. Number of compressed rows filtered by heap filters: 0. +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c4 = 5; + count +------- + 0 +(1 row) + +-- report 0 rows in uncompressed chunk +SELECT COUNT(*) FROM ONLY :CHUNK_1; + count +------- + 0 +(1 row) + +-- report 0 rows in compressed chunk +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 = 5; + count +------- + 0 +(1 row) + +-- validate correct number of rows was deleted +-- report true +SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table; + ?column? +---------- + t +(1 row) + +ROLLBACK; +-- check that you uncompress and delete only for less than SEGMENTBY value +BEGIN; +-- report 50 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 < 5; + count +------- + 50 +(1 row) + +-- report 50k rows +SELECT COUNT(*) FROM sample_table WHERE c4 < 5; + count +------- + 50000 +(1 row) + +-- fetch total and number of affected rows +SELECT COUNT(*) AS "total_rows" FROM sample_table \gset +SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c4 < 5 \gset +-- delete 50k rows +DELETE FROM sample_table WHERE c4 < 5; +INFO: Index "compress_hyper_20_38_chunk_c4__ts_meta_sequence_num_idx" is used for scan. +INFO: Number of compressed rows fetched from index: 50. Number of compressed rows filtered by heap filters: 0. +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c4 < 5; + count +------- + 0 +(1 row) + +-- report 0 rows in uncompressed chunk +SELECT COUNT(*) FROM ONLY :CHUNK_1; + count +------- + 0 +(1 row) + +-- report 0 rows in compressed chunk +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 < 5; + count +------- + 0 +(1 row) + +-- validate correct number of rows was deleted +-- report true +SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table; + ?column? +---------- + t +(1 row) + +ROLLBACK; +-- check that you uncompress and delete only for greater and equal than SEGMENTBY value +BEGIN; +-- report 50 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 >= 5; + count +------- + 50 +(1 row) + +-- report 50k rows +SELECT COUNT(*) FROM sample_table WHERE c4 >= 5; + count +------- + 50000 +(1 row) + +-- fetch total and number of affected rows +SELECT COUNT(*) AS "total_rows" FROM sample_table \gset +SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c4 >= 5 \gset +-- delete 50k rows +DELETE FROM sample_table WHERE c4 >= 5; +INFO: Index "compress_hyper_20_38_chunk_c4__ts_meta_sequence_num_idx" is used for scan. +INFO: Number of compressed rows fetched from index: 50. Number of compressed rows filtered by heap filters: 0. +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c4 >= 5; + count +------- + 0 +(1 row) + +-- report 0 rows in uncompressed chunk +SELECT COUNT(*) FROM ONLY :CHUNK_1; + count +------- + 0 +(1 row) + +-- report 0 rows in compressed chunk +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 >= 5; + count +------- + 0 +(1 row) + +-- validate correct number of rows was deleted +-- report true +SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table; + ?column? +---------- + t +(1 row) + +ROLLBACK; +-- check that you uncompress and delete only for exact ORDERBY value +-- this will uncompress segments which have min <= value and max >= value +BEGIN; +-- report 10k rows +SELECT COUNT(*) FROM sample_table WHERE c2 = 3; + count +------- + 10000 +(1 row) + +-- report 100 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_min_2 <= 3 and _ts_meta_max_2 >= 3; + count +------- + 100 +(1 row) + +-- fetch total and number of affected rows +SELECT COUNT(*) AS "total_rows" FROM sample_table \gset +SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c2 = 3 \gset +-- delete 10k rows +DELETE FROM sample_table WHERE c2 = 3; +INFO: Number of compressed rows fetched from table scan: 100. Number of compressed rows filtered: 0. +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c2 = 3; + count +------- + 0 +(1 row) + +-- report 90k rows in uncompressed chunk +SELECT COUNT(*) FROM ONLY :CHUNK_1; + count +------- + 90000 +(1 row) + +-- report 0 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_min_2 <= 3 and _ts_meta_max_2 >= 3; + count +------- + 0 +(1 row) + +-- validate correct number of rows was deleted +-- report true +SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table; + ?column? +---------- + t +(1 row) + +ROLLBACK; +-- check that you uncompress and delete only for less then ORDERBY value +-- this will uncompress segments which have min < value +BEGIN; +-- report 20k rows +SELECT COUNT(*) FROM sample_table WHERE c1 < 2; + count +------- + 20000 +(1 row) + +-- report 20 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_max_1 < 2; + count +------- + 20 +(1 row) + +-- fetch total and number of affected rows +SELECT COUNT(*) AS "total_rows" FROM sample_table \gset +SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c1 < 2 \gset +-- delete 20k rows +DELETE FROM sample_table WHERE c1 < 2; +INFO: Number of compressed rows fetched from table scan: 20. Number of compressed rows filtered: 0. +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c1 < 2; + count +------- + 0 +(1 row) + +-- report 0 rows in uncompressed chunk +SELECT COUNT(*) FROM ONLY :CHUNK_1; + count +------- + 0 +(1 row) + +-- report 0 rows in compressed chunk +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_max_1 < 2; + count +------- + 0 +(1 row) + +-- validate correct number of rows was deleted +-- report true +SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table; + ?column? +---------- + t +(1 row) + +ROLLBACK; +-- check that you uncompress and delete only for greater or equal then ORDERBY value +-- this will uncompress segments which have max >= value +BEGIN; +-- report 30k rows +SELECT COUNT(*) FROM sample_table WHERE c1 >= 7; + count +------- + 30000 +(1 row) + +-- report 30 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_min_1 >= 7; + count +------- + 30 +(1 row) + +-- fetch total and number of affected rows +SELECT COUNT(*) AS "total_rows" FROM sample_table \gset +SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c1 >= 7 \gset +-- delete 30k rows +DELETE FROM sample_table WHERE c1 >= 7; +INFO: Number of compressed rows fetched from table scan: 30. Number of compressed rows filtered: 0. +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c1 >= 7; + count +------- + 0 +(1 row) + +-- report 0 rows in uncompressed chunk +SELECT COUNT(*) FROM ONLY :CHUNK_1; + count +------- + 0 +(1 row) + +-- report 0 rows in compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_min_1 >= 7; + count +------- + 0 +(1 row) + +-- validate correct number of rows was deleted +-- report true +SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table; + ?column? +---------- + t +(1 row) + +ROLLBACK; +-- check that you uncompress and delete only tuples which satisfy SEGMENTBY +-- and ORDERBY qualifiers, segments only contain one distinct value for +-- these qualifiers, everything should be deleted that was decompressed +BEGIN; +-- report 1k rows +SELECT COUNT(*) FROM sample_table WHERE c4 = 5 and c1 = 5; + count +------- + 1000 +(1 row) + +-- report 1 row in compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 = 5 AND _ts_meta_min_1 <= 5 and _ts_meta_max_1 >= 5; + count +------- + 1 +(1 row) + +-- fetch total and number of affected rows +SELECT COUNT(*) AS "total_rows" FROM sample_table \gset +SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c4 = 5 and c1 = 5 \gset +-- delete 1k rows +DELETE FROM sample_table WHERE c4 = 5 and c1 = 5; +INFO: Index "compress_hyper_20_38_chunk_c4__ts_meta_sequence_num_idx" is used for scan. +INFO: Number of compressed rows fetched from index: 10. Number of compressed rows filtered by heap filters: 9. +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c4 = 5 and c1 = 5; + count +------- + 0 +(1 row) + +-- report 0 rows in uncompressed chunk +SELECT COUNT(*) FROM ONLY :CHUNK_1; + count +------- + 0 +(1 row) + +-- report 0 rows in compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 = 5 AND _ts_meta_min_1 <= 5 and _ts_meta_max_1 >= 5; + count +------- + 0 +(1 row) + +-- validate correct number of rows was deleted +-- report true +SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table; + ?column? +---------- + t +(1 row) + +ROLLBACK; +-- check that you uncompress and delete only tuples which satisfy SEGMENTBY +-- and ORDERBY qualifiers, segments contain more than one distinct value for +-- these qualifiers, not everything should be deleted that was decompressed +BEGIN; +-- report 4k rows +SELECT COUNT(*) FROM sample_table WHERE c4 > 5 and c2 = 5; + count +------- + 4000 +(1 row) + +-- report 40 rows in compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 > 5 AND _ts_meta_min_2 <= 5 and _ts_meta_max_2 >= 5; + count +------- + 40 +(1 row) + +-- fetch total and number of affected rows +SELECT COUNT(*) AS "total_rows" FROM sample_table \gset +SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c4 > 5 and c2 = 5 \gset +-- delete 4k rows +DELETE FROM sample_table WHERE c4 > 5 and c2 = 5; +INFO: Index "compress_hyper_20_38_chunk_c4__ts_meta_sequence_num_idx" is used for scan. +INFO: Number of compressed rows fetched from index: 40. Number of compressed rows filtered by heap filters: 0. +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c4 > 5 and c2 = 5; + count +------- + 0 +(1 row) + +-- report 36k rows in uncompressed chunk +SELECT COUNT(*) FROM ONLY :CHUNK_1; + count +------- + 36000 +(1 row) + +-- report 0 rows in compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 > 5 AND _ts_meta_min_2 <= 5 and _ts_meta_max_2 >= 5; + count +------- + 0 +(1 row) + +-- validate correct number of rows was deleted +-- report true +SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table; + ?column? +---------- + t +(1 row) + +ROLLBACK; +-- check that you uncompress and delete only tuples which satisfy SEGMENTBY +-- and ORDERBY qualifiers. +-- no: of rows satisfying SEGMENTBY qualifiers is 10 +-- no: of rows satisfying ORDERBY qualifiers is 3 +-- Once both qualifiers are applied ensure that only 7 rows are present in +-- compressed chunk +BEGIN; +-- report 0 rows in uncompressed chunk +SELECT COUNT(*) FROM ONLY :CHUNK_1; + count +------- + 0 +(1 row) + +-- report 10 compressed rows for given condition c4 = 4 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 = 4; + count +------- + 10 +(1 row) + +-- report 3 compressed rows for given condition c4 = 4 and c1 >= 7 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 = 4 AND _ts_meta_max_1 >= 7; + count +------- + 3 +(1 row) + +SELECT COUNT(*) AS "total_rows" FROM :COMPRESS_CHUNK_1 WHERE c4 = 4 \gset +SELECT COUNT(*) AS "total_affected_rows" FROM :COMPRESS_CHUNK_1 WHERE c4 = 4 AND _ts_meta_max_1 >= 7 \gset +UPDATE sample_table SET c3 = c3 + 0 WHERE c4 = 4 AND c1 >= 7; +INFO: Index "compress_hyper_20_38_chunk_c4__ts_meta_sequence_num_idx" is used for scan. +INFO: Number of compressed rows fetched from index: 10. Number of compressed rows filtered by heap filters: 7. +-- report 7 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 = 4; + count +------- + 7 +(1 row) + +-- ensure correct number of rows are moved from compressed chunk +-- report true +SELECT COUNT(*) = :total_rows - :total_affected_rows FROM :COMPRESS_CHUNK_1 WHERE c4 = 4; + ?column? +---------- + t +(1 row) + +ROLLBACK; +RESET timescaledb.debug_compression_path_info; +--github issue: 5640 +CREATE TABLE tab1(filler_1 int, filler_2 int, filler_3 int, time timestamptz NOT NULL, device_id int, v0 int, v1 int, v2 float, v3 float); +CREATE INDEX ON tab1(time); +CREATE INDEX ON tab1(device_id,time); +SELECT create_hypertable('tab1','time',create_default_indexes:=false); + create_hypertable +-------------------- + (21,public,tab1,t) +(1 row) + +ALTER TABLE tab1 DROP COLUMN filler_1; +INSERT INTO tab1(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-05 23:55:00+0','57m') gtime(time), generate_series(1,1,1) gdevice(device_id); +ALTER TABLE tab1 DROP COLUMN filler_2; +INSERT INTO tab1(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id-1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-06 0:00:00+0'::timestamptz,'2000-01-12 23:55:00+0','58m') gtime(time), generate_series(1,1,1) gdevice(device_id); +ALTER TABLE tab1 DROP COLUMN filler_3; +INSERT INTO tab1(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-13 0:00:00+0'::timestamptz,'2000-01-19 23:55:00+0','59m') gtime(time), generate_series(1,1,1) gdevice(device_id); +ANALYZE tab1; +-- compress chunks +ALTER TABLE tab1 SET (timescaledb.compress, timescaledb.compress_orderby='time DESC', timescaledb.compress_segmentby='device_id'); +SELECT compress_chunk(show_chunks('tab1')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_21_39_chunk + _timescaledb_internal._hyper_21_40_chunk + _timescaledb_internal._hyper_21_41_chunk +(3 rows) + +-- ensure there is an index scan generated for below DELETE query +BEGIN; +SELECT count(*) FROM tab1 WHERE device_id = 1; + count +------- + 472 +(1 row) + +INSERT INTO tab1(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 1000, NULL FROM generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-05 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id); +SELECT count(*) FROM tab1 WHERE device_id = 1; + count +------- + 4070 +(1 row) + +ANALYZE tab1; +EXPLAIN (costs off) DELETE FROM public.tab1 WHERE public.tab1.device_id = 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) + -> Delete on tab1 + Delete on _hyper_21_39_chunk tab1_1 + Delete on _hyper_21_40_chunk tab1_2 + Delete on _hyper_21_41_chunk tab1_3 + -> Append + -> Index Scan using _hyper_21_39_chunk_tab1_device_id_time_idx on _hyper_21_39_chunk tab1_1 + Index Cond: (device_id = 1) + -> Seq Scan on _hyper_21_40_chunk tab1_2 + Filter: (device_id = 1) + -> Seq Scan on _hyper_21_41_chunk tab1_3 + Filter: (device_id = 1) +(12 rows) + +DELETE FROM tab1 WHERE tab1.device_id = 1; +SELECT count(*) FROM tab1 WHERE device_id = 1; + count +------- + 0 +(1 row) + +ROLLBACK; +-- github issue 5658 +-- verify that bitmap heap scans work on all the correct data and +-- none of it left over after the dml command +BEGIN; +SELECT count(*) FROM tab1 WHERE device_id = 1; + count +------- + 472 +(1 row) + +INSERT INTO tab1(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 1000, NULL FROM generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-05 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id); +SELECT count(*) FROM tab1 WHERE device_id = 1; + count +------- + 4070 +(1 row) + +ANALYZE tab1; +SET enable_seqscan = off; +SET enable_indexscan = off; +EXPLAIN (costs off) DELETE FROM tab1 WHERE tab1.device_id = 1; + QUERY PLAN +----------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Delete on tab1 + Delete on _hyper_21_39_chunk tab1_1 + Delete on _hyper_21_40_chunk tab1_2 + Delete on _hyper_21_41_chunk tab1_3 + -> Append + -> Bitmap Heap Scan on _hyper_21_39_chunk tab1_1 + Recheck Cond: (device_id = 1) + -> Bitmap Index Scan on _hyper_21_39_chunk_tab1_device_id_time_idx + Index Cond: (device_id = 1) + -> Seq Scan on _hyper_21_40_chunk tab1_2 + Filter: (device_id = 1) + -> Seq Scan on _hyper_21_41_chunk tab1_3 + Filter: (device_id = 1) +(14 rows) + +DELETE FROM tab1 WHERE tab1.device_id = 1; +SELECT count(*) FROM tab1 WHERE device_id = 1; + count +------- + 0 +(1 row) + +ROLLBACK; +-- create hypertable with space partitioning and compression +CREATE TABLE tab2(filler_1 int, filler_2 int, filler_3 int, time timestamptz NOT NULL, device_id int, v0 int, v1 int, v2 float, v3 float); +CREATE INDEX ON tab2(time); +CREATE INDEX ON tab2(device_id,time); +SELECT create_hypertable('tab2','time','device_id',3,create_default_indexes:=false); + create_hypertable +-------------------- + (23,public,tab2,t) +(1 row) + +ALTER TABLE tab2 DROP COLUMN filler_1; +INSERT INTO tab2(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-05 23:55:00+0','35m') gtime(time), generate_series(1,1,1) gdevice(device_id); +ALTER TABLE tab2 DROP COLUMN filler_2; +INSERT INTO tab2(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-06 0:00:00+0'::timestamptz,'2000-01-12 23:55:00+0','45m') gtime(time), generate_series(1,1,1) gdevice(device_id); +ALTER TABLE tab2 DROP COLUMN filler_3; +INSERT INTO tab2(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-13 0:00:00+0'::timestamptz,'2000-01-19 23:55:00+0','55m') gtime(time), generate_series(1,1,1) gdevice(device_id); +ANALYZE tab2; +-- compress chunks +ALTER TABLE tab2 SET (timescaledb.compress, timescaledb.compress_orderby='time DESC', timescaledb.compress_segmentby='device_id'); +SELECT compress_chunk(show_chunks('tab2')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_23_45_chunk + _timescaledb_internal._hyper_23_46_chunk + _timescaledb_internal._hyper_23_47_chunk +(3 rows) + +-- below test will cause chunks of tab2 to get decompressed +-- without fix for issue #5460 +SET timescaledb.enable_optimizations = OFF; +BEGIN; +DELETE FROM tab1 t1 USING tab2 t2 WHERE t1.device_id = t2.device_id AND t2.time > '2000-01-01'; +ROLLBACK; +--cleanup +RESET timescaledb.enable_optimizations; +DROP table tab1; +DROP table tab2; +-- test joins with UPDATE/DELETE on compression chunks +CREATE TABLE join_test1(time timestamptz NOT NULL,device text, value float); +CREATE TABLE join_test2(time timestamptz NOT NULL,device text, value float); +CREATE VIEW chunk_status AS SELECT ht.table_name AS hypertable, ch.table_name AS chunk,ch.status from _timescaledb_catalog.chunk ch INNER JOIN _timescaledb_catalog.hypertable ht ON ht.id=ch.hypertable_id AND ht.table_name IN ('join_test1','join_test2') ORDER BY ht.id, ch.id; +SELECT table_name FROM create_hypertable('join_test1', 'time'); + table_name +------------ + join_test1 +(1 row) + +SELECT table_name FROM create_hypertable('join_test2', 'time'); + table_name +------------ + join_test2 +(1 row) + +ALTER TABLE join_test1 SET (timescaledb.compress, timescaledb.compress_segmentby='device'); +NOTICE: default order by for hypertable "join_test1" is set to ""time" DESC" +ALTER TABLE join_test2 SET (timescaledb.compress, timescaledb.compress_segmentby='device'); +NOTICE: default order by for hypertable "join_test2" is set to ""time" DESC" +INSERT INTO join_test1 VALUES ('2000-01-01','d1',0.1), ('2000-02-01','d1',0.1), ('2000-03-01','d1',0.1); +INSERT INTO join_test2 VALUES ('2000-02-01','d1',0.1), ('2000-02-01','d2',0.1), ('2000-02-01','d3',0.1); +SELECT compress_chunk(show_chunks('join_test1')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_25_51_chunk + _timescaledb_internal._hyper_25_52_chunk + _timescaledb_internal._hyper_25_53_chunk +(3 rows) + +SELECT compress_chunk(show_chunks('join_test2')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_26_54_chunk +(1 row) + +SELECT * FROM chunk_status; + hypertable | chunk | status +------------+--------------------+-------- + join_test1 | _hyper_25_51_chunk | 1 + join_test1 | _hyper_25_52_chunk | 1 + join_test1 | _hyper_25_53_chunk | 1 + join_test2 | _hyper_26_54_chunk | 1 +(4 rows) + +BEGIN; +DELETE FROM join_test1 USING join_test2; +-- only join_test1 chunks should have status 9 +SELECT * FROM chunk_status; + hypertable | chunk | status +------------+--------------------+-------- + join_test1 | _hyper_25_51_chunk | 9 + join_test1 | _hyper_25_52_chunk | 9 + join_test1 | _hyper_25_53_chunk | 9 + join_test2 | _hyper_26_54_chunk | 1 +(4 rows) + +ROLLBACK; +BEGIN; +DELETE FROM join_test2 USING join_test1; +-- only join_test2 chunks should have status 9 +SELECT * FROM chunk_status; + hypertable | chunk | status +------------+--------------------+-------- + join_test1 | _hyper_25_51_chunk | 1 + join_test1 | _hyper_25_52_chunk | 1 + join_test1 | _hyper_25_53_chunk | 1 + join_test2 | _hyper_26_54_chunk | 9 +(4 rows) + +ROLLBACK; +BEGIN; +DELETE FROM join_test1 t1 USING join_test1 t2 WHERE t1.time = '2000-01-01'; +-- only first chunk of join_test1 should have status change +SELECT * FROM chunk_status; + hypertable | chunk | status +------------+--------------------+-------- + join_test1 | _hyper_25_51_chunk | 9 + join_test1 | _hyper_25_52_chunk | 1 + join_test1 | _hyper_25_53_chunk | 1 + join_test2 | _hyper_26_54_chunk | 1 +(4 rows) + +ROLLBACK; +BEGIN; +DELETE FROM join_test1 t1 USING join_test1 t2 WHERE t2.time = '2000-01-01'; +-- all chunks of join_test1 should have status 9 +SELECT * FROM chunk_status; + hypertable | chunk | status +------------+--------------------+-------- + join_test1 | _hyper_25_51_chunk | 9 + join_test1 | _hyper_25_52_chunk | 9 + join_test1 | _hyper_25_53_chunk | 9 + join_test2 | _hyper_26_54_chunk | 1 +(4 rows) + +ROLLBACK; +BEGIN; +UPDATE join_test1 t1 SET value = t1.value + 1 FROM join_test2 t2; +-- only join_test1 chunks should have status 9 +SELECT * FROM chunk_status; + hypertable | chunk | status +------------+--------------------+-------- + join_test1 | _hyper_25_51_chunk | 9 + join_test1 | _hyper_25_52_chunk | 9 + join_test1 | _hyper_25_53_chunk | 9 + join_test2 | _hyper_26_54_chunk | 1 +(4 rows) + +ROLLBACK; +BEGIN; +UPDATE join_test2 t1 SET value = t1.value + 1 FROM join_test1 t2; +-- only join_test2 chunks should have status 9 +SELECT * FROM chunk_status; + hypertable | chunk | status +------------+--------------------+-------- + join_test1 | _hyper_25_51_chunk | 1 + join_test1 | _hyper_25_52_chunk | 1 + join_test1 | _hyper_25_53_chunk | 1 + join_test2 | _hyper_26_54_chunk | 9 +(4 rows) + +ROLLBACK; +BEGIN; +UPDATE join_test1 t1 SET value = t1.value + 1 FROM join_test1 t2 WHERE t1.time = '2000-01-01'; +-- only first chunk of join_test1 should have status 9 +SELECT * FROM chunk_status; + hypertable | chunk | status +------------+--------------------+-------- + join_test1 | _hyper_25_51_chunk | 9 + join_test1 | _hyper_25_52_chunk | 1 + join_test1 | _hyper_25_53_chunk | 1 + join_test2 | _hyper_26_54_chunk | 1 +(4 rows) + +ROLLBACK; +BEGIN; +UPDATE join_test1 t1 SET value = t1.value + 1 FROM join_test1 t2 WHERE t2.time = '2000-01-01'; +-- all chunks of join_test1 should have status 9 +SELECT * FROM chunk_status; + hypertable | chunk | status +------------+--------------------+-------- + join_test1 | _hyper_25_51_chunk | 9 + join_test1 | _hyper_25_52_chunk | 9 + join_test1 | _hyper_25_53_chunk | 9 + join_test2 | _hyper_26_54_chunk | 1 +(4 rows) + +ROLLBACK; +DROP TABLE join_test1; +DROP TABLE join_test2; +-- test if index scan qualifiers are properly used +CREATE TABLE index_scan_test(time timestamptz NOT NULL, device_id int, value float); +SELECT create_hypertable('index_scan_test','time',create_default_indexes:=false); + create_hypertable +------------------------------- + (29,public,index_scan_test,t) +(1 row) + +INSERT INTO index_scan_test(time,device_id,value) SELECT time, device_id, device_id + 0.5 FROM generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-01 23:55:00+0','1m') gtime(time), generate_series(1,5,1) gdevice(device_id); +-- compress chunks +ALTER TABLE index_scan_test SET (timescaledb.compress, timescaledb.compress_orderby='time DESC', timescaledb.compress_segmentby='device_id'); +SELECT compress_chunk(show_chunks('index_scan_test')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_29_59_chunk +(1 row) + +ANALYZE index_scan_test; +SELECT ch1.schema_name|| '.' || ch1.table_name AS "CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ht.table_name = 'index_scan_test' +AND ch1.hypertable_id = ht.id +AND ch1.table_name LIKE '_hyper%' +ORDER BY ch1.id LIMIT 1 \gset +SELECT ch2.schema_name|| '.' || ch2.table_name AS "COMP_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.chunk ch2, _timescaledb_catalog.hypertable ht +WHERE ht.table_name = 'index_scan_test' +AND ch1.hypertable_id = ht.id +AND ch1.compressed_chunk_id = ch2.id +ORDER BY ch2.id LIMIT 1 \gset +INSERT INTO index_scan_test(time,device_id,value) SELECT time, device_id, device_id + 0.5 FROM generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-05 23:55:00+0','1m') gtime(time), generate_series(1,5,1) gdevice(device_id); +-- test index on single column +BEGIN; +SELECT count(*) as "UNCOMP_LEFTOVER" FROM ONLY :CHUNK_1 WHERE device_id != 2 \gset +CREATE INDEX ON index_scan_test(device_id); +EXPLAIN (costs off, verbose) DELETE FROM index_scan_test WHERE device_id = 2; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Delete on public.index_scan_test + Delete on _timescaledb_internal._hyper_29_59_chunk index_scan_test_1 + -> Index Scan using _hyper_29_59_chunk_index_scan_test_device_id_idx on _timescaledb_internal._hyper_29_59_chunk index_scan_test_1 + Output: index_scan_test_1.tableoid, index_scan_test_1.ctid + Index Cond: (index_scan_test_1.device_id = 2) +(6 rows) + +DELETE FROM index_scan_test WHERE device_id = 2; +-- everything should be deleted +SELECT count(*) FROM index_scan_test where device_id = 2; + count +------- + 0 +(1 row) + +-- there shouldn't be anything in the uncompressed chunk where device_id = 2 +SELECT count(*) = :UNCOMP_LEFTOVER FROM ONLY :CHUNK_1; + ?column? +---------- + t +(1 row) + +-- there shouldn't be anything in the compressed chunk from device_id = 2 +SELECT count(*) FROM :COMP_CHUNK_1 where device_id = 2; + count +------- + 0 +(1 row) + +ROLLBACK; +-- test multi column index +BEGIN; +SELECT count(*) as "UNCOMP_LEFTOVER" FROM ONLY :CHUNK_1 WHERE device_id != 2 OR time <= '2000-01-02'::timestamptz \gset +CREATE INDEX ON index_scan_test(device_id, time); +EXPLAIN (costs off, verbose) DELETE FROM index_scan_test WHERE device_id = 2 AND time > '2000-01-02'::timestamptz; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Delete on public.index_scan_test + Delete on _timescaledb_internal._hyper_29_59_chunk index_scan_test_1 + -> Index Scan using _hyper_29_59_chunk_index_scan_test_device_id_time_idx on _timescaledb_internal._hyper_29_59_chunk index_scan_test_1 + Output: index_scan_test_1.tableoid, index_scan_test_1.ctid + Index Cond: ((index_scan_test_1.device_id = 2) AND (index_scan_test_1."time" > 'Sun Jan 02 00:00:00 2000 PST'::timestamp with time zone)) +(6 rows) + +DELETE FROM index_scan_test WHERE device_id = 2 AND time > '2000-01-02'::timestamptz; +-- everything should be deleted +SELECT count(*) FROM index_scan_test WHERE device_id = 2 AND time > '2000-01-02'::timestamptz; + count +------- + 0 +(1 row) + +-- there shouldn't be anything in the uncompressed chunk that matches predicates +SELECT count(*) = :UNCOMP_LEFTOVER FROM ONLY :CHUNK_1; + ?column? +---------- + t +(1 row) + +-- there shouldn't be anything in the compressed chunk that matches predicates +SELECT count(*) FROM :COMP_CHUNK_1 WHERE device_id = 2 AND _ts_meta_max_1 >= '2000-01-02'::timestamptz; + count +------- + 0 +(1 row) + +ROLLBACK; +-- test index with filter condition +BEGIN; +SELECT count(*) as "UNCOMP_LEFTOVER" FROM ONLY :CHUNK_1 WHERE device_id != 2 OR time <= '2000-01-02'::timestamptz \gset +CREATE INDEX ON index_scan_test(device_id); +EXPLAIN (costs off, verbose) DELETE FROM index_scan_test WHERE device_id = 2 AND time > '2000-01-02'::timestamptz; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Delete on public.index_scan_test + Delete on _timescaledb_internal._hyper_29_59_chunk index_scan_test_1 + -> Index Scan using _hyper_29_59_chunk_index_scan_test_device_id_idx on _timescaledb_internal._hyper_29_59_chunk index_scan_test_1 + Output: index_scan_test_1.tableoid, index_scan_test_1.ctid + Index Cond: (index_scan_test_1.device_id = 2) + Filter: (index_scan_test_1."time" > 'Sun Jan 02 00:00:00 2000 PST'::timestamp with time zone) +(7 rows) + +DELETE FROM index_scan_test WHERE device_id = 2 AND time > '2000-01-02'::timestamptz; +-- everything should be deleted +SELECT count(*) FROM index_scan_test WHERE device_id = 2 AND time > '2000-01-02'::timestamptz; + count +------- + 0 +(1 row) + +-- there shouldn't be anything in the uncompressed chunk that matches predicates +SELECT count(*) = :UNCOMP_LEFTOVER FROM ONLY :CHUNK_1; + ?column? +---------- + t +(1 row) + +-- there shouldn't be anything in the compressed chunk that matches predicates +SELECT count(*) FROM :COMP_CHUNK_1 WHERE device_id = 2 AND _ts_meta_max_1 >= '2000-01-02'::timestamptz; + count +------- + 0 +(1 row) + +ROLLBACK; +-- test for disabling DML decompression +SHOW timescaledb.enable_dml_decompression; + timescaledb.enable_dml_decompression +-------------------------------------- + on +(1 row) + +SET timescaledb.enable_dml_decompression = false; +\set ON_ERROR_STOP 0 +-- should ERROR both UPDATE/DELETE statements because the DML decompression is disabled +UPDATE sample_table SET c3 = NULL WHERE c4 = 5; +ERROR: UPDATE/DELETE is disabled on compressed chunks +DELETE FROM sample_table WHERE c4 = 5; +ERROR: UPDATE/DELETE is disabled on compressed chunks +\set ON_ERROR_STOP 1 +-- make sure reseting the GUC we will be able to UPDATE/DELETE compressed chunks +RESET timescaledb.enable_dml_decompression; +SHOW timescaledb.enable_dml_decompression; + timescaledb.enable_dml_decompression +-------------------------------------- + on +(1 row) + +BEGIN; +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c4 = 5 AND c3 IS NULL; + count +------- + 0 +(1 row) + +UPDATE sample_table SET c3 = NULL WHERE c4 = 5; +-- report 10k rows +SELECT count(*) FROM sample_table WHERE c4 = 5 AND c3 IS NULL; + count +------- + 10000 +(1 row) + +ROLLBACK; +BEGIN; +-- report 10k rows +SELECT count(*) FROM sample_table WHERE c4 = 5; + count +------- + 10000 +(1 row) + +DELETE FROM sample_table WHERE c4 = 5; +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c4 = 5; + count +------- + 0 +(1 row) + +ROLLBACK; +-- create new uncompressed chunk +INSERT INTO sample_table +SELECT t, 1, 1, 1, 1 +FROM generate_series('2023-05-04 00:00:00-00'::timestamptz, + '2023-05-04 00:00:00-00'::timestamptz + INTERVAL '2 hours', + INTERVAL '1 hour') t; +-- check chunk compression status +SELECT chunk_name, is_compressed +FROM timescaledb_information.chunks +WHERE hypertable_name = 'sample_table' +ORDER BY chunk_name; + chunk_name | is_compressed +--------------------+--------------- + _hyper_19_37_chunk | t + _hyper_19_61_chunk | f +(2 rows) + +-- test for uncompressed and compressed chunks +SHOW timescaledb.enable_dml_decompression; + timescaledb.enable_dml_decompression +-------------------------------------- + on +(1 row) + +SET timescaledb.enable_dml_decompression = false; +BEGIN; +-- report 3 rows +SELECT count(*) FROM sample_table WHERE time >= '2023-05-04 00:00:00-00'::timestamptz; + count +------- + 3 +(1 row) + +-- delete from uncompressed chunk should work +DELETE FROM sample_table WHERE time >= '2023-05-04 00:00:00-00'::timestamptz; +-- report 0 rows +SELECT count(*) FROM sample_table WHERE time >= '2023-05-04 00:00:00-00'::timestamptz; + count +------- + 0 +(1 row) + +ROLLBACK; +BEGIN; +-- report 0 rows +SELECT count(*) FROM sample_table WHERE time >= '2023-05-04 00:00:00-00'::timestamptz AND c3 IS NULL; + count +------- + 0 +(1 row) + +UPDATE sample_table SET c3 = NULL WHERE time >= '2023-05-04 00:00:00-00'::timestamptz; +-- report 3 rows +SELECT count(*) FROM sample_table WHERE time >= '2023-05-04 00:00:00-00'::timestamptz AND c3 IS NULL; + count +------- + 3 +(1 row) + +ROLLBACK; +\set ON_ERROR_STOP 0 +-- should ERROR both UPDATE/DELETE statements because the DML decompression is disabled +-- and both statements we're touching compressed and uncompressed chunks +UPDATE sample_table SET c3 = NULL WHERE time >= '2023-03-17 00:00:00-00'::timestamptz AND c3 IS NULL; +ERROR: UPDATE/DELETE is disabled on compressed chunks +DELETE FROM sample_table WHERE time >= '2023-03-17 00:00:00-00'::timestamptz; +ERROR: UPDATE/DELETE is disabled on compressed chunks +\set ON_ERROR_STOP 1 +--github issue: 5586 +--testcase with multiple indexes +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE DATABASE test5586; +\c test5586 :ROLE_SUPERUSER +SET client_min_messages = ERROR; +CREATE EXTENSION timescaledb CASCADE; +CREATE TABLE tab1(filler_1 int, filler_2 int, filler_3 int, time timestamptz NOT NULL, device_id int, v0 int, v1 int, v2 float, v3 float); +SELECT create_hypertable('tab1','time',create_default_indexes:=false); + create_hypertable +------------------- + (1,public,tab1,t) +(1 row) + +INSERT INTO tab1(filler_1, filler_2, filler_3,time,device_id,v0,v1,v2,v3) SELECT device_id, device_id+1, device_id + 2, time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-05 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id); +ALTER TABLE tab1 SET (timescaledb.compress, timescaledb.compress_orderby='time DESC', timescaledb.compress_segmentby='device_id, filler_1, filler_2, filler_3'); +SELECT compress_chunk(show_chunks('tab1')); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +SELECT format('%I.%I', schema_name, table_name) AS "CHUNK" FROM _timescaledb_catalog.chunk WHERE hypertable_id = 2 \gset +-- create multiple indexes on compressed hypertable +DROP INDEX _timescaledb_internal.compress_hyper_2_2_chunk_device_id_filler_1_filler_2_filler_idx; +CREATE INDEX ON :CHUNK (_ts_meta_min_1); +CREATE INDEX ON :CHUNK (_ts_meta_min_1, _ts_meta_sequence_num); +CREATE INDEX ON :CHUNK (_ts_meta_min_1, _ts_meta_max_1, filler_1); +CREATE INDEX filler_1 ON :CHUNK (filler_1); +CREATE INDEX filler_2 ON :CHUNK (filler_2); +CREATE INDEX filler_3 ON :CHUNK (filler_3); +-- below indexes should be selected +CREATE INDEX filler_1_filler_2 ON :CHUNK (filler_1, filler_2); +CREATE INDEX filler_2_filler_3 ON :CHUNK (filler_2, filler_3); +set timescaledb.debug_compression_path_info to on; +BEGIN; +SELECT COUNT(*) FROM tab1 WHERE filler_3 = 5 AND filler_2 = 4; + count +------- + 3598 +(1 row) + +UPDATE tab1 SET v0 = v1 + v2 WHERE filler_3 = 5 AND filler_2 = 4; +INFO: Index "filler_2_filler_3" is used for scan. +INFO: Number of compressed rows fetched from index: 4. Number of compressed rows filtered by heap filters: 0. +ROLLBACK; +BEGIN; +SELECT COUNT(*) FROM tab1 WHERE filler_1 < 5 AND filler_2 = 4; + count +------- + 3598 +(1 row) + +UPDATE tab1 SET v0 = v1 + v2 WHERE filler_1 < 5 AND filler_2 = 4; +INFO: Index "filler_1_filler_2" is used for scan. +INFO: Number of compressed rows fetched from index: 4. Number of compressed rows filtered by heap filters: 0. +ROLLBACK; +-- idealy filler_1 index should be selected, +-- instead first matching index is selected +BEGIN; +SELECT COUNT(*) FROM tab1 WHERE filler_1 < 5; + count +------- + 14392 +(1 row) + +UPDATE tab1 SET v0 = v1 + v2 WHERE filler_1 < 5; +INFO: Index "compress_hyper_2_2_chunk__ts_meta_min_1__ts_meta_max_1_fill_idx" is used for scan. +INFO: Number of compressed rows fetched from index: 16. Number of compressed rows filtered by heap filters: 0. +ROLLBACK; +RESET timescaledb.debug_compression_path_info; +DROP TABLE tab1; +\c :TEST_DBNAME :ROLE_SUPERUSER +DROP DATABASE test5586; +--issue: #6024 +CREATE TABLE t(a integer, b integer); +SELECT create_hypertable('t', 'a', chunk_time_interval=> 10); +NOTICE: adding not-null constraint to column "a" + create_hypertable +------------------- + (31,public,t,t) +(1 row) + +INSERT INTO t values(1, 2); +ALTER TABLE t SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "t" is set to "" +NOTICE: default order by for hypertable "t" is set to "a DESC" +SELECT compress_chunk(show_chunks('t')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_31_62_chunk +(1 row) + +-- should not crash +UPDATE t SET b = 2 WHERE tableoid = 0; +UPDATE t SET b = 2 WHERE tableoid is null; +DROP TABLE t; +-- github issue: 6367 +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE DATABASE test6367; +\c test6367 :ROLE_SUPERUSER +SET client_min_messages = ERROR; +CREATE EXTENSION timescaledb CASCADE; +CREATE TABLE t6367 ( + time timestamptz NOT NULL, + source_id varchar(64) NOT NULL, + label varchar, + data jsonb +); +SELECT table_name FROM create_hypertable('t6367', 'time'); + table_name +------------ + t6367 +(1 row) + +ALTER TABLE t6367 SET(timescaledb.compress, timescaledb.compress_segmentby = 'source_id, label', timescaledb.compress_orderby = 'time'); +INSERT INTO t6367 +SELECT time, source_id, label, '{}' AS data +FROM +generate_series('1990-01-01'::timestamptz, '1990-01-10'::timestamptz, INTERVAL '1 day') AS g1(time), +generate_series(1, 3, 1 ) AS g2(source_id), +generate_series(1, 3, 1 ) AS g3(label); +SELECT compress_chunk(c) FROM show_chunks('t6367') c; + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_2_chunk +(2 rows) + +SELECT format('%I.%I', schema_name, table_name) AS "CHUNK1" FROM _timescaledb_catalog.chunk WHERE hypertable_id = 2 ORDER BY id LIMIT 1 \gset +SELECT format('%I.%I', schema_name, table_name) AS "CHUNK2" FROM _timescaledb_catalog.chunk WHERE hypertable_id = 2 ORDER BY id LIMIT 1 OFFSET 1 \gset +DROP INDEX _timescaledb_internal.compress_hyper_2_3_chunk_source_id_label__ts_meta_sequence__idx; +DROP INDEX _timescaledb_internal.compress_hyper_2_4_chunk_source_id_label__ts_meta_sequence__idx; +-- testcase with no index, should use seq scan +set timescaledb.debug_compression_path_info to on; +BEGIN; +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 10 +(1 row) + +UPDATE t6367 SET source_id = '0' WHERE source_id = '2' AND label = '1'; +INFO: Number of compressed rows fetched from table scan: 1. Number of compressed rows filtered: 0. +INFO: Number of compressed rows fetched from table scan: 1. Number of compressed rows filtered: 0. +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 0 +(1 row) + +ROLLBACK; +-- test case with an index which has only one +-- of the segmentby filters +CREATE INDEX source_id_idx1 ON :CHUNK1 (source_id); +CREATE INDEX source_id_idx2 ON :CHUNK2 (source_id); +BEGIN; +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 10 +(1 row) + +UPDATE t6367 SET source_id = '0' WHERE source_id = '2' AND label = '1'; +INFO: Index "source_id_idx1" is used for scan. +INFO: Number of compressed rows fetched from index: 3. Number of compressed rows filtered by heap filters: 2. +INFO: Index "source_id_idx2" is used for scan. +INFO: Number of compressed rows fetched from index: 3. Number of compressed rows filtered by heap filters: 2. +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 0 +(1 row) + +ROLLBACK; +-- test that we are filtering NULL checks +BEGIN; +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label IS NULL; + count +------- + 0 +(1 row) + +UPDATE t6367 SET source_id = '0' WHERE source_id = '2' AND label IS NULL; +INFO: Index "source_id_idx1" is used for scan. +INFO: Number of compressed rows fetched from index: 3. Number of compressed rows filtered by heap filters: 3. +INFO: Index "source_id_idx2" is used for scan. +INFO: Number of compressed rows fetched from index: 3. Number of compressed rows filtered by heap filters: 3. +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label IS NULL; + count +------- + 0 +(1 row) + +ROLLBACK; +BEGIN; +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label IS NOT NULL; + count +------- + 30 +(1 row) + +UPDATE t6367 SET source_id = '0' WHERE source_id = '2' AND label IS NOT NULL; +INFO: Index "source_id_idx1" is used for scan. +INFO: Number of compressed rows fetched from index: 3. Number of compressed rows filtered by heap filters: 0. +INFO: Index "source_id_idx2" is used for scan. +INFO: Number of compressed rows fetched from index: 3. Number of compressed rows filtered by heap filters: 0. +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label IS NOT NULL; + count +------- + 0 +(1 row) + +ROLLBACK; +DROP INDEX _timescaledb_internal.source_id_idx1; +DROP INDEX _timescaledb_internal.source_id_idx2; +-- test case with an index which has multiple same column +CREATE INDEX source_id_source_id_idx ON :CHUNK1 (source_id, source_id); +BEGIN; +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 10 +(1 row) + +UPDATE t6367 SET source_id = '0' WHERE source_id = '2' AND label = '1'; +INFO: Index "source_id_source_id_idx" is used for scan. +INFO: Number of compressed rows fetched from index: 3. Number of compressed rows filtered by heap filters: 2. +INFO: Number of compressed rows fetched from table scan: 1. Number of compressed rows filtered: 0. +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 0 +(1 row) + +ROLLBACK; +DROP INDEX _timescaledb_internal.source_id_source_id_idx; +-- test using a non-btree index +-- fallback to heap scan +CREATE INDEX brin_source_id_idx ON :CHUNK1 USING brin (source_id); +BEGIN; +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 10 +(1 row) + +UPDATE t6367 SET source_id = '0' WHERE source_id = '2' AND label = '1'; +INFO: Number of compressed rows fetched from table scan: 1. Number of compressed rows filtered: 0. +INFO: Number of compressed rows fetched from table scan: 1. Number of compressed rows filtered: 0. +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 0 +(1 row) + +ROLLBACK; +DROP INDEX _timescaledb_internal.brin_source_id_idx; +-- test using an expression index +-- should fallback to heap scans +CREATE INDEX expr_source_id_idx ON :CHUNK1 (upper(source_id)); +BEGIN; +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 10 +(1 row) + +UPDATE t6367 SET source_id = '0' WHERE source_id = '2' AND label = '1'; +INFO: Number of compressed rows fetched from table scan: 1. Number of compressed rows filtered: 0. +INFO: Number of compressed rows fetched from table scan: 1. Number of compressed rows filtered: 0. +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 0 +(1 row) + +ROLLBACK; +DROP INDEX _timescaledb_internal.expr_source_id_idx; +-- test using a partial index +-- should fallback to heap scans +CREATE INDEX partial_source_id_idx ON :CHUNK1 (source_id) +WHERE _ts_meta_min_1 > '1990-01-01'::timestamptz; +BEGIN; +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 10 +(1 row) + +UPDATE t6367 SET source_id = '0' WHERE source_id = '2' AND label = '1'; +INFO: Number of compressed rows fetched from table scan: 1. Number of compressed rows filtered: 0. +INFO: Number of compressed rows fetched from table scan: 1. Number of compressed rows filtered: 0. +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 0 +(1 row) + +ROLLBACK; +RESET timescaledb.debug_compression_path_info; +DROP TABLE t6367; +\c :TEST_DBNAME :ROLE_SUPERUSER +DROP DATABASE test6367; +-- Text limitting decompressed tuple during an UPDATE or DELETE +CREATE TABLE test_limit ( + timestamp int not null, + id bigint +); +SELECT * FROM create_hypertable('test_limit', 'timestamp', chunk_time_interval=>10000); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 33 | public | test_limit | t +(1 row) + +INSERT INTO test_limit SELECT t, i FROM generate_series(1,10000,1) t CROSS JOIN generate_series(1,3,1) i; +ALTER TABLE test_limit SET ( + timescaledb.compress, + timescaledb.compress_orderby = 'timestamp' +); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_limit" is set to "" +SELECT count(compress_chunk(ch)) FROM show_chunks('test_limit') ch; + count +------- + 2 +(1 row) + +SET timescaledb.max_tuples_decompressed_per_dml_transaction = 5000; +\set VERBOSITY default +\set ON_ERROR_STOP 0 +-- Updating or deleting everything will break the set limit. +UPDATE test_limit SET id = 0; +ERROR: tuple decompression limit exceeded by operation +DETAIL: current limit: 5000, tuples decompressed: 30000 +HINT: Consider increasing timescaledb.max_tuples_decompressed_per_dml_transaction or set to 0 (unlimited). +DELETE FROM test_limit WHERE id > 0; +ERROR: tuple decompression limit exceeded by operation +DETAIL: current limit: 5000, tuples decompressed: 30000 +HINT: Consider increasing timescaledb.max_tuples_decompressed_per_dml_transaction or set to 0 (unlimited). +-- Setting to 0 should remove the limit. +SET timescaledb.max_tuples_decompressed_per_dml_transaction = 0; +UPDATE test_limit SET id = 0; +DELETE FROM test_limit WHERE id > 0; +\set ON_ERROR_STOP 1 +DROP TABLE test_limit; +-- check partial compression with DML +CREATE TABLE test_partials (time timestamptz NOT NULL, a int, b int); +SELECT create_hypertable('test_partials', 'time'); + create_hypertable +----------------------------- + (35,public,test_partials,t) +(1 row) + +INSERT INTO test_partials +VALUES -- chunk1 + ('2020-01-01 00:00'::timestamptz, 1, 2), + ('2020-01-01 00:01'::timestamptz, 2, 2), + ('2020-01-01 00:04'::timestamptz, 1, 2), + -- chunk2 + ('2021-01-01 00:00'::timestamptz, 1, 2), + ('2021-01-01 00:04'::timestamptz, 1, 2), + -- chunk3 + ('2022-01-01 00:00'::timestamptz, 1, 2), + ('2022-01-01 00:04'::timestamptz, 1, 2); +-- enable compression, compress all chunks +ALTER TABLE test_partials SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_partials" is set to "" +NOTICE: default order by for hypertable "test_partials" is set to ""time" DESC" +SELECT compress_chunk(show_chunks('test_partials')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_35_68_chunk + _timescaledb_internal._hyper_35_69_chunk + _timescaledb_internal._hyper_35_70_chunk +(3 rows) + +-- fully compressed +EXPLAIN (costs off) SELECT * FROM test_partials ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on test_partials + Order: test_partials."time" + -> Custom Scan (DecompressChunk) on _hyper_35_68_chunk + -> Sort + Sort Key: compress_hyper_36_71_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_36_71_chunk + -> Custom Scan (DecompressChunk) on _hyper_35_69_chunk + -> Sort + Sort Key: compress_hyper_36_72_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_36_72_chunk + -> Custom Scan (DecompressChunk) on _hyper_35_70_chunk + -> Sort + Sort Key: compress_hyper_36_73_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_36_73_chunk +(14 rows) + +-- verify correct results +SELECT * FROM test_partials ORDER BY time; + time | a | b +------------------------------+---+--- + Wed Jan 01 00:00:00 2020 PST | 1 | 2 + Wed Jan 01 00:01:00 2020 PST | 2 | 2 + Wed Jan 01 00:04:00 2020 PST | 1 | 2 + Fri Jan 01 00:00:00 2021 PST | 1 | 2 + Fri Jan 01 00:04:00 2021 PST | 1 | 2 + Sat Jan 01 00:00:00 2022 PST | 1 | 2 + Sat Jan 01 00:04:00 2022 PST | 1 | 2 +(7 rows) + +-- check that DML causes transparent decompression and that +-- data gets shifted to the uncompressed parts +EXPLAIN (costs off) DELETE FROM test_partials WHERE time >= ALL(SELECT time from test_partials); + QUERY PLAN +------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Delete on test_partials + Delete on _hyper_35_68_chunk test_partials_1 + Delete on _hyper_35_69_chunk test_partials_2 + Delete on _hyper_35_70_chunk test_partials_3 + -> Custom Scan (ChunkAppend) on test_partials + -> Seq Scan on _hyper_35_68_chunk test_partials_1 + Filter: (SubPlan 1) + SubPlan 1 + -> Materialize + -> Append + -> Custom Scan (DecompressChunk) on _hyper_35_68_chunk + -> Seq Scan on compress_hyper_36_71_chunk + -> Seq Scan on _hyper_35_68_chunk + -> Custom Scan (DecompressChunk) on _hyper_35_69_chunk + -> Seq Scan on compress_hyper_36_72_chunk + -> Seq Scan on _hyper_35_69_chunk + -> Custom Scan (DecompressChunk) on _hyper_35_70_chunk + -> Seq Scan on compress_hyper_36_73_chunk + -> Seq Scan on _hyper_35_70_chunk + -> Seq Scan on _hyper_35_69_chunk test_partials_2 + Filter: (SubPlan 1) + -> Seq Scan on _hyper_35_70_chunk test_partials_3 + Filter: (SubPlan 1) +(24 rows) + +DELETE FROM test_partials WHERE time >= ALL(SELECT time from test_partials); +-- All 3 chunks will now become partially compressed chunks +EXPLAIN (costs off) SELECT * FROM test_partials ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on test_partials + Order: test_partials."time" + -> Merge Append + Sort Key: _hyper_35_68_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_35_68_chunk + -> Sort + Sort Key: compress_hyper_36_71_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_36_71_chunk + -> Sort + Sort Key: _hyper_35_68_chunk."time" + -> Seq Scan on _hyper_35_68_chunk + -> Merge Append + Sort Key: _hyper_35_69_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_35_69_chunk + -> Sort + Sort Key: compress_hyper_36_72_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_36_72_chunk + -> Sort + Sort Key: _hyper_35_69_chunk."time" + -> Seq Scan on _hyper_35_69_chunk + -> Merge Append + Sort Key: _hyper_35_70_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_35_70_chunk + -> Sort + Sort Key: compress_hyper_36_73_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_36_73_chunk + -> Sort + Sort Key: _hyper_35_70_chunk."time" + -> Seq Scan on _hyper_35_70_chunk +(29 rows) + +-- verify correct results +SELECT * FROM test_partials ORDER BY time; + time | a | b +------------------------------+---+--- + Wed Jan 01 00:00:00 2020 PST | 1 | 2 + Wed Jan 01 00:01:00 2020 PST | 2 | 2 + Wed Jan 01 00:04:00 2020 PST | 1 | 2 + Fri Jan 01 00:00:00 2021 PST | 1 | 2 + Fri Jan 01 00:04:00 2021 PST | 1 | 2 + Sat Jan 01 00:00:00 2022 PST | 1 | 2 +(6 rows) + +SELECT compress_chunk(show_chunks('test_partials')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_35_68_chunk + _timescaledb_internal._hyper_35_69_chunk + _timescaledb_internal._hyper_35_70_chunk +(3 rows) + +-- fully compressed +EXPLAIN (costs off) SELECT * FROM test_partials ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on test_partials + Order: test_partials."time" + -> Custom Scan (DecompressChunk) on _hyper_35_68_chunk + -> Sort + Sort Key: compress_hyper_36_74_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_36_74_chunk + -> Custom Scan (DecompressChunk) on _hyper_35_69_chunk + -> Sort + Sort Key: compress_hyper_36_75_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_36_75_chunk + -> Custom Scan (DecompressChunk) on _hyper_35_70_chunk + -> Sort + Sort Key: compress_hyper_36_76_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_36_76_chunk +(14 rows) + +DROP TABLE test_partials; +CREATE TABLE test_meta_filters(time timestamptz NOT NULL, device text, metric text, v1 float, v2 float); +CREATE INDEX ON test_meta_filters(device, metric, v1); +SELECT create_hypertable('test_meta_filters', 'time'); + create_hypertable +--------------------------------- + (37,public,test_meta_filters,t) +(1 row) + +ALTER TABLE test_meta_filters SET (timescaledb.compress, timescaledb.compress_segmentby='device', timescaledb.compress_orderby='metric,time'); +INSERT INTO test_meta_filters SELECT '2020-01-01'::timestamptz,'d1','m' || metric::text,v1,v2 FROM generate_series(1,3,1) metric, generate_series(1,1000,1) v1, generate_series(1,10,1) v2 ORDER BY 1,2,3,4,5; +SELECT compress_chunk(show_chunks('test_meta_filters')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_37_77_chunk +(1 row) + +EXPLAIN (analyze, timing off, costs off, summary off) DELETE FROM test_meta_filters WHERE device = 'd1' AND metric = 'm1' AND v1 < 100; + QUERY PLAN +------------------------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1000 + -> Delete on test_meta_filters (actual rows=0 loops=1) + Delete on _hyper_37_77_chunk test_meta_filters_1 + -> Seq Scan on _hyper_37_77_chunk test_meta_filters_1 (actual rows=990 loops=1) + Filter: ((v1 < '100'::double precision) AND (device = 'd1'::text) AND (metric = 'm1'::text)) + Rows Removed by Filter: 10 +(8 rows) + +-- test expression pushdown in compressed dml constraints +CREATE TABLE test_pushdown(time timestamptz NOT NULL, device text); +SELECT table_name FROM create_hypertable('test_pushdown', 'time'); + table_name +--------------- + test_pushdown +(1 row) + +INSERT INTO test_pushdown SELECT '2020-01-01', 'a'; +INSERT INTO test_pushdown SELECT '2020-01-01', 'b'; +INSERT INTO test_pushdown SELECT '2020-01-01 05:00', 'c'; +CREATE TABLE devices(device text); +INSERT INTO devices VALUES ('a'), ('b'), ('c'); +CREATE TABLE devices2(device text); +INSERT INTO devices2 VALUES ('d'), ('e'), ('f'); +CREATE TABLE devices3(device text); +INSERT INTO devices3 VALUES ('b'), ('d'), ('g'); +ALTER TABLE test_pushdown SET (timescaledb.compress, timescaledb.compress_segmentby='device'); +NOTICE: default order by for hypertable "test_pushdown" is set to ""time" DESC" +SELECT compress_chunk(show_chunks('test_pushdown')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_39_79_chunk +(1 row) + +-- 3 batch decompressions means pushdown is not working so we expect less than 3 for all these queries +SET timescaledb.enable_compressed_direct_batch_delete TO false; +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE 'a' = device; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=1 loops=1) + Filter: ('a'::text = device) +(7 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device < 'c' ; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 2 + Tuples decompressed: 2 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=2 loops=1) + Filter: (device < 'c'::text) +(7 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE 'c' > device; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 2 + Tuples decompressed: 2 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=2 loops=1) + Filter: ('c'::text > device) +(7 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE 'c' >= device; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 3 + Tuples decompressed: 3 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=3 loops=1) + Filter: ('c'::text >= device) +(7 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device > 'b'; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=1 loops=1) + Filter: (device > 'b'::text) +(7 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device = CURRENT_USER; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Custom Scan (ChunkAppend) on test_pushdown (actual rows=0 loops=1) + Chunks excluded during startup: 0 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=0 loops=1) + Filter: (device = CURRENT_USER) +(7 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE 'b' < device; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=1 loops=1) + Filter: ('b'::text < device) +(7 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE 'b' <= device; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 2 + Tuples decompressed: 2 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=2 loops=1) + Filter: ('b'::text <= device) +(7 rows) + +RESET timescaledb.enable_compressed_direct_batch_delete; +-- cant pushdown OR atm +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device = 'a' OR device = 'b'; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 3 + Tuples decompressed: 3 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=2 loops=1) + Filter: ((device = 'a'::text) OR (device = 'b'::text)) + Rows Removed by Filter: 1 +(8 rows) + +-- test stable function +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE time = timestamptz('2020-01-01 05:00'); ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=1 loops=1) + Filter: ("time" = 'Wed Jan 01 05:00:00 2020 PST'::timestamp with time zone) +(7 rows) + +-- test sqlvaluefunction +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device = substring(CURRENT_USER,length(CURRENT_USER)+1) || 'c'; ROLLBACK; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Custom Scan (ChunkAppend) on test_pushdown (actual rows=1 loops=1) + Chunks excluded during startup: 0 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=1 loops=1) + Filter: (device = ("substring"((CURRENT_USER)::text, (length((CURRENT_USER)::text) + 1)) || 'c'::text)) +(9 rows) + +-- JOIN tests +-- no filtering in decompression +SET timescaledb.enable_compressed_direct_batch_delete TO false; +BEGIN; :EXPLAIN DELETE FROM test_pushdown p USING devices3 d WHERE p.device=d.device; SELECT * FROM test_pushdown p ORDER BY p; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 3 + Tuples decompressed: 3 + -> Delete on test_pushdown p (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk p_1 + -> Merge Join (actual rows=1 loops=1) + Merge Cond: (p_1.device = d.device) + -> Sort (actual rows=3 loops=1) + Sort Key: p_1.device + Sort Method: quicksort + -> Seq Scan on _hyper_39_79_chunk p_1 (actual rows=3 loops=1) + -> Sort (actual rows=2 loops=1) + Sort Key: d.device + Sort Method: quicksort + -> Seq Scan on devices3 d (actual rows=3 loops=1) +(15 rows) + + time | device +------------------------------+-------- + Wed Jan 01 00:00:00 2020 PST | a + Wed Jan 01 05:00:00 2020 PST | c +(2 rows) + +RESET timescaledb.enable_compressed_direct_batch_delete; +BEGIN; :EXPLAIN DELETE FROM test_pushdown p USING devices3 d WHERE p.device=d.device; SELECT * FROM test_pushdown p ORDER BY p; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 3 + Tuples decompressed: 3 + -> Delete on test_pushdown p (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk p_1 + -> Merge Join (actual rows=1 loops=1) + Merge Cond: (p_1.device = d.device) + -> Sort (actual rows=3 loops=1) + Sort Key: p_1.device + Sort Method: quicksort + -> Seq Scan on _hyper_39_79_chunk p_1 (actual rows=3 loops=1) + -> Sort (actual rows=2 loops=1) + Sort Key: d.device + Sort Method: quicksort + -> Seq Scan on devices3 d (actual rows=3 loops=1) +(15 rows) + + time | device +------------------------------+-------- + Wed Jan 01 00:00:00 2020 PST | a + Wed Jan 01 05:00:00 2020 PST | c +(2 rows) + +-- can filter in decompression even before executing join +SET timescaledb.enable_compressed_direct_batch_delete TO false; +BEGIN; :EXPLAIN DELETE FROM test_pushdown p USING devices d WHERE p.device=d.device AND d.device ='b'; SELECT * FROM test_pushdown p ORDER BY p; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1 + -> Delete on test_pushdown p (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk p_1 + -> Nested Loop (actual rows=1 loops=1) + -> Seq Scan on devices d (actual rows=1 loops=1) + Filter: (device = 'b'::text) + Rows Removed by Filter: 2 + -> Materialize (actual rows=1 loops=1) + -> Seq Scan on _hyper_39_79_chunk p_1 (actual rows=1 loops=1) + Filter: (device = 'b'::text) +(12 rows) + + time | device +------------------------------+-------- + Wed Jan 01 00:00:00 2020 PST | a + Wed Jan 01 05:00:00 2020 PST | c +(2 rows) + +RESET timescaledb.enable_compressed_direct_batch_delete; +BEGIN; :EXPLAIN DELETE FROM test_pushdown p USING devices d WHERE p.device=d.device AND d.device ='b'; SELECT * FROM test_pushdown p ORDER BY p; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1 + -> Delete on test_pushdown p (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk p_1 + -> Nested Loop (actual rows=1 loops=1) + -> Seq Scan on devices d (actual rows=1 loops=1) + Filter: (device = 'b'::text) + Rows Removed by Filter: 2 + -> Materialize (actual rows=1 loops=1) + -> Seq Scan on _hyper_39_79_chunk p_1 (actual rows=1 loops=1) + Filter: (device = 'b'::text) +(12 rows) + + time | device +------------------------------+-------- + Wed Jan 01 00:00:00 2020 PST | a + Wed Jan 01 05:00:00 2020 PST | c +(2 rows) + +-- test prepared statement +PREPARE q1(text) AS DELETE FROM test_pushdown WHERE device = $1; +SET timescaledb.enable_compressed_direct_batch_delete TO false; +BEGIN; :EXPLAIN EXECUTE q1('a'); ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=1 loops=1) + Filter: (device = 'a'::text) +(7 rows) + +RESET timescaledb.enable_compressed_direct_batch_delete; +BEGIN; :EXPLAIN EXECUTE q1('a'); ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches deleted: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=0 loops=1) + Filter: (device = 'a'::text) +(6 rows) + +BEGIN; :EXPLAIN EXECUTE q1('not here'); ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=0 loops=1) + Filter: (device = 'not here'::text) +(5 rows) + +-- test arrayop pushdown less than 3 decompressions are expected for successful pushdown +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device IN ('a','d'); ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches deleted: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=0 loops=1) + Filter: (device = ANY ('{a,d}'::text[])) +(6 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device = ANY('{a,d}'); ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches deleted: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=0 loops=1) + Filter: (device = ANY ('{a,d}'::text[])) +(6 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device IN ('a',CURRENT_USER); ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Custom Scan (ChunkAppend) on test_pushdown (actual rows=1 loops=1) + Chunks excluded during startup: 0 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=1 loops=1) + Filter: (device = ANY (ARRAY['a'::text, (CURRENT_USER)::text])) +(9 rows) + +-- arroyop pushdown only works for segmentby columns atm so 3 decompressions are expected for now +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE time IN ('2020-01-01','2020-01-02'); ROLLBACK; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 3 + Tuples decompressed: 3 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=2 loops=1) + Filter: ("time" = ANY ('{"Wed Jan 01 00:00:00 2020 PST","Thu Jan 02 00:00:00 2020 PST"}'::timestamp with time zone[])) + Rows Removed by Filter: 1 +(8 rows) + +-- no pushdown for volatile functions +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device = current_query(); ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 3 + Tuples decompressed: 3 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Custom Scan (ChunkAppend) on test_pushdown (actual rows=0 loops=1) + Chunks excluded during startup: 0 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=0 loops=1) + Filter: (device = current_query()) + Rows Removed by Filter: 3 +(10 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device IN ('a',current_query()); ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 3 + Tuples decompressed: 3 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Custom Scan (ChunkAppend) on test_pushdown (actual rows=1 loops=1) + Chunks excluded during startup: 0 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=1 loops=1) + Filter: (device = ANY (ARRAY['a'::text, current_query()])) + Rows Removed by Filter: 2 +(10 rows) + +-- github issue #6858 +-- check update triggers work correctly both on uncompressed and compressed chunks +CREATE TABLE update_trigger_test ( + "entity_id" "uuid" NOT NULL, + "effective_date_time" timestamp with time zone NOT NULL, + "measurement" numeric NOT NULL, + "modified_at" timestamp with time zone DEFAULT "now"() NOT NULL +); +SELECT create_hypertable('update_trigger_test', 'effective_date_time'); + create_hypertable +----------------------------------- + (41,public,update_trigger_test,t) +(1 row) + +CREATE OR REPLACE FUNCTION update_modified_at_test() +RETURNS TRIGGER +LANGUAGE PLPGSQL AS $$ +BEGIN + NEW.modified_at = NOW(); + RETURN NEW; +END; $$; +CREATE TRIGGER update_trigger_test__before_update_sync_modified_at +BEFORE UPDATE ON update_trigger_test +FOR EACH ROW +EXECUTE PROCEDURE update_modified_at_test(); +INSERT INTO update_trigger_test +SELECT 'f2ca7073-1395-5770-8378-7d0339804580', '2024-04-16 04:50:00+02', +1100.00, '2024-04-23 11:56:38.494095+02' FROM generate_series(1,2500,1) c; +VACUUM FULL update_trigger_test; +BEGIN; +UPDATE update_trigger_test SET measurement = measurement + 2 +WHERE update_trigger_test.effective_date_time >= '2020-01-01T00:00:00'::timestamp AT TIME ZONE 'UTC'; +ROLLBACK; +-- try with default compression +ALTER TABLE update_trigger_test SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "update_trigger_test" is set to "" +NOTICE: default order by for hypertable "update_trigger_test" is set to "effective_date_time DESC" +SELECT compress_chunk(show_chunks('update_trigger_test')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_41_81_chunk +(1 row) + +BEGIN; +UPDATE update_trigger_test SET measurement = measurement + 2 +WHERE update_trigger_test.effective_date_time >= '2020-01-01T00:00:00'::timestamp AT TIME ZONE 'UTC'; +ROLLBACK; +-- lets try with segmentby +SELECT decompress_chunk(show_chunks('update_trigger_test')); + decompress_chunk +------------------------------------------ + _timescaledb_internal._hyper_41_81_chunk +(1 row) + +ALTER TABLE update_trigger_test SET (timescaledb.compress, timescaledb.compress_segmentby='entity_id'); +SELECT compress_chunk(show_chunks('update_trigger_test')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_41_81_chunk +(1 row) + +BEGIN; +UPDATE update_trigger_test SET measurement = measurement + 2 +WHERE update_trigger_test.effective_date_time >= '2020-01-01T00:00:00'::timestamp AT TIME ZONE 'UTC'; +ROLLBACK; diff --git a/tsl/test/expected/compression_update_delete-16.out b/tsl/test/expected/compression_update_delete-16.out new file mode 100644 index 00000000000..b827ee5819a --- /dev/null +++ b/tsl/test/expected/compression_update_delete-16.out @@ -0,0 +1,3405 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set EXPLAIN 'EXPLAIN (costs off, timing off, summary off, analyze)' +CREATE OR REPLACE VIEW compressed_chunk_info_view AS +SELECT + h.schema_name AS hypertable_schema, + h.table_name AS hypertable_name, + c.schema_name as chunk_schema, + c.table_name as chunk_name, + c.status as chunk_status, + comp.schema_name as compressed_chunk_schema, + comp.table_name as compressed_chunk_name +FROM + _timescaledb_catalog.hypertable h JOIN + _timescaledb_catalog.chunk c ON h.id = c.hypertable_id + LEFT JOIN _timescaledb_catalog.chunk comp +ON comp.id = c.compressed_chunk_id +; +CREATE TABLE sample_table ( + time TIMESTAMP WITH TIME ZONE NOT NULL, + sensor_id INTEGER NOT NULL, + cpu double precision null, + temperature double precision null, + name varchar(100) default 'this is a default string value' +); +SELECT * FROM create_hypertable('sample_table', 'time', + chunk_time_interval => INTERVAL '2 months'); +WARNING: column type "character varying" used for "name" does not follow best practices + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 1 | public | sample_table | t +(1 row) + +\set start_date '2022-01-28 01:09:53.583252+05:30' +INSERT INTO sample_table + SELECT + time + (INTERVAL '1 minute' * random()) AS time, + sensor_id, + random() AS cpu, + random()* 100 AS temperature + FROM + generate_series(:'start_date'::timestamptz - INTERVAL '1 months', + :'start_date'::timestamptz - INTERVAL '1 week', + INTERVAL '1 hour') AS g1(time), + generate_series(1, 8, 1 ) AS g2(sensor_id) + ORDER BY + time; +\set start_date '2023-03-17 17:51:11.322998+05:30' +-- insert into new chunks +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 12, 21.98, 33.123, 'new row1'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 12, 17.66, 13.875, 'new row1'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 13, 21.98, 33.123, 'new row2'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 1, 21.98, 33.123, 'new row2'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 4, 21.98, 33.123, 'new row2'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 5, 0.988, 33.123, 'new row3'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 6, 4.6554, 47, 'new row3'); +-- enable compression +ALTER TABLE sample_table SET ( + timescaledb.compress, + timescaledb.compress_segmentby = 'sensor_id' +); +NOTICE: default order by for hypertable "sample_table" is set to ""time" DESC" +-- compress all chunks +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_2_chunk +(2 rows) + +-- check chunk compression status +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------ + 1 | _hyper_1_1_chunk + 1 | _hyper_1_2_chunk +(2 rows) + +-- test rows visibility +BEGIN; +-- report 0 rows +SELECT count(*) FROM sample_table WHERE name = 'updated row'; + count +------- + 0 +(1 row) + +-- update 4 rows +UPDATE sample_table SET name = 'updated row' WHERE cpu = 21.98 AND temperature = 33.123; +-- report 0 rows +SELECT count(*) FROM sample_table WHERE name = 'updated row'; + count +------- + 4 +(1 row) + +ROLLBACK; +-- get count of affected rows +SELECT count(*) FROM sample_table WHERE cpu = 21.98 AND temperature = 33.123; + count +------- + 4 +(1 row) + +-- do update +UPDATE sample_table SET name = 'updated row' WHERE cpu = 21.98 AND temperature = 33.123; +-- get count of updated rows +SELECT count(*) FROM sample_table WHERE name = 'updated row'; + count +------- + 4 +(1 row) + +-- check chunk compression status +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------ + 1 | _hyper_1_1_chunk + 9 | _hyper_1_2_chunk +(2 rows) + +-- recompress the partial chunks +SELECT compress_chunk('_timescaledb_internal._hyper_1_1_chunk'); +NOTICE: chunk "_hyper_1_1_chunk" is already compressed + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +SELECT compress_chunk('_timescaledb_internal._hyper_1_2_chunk'); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_2_chunk +(1 row) + +-- check chunk compression status +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------ + 1 | _hyper_1_1_chunk + 1 | _hyper_1_2_chunk +(2 rows) + +-- get count of affected rows +SELECT count(*) FROM sample_table WHERE name = 'updated row'; + count +------- + 4 +(1 row) + +-- do delete +DELETE FROM sample_table WHERE name = 'updated row'; +-- get count of updated rows +SELECT count(*) FROM sample_table WHERE name = 'updated row'; + count +------- + 0 +(1 row) + +-- check chunk compression status +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------ + 1 | _hyper_1_1_chunk + 9 | _hyper_1_2_chunk +(2 rows) + +-- recompress the paritial chunks +SELECT compress_chunk('_timescaledb_internal._hyper_1_1_chunk'); +NOTICE: chunk "_hyper_1_1_chunk" is already compressed + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +SELECT compress_chunk('_timescaledb_internal._hyper_1_2_chunk'); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_2_chunk +(1 row) + +-- test for IS NULL checks +-- should not UPDATE any rows +UPDATE sample_table SET temperature = 34.21 WHERE sensor_id IS NULL; +-- check chunk compression status +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------ + 1 | _hyper_1_1_chunk + 1 | _hyper_1_2_chunk +(2 rows) + +-- test for IS NOT NULL checks +-- should UPDATE all rows +UPDATE sample_table SET temperature = 34.21 WHERE sensor_id IS NOT NULL; +-- check chunk compression status +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------ + 9 | _hyper_1_1_chunk + 9 | _hyper_1_2_chunk +(2 rows) + +-- recompress the paritial chunks +SELECT compress_chunk('_timescaledb_internal._hyper_1_1_chunk'); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +SELECT compress_chunk('_timescaledb_internal._hyper_1_2_chunk'); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_2_chunk +(1 row) + +-- check chunk compression status +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------ + 1 | _hyper_1_1_chunk + 1 | _hyper_1_2_chunk +(2 rows) + +-- report 0 rows +SELECT COUNT(*) FROM sample_table WHERE name = 'updated row based on < OR > comparison'; + count +------- + 0 +(1 row) + +-- get total count of rows which satifies the condition +SELECT COUNT(*) as "total_affected_rows" FROM sample_table WHERE + time > '2022-01-20 19:10:00.101514+05:30' and + time < '2022-01-20 21:10:43.855297+05:30' \gset +-- perform UPDATE with < and > comparison on SEGMENTBY column +UPDATE sample_table SET name = 'updated row based on < OR > comparison' WHERE + time > '2022-01-20 19:10:00.101514+05:30' and time < '2022-01-20 21:10:43.855297+05:30'; +-- check chunk compression status after UPDATE +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------ + 9 | _hyper_1_1_chunk + 1 | _hyper_1_2_chunk +(2 rows) + +-- count should be same +SELECT COUNT(*) = (:total_affected_rows) FROM sample_table WHERE name = 'updated row based on < OR > comparison'; + ?column? +---------- + t +(1 row) + +DROP TABLE sample_table; +-- test to ensure that only required rows from compressed chunks +-- are extracted if SEGMENTBY column is used in WHERE condition +CREATE TABLE sample_table( + time INT NOT NULL, + device_id INT, + val INT); +SELECT * FROM create_hypertable('sample_table', 'time', chunk_time_interval => 10); + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 3 | public | sample_table | t +(1 row) + +ALTER TABLE sample_table SET ( + timescaledb.compress, + timescaledb.compress_orderby = 'time', + timescaledb.compress_segmentby = 'device_id'); +INSERT INTO sample_table VALUES (1, 1, 1), (2, 2, 1), (3, 3, 1), (10, 3, 2), (11, 4, 2), (11, 1, 2); +-- compress all chunks +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_3_5_chunk + _timescaledb_internal._hyper_3_6_chunk +(2 rows) + +-- get FIRST compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id LIMIT 1 \gset +-- get SECOND compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_2" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id DESC LIMIT 1 \gset +-- there should 2 rows matching the conditions coming from 2 chunks +SELECT * FROM sample_table WHERE device_id = 3 ORDER BY time, device_id; + time | device_id | val +------+-----------+----- + 3 | 3 | 1 + 10 | 3 | 2 +(2 rows) + +-- get total rowcount from compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 3 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 3 +(1 row) + +-- get rowcount from compressed chunks where device_id = 3 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE device_id = 3; + count +------- + 1 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE device_id = 3; + count +------- + 1 +(1 row) + +-- delete rows with device_id = 3 +DELETE FROM sample_table WHERE device_id = 3; +-- get total rowcount from compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 2 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 2 +(1 row) + +-- get rowcount from compressed chunks where device_id = 3 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE device_id = 3; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE device_id = 3; + count +------- + 0 +(1 row) + +-- there should be no rows +SELECT * FROM sample_table WHERE device_id = 3 ORDER BY time, device_id; + time | device_id | val +------+-----------+----- +(0 rows) + +-- there should 2 rows matching the conditions coming from 2 chunks +SELECT val FROM sample_table WHERE 1 = device_id ORDER BY time, device_id; + val +----- + 1 + 2 +(2 rows) + +-- get total rowcount from compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 2 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 2 +(1 row) + +-- get rowcount from compressed chunks where device_id = 1 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE 1 = device_id; + count +------- + 1 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE 1 = device_id; + count +------- + 1 +(1 row) + +-- update rows with device_id = 1 +UPDATE sample_table SET val = 200 WHERE 1 = device_id; +-- get total rowcount from compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 1 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 1 +(1 row) + +-- get rowcount from compressed chunks where device_id = 1 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE 1 = device_id; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE 1 = device_id; + count +------- + 0 +(1 row) + +-- there should be 2 rows +SELECT val FROM sample_table WHERE 1 = device_id ORDER BY time, device_id; + val +----- + 200 + 200 +(2 rows) + +DROP TABLE sample_table; +CREATE TABLE sample_table( + time INT NOT NULL, + device_id INT, + val INT); +SELECT * FROM create_hypertable('sample_table', 'time', chunk_time_interval => 10); + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 5 | public | sample_table | t +(1 row) + +ALTER TABLE sample_table SET ( + timescaledb.compress, + timescaledb.compress_segmentby = 'time, val'); +NOTICE: default order by for hypertable "sample_table" is set to "" +INSERT INTO sample_table VALUES (1, 1, 1), (2, 2, 1), (3, 3, 1), (1, 3, 2), (11, 4, 2), (1, 1, 2); +-- compress all chunks +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_5_9_chunk + _timescaledb_internal._hyper_5_10_chunk +(2 rows) + +-- get FIRST compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id LIMIT 1 \gset +-- get SECOND compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_2" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id DESC LIMIT 1 \gset +-- there should 2 rows matching the conditions coming from 2 chunks +SELECT * FROM sample_table WHERE time = 1 AND val = 2 ORDER BY time, device_id; + time | device_id | val +------+-----------+----- + 1 | 1 | 2 + 1 | 3 | 2 +(2 rows) + +-- get total rowcount from compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 4 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 1 +(1 row) + +-- get rowcount from compressed chunks where time = 1 AND val = 2 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE time = 1 AND val = 2; + count +------- + 1 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE time = 1 AND val = 2; + count +------- + 0 +(1 row) + +-- delete rows with time = 1 AND val = 2 +EXPLAIN (costs off, verbose) DELETE FROM sample_table WHERE time = 1 AND 2 = val; + QUERY PLAN +---------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Delete on public.sample_table + Delete on _timescaledb_internal._hyper_5_9_chunk sample_table_1 + -> Seq Scan on _timescaledb_internal._hyper_5_9_chunk sample_table_1 + Output: sample_table_1.tableoid, sample_table_1.ctid + Filter: ((sample_table_1."time" = 1) AND (2 = sample_table_1.val)) +(6 rows) + +-- should delete rows from 1 of the compressed chunks +DELETE FROM sample_table WHERE time = 1 AND 2 = val; +-- get total rowcount from compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 3 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 1 +(1 row) + +-- get rowcount from compressed chunks wheretime = 1 AND val = 2 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE time = 1 AND val = 2; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE time = 1 AND val = 2; + count +------- + 0 +(1 row) + +-- there should be no rows +SELECT * FROM sample_table WHERE time = 1 AND val = 2 ORDER BY time, device_id; + time | device_id | val +------+-----------+----- +(0 rows) + +DROP TABLE sample_table; +-- Test chunk compile time startup exclusion +CREATE OR REPLACE FUNCTION now_s() +RETURNS timestamptz LANGUAGE PLPGSQL STABLE AS +$BODY$ +BEGIN + RETURN '2017-08-22T10:00:00'::timestamptz; +END; +$BODY$; +CREATE TABLE sample_table(time timestamptz NOT NULL, temp float, colorid integer, attr jsonb); +SELECT create_hypertable('sample_table', 'time', chunk_time_interval => 2628000000000); + create_hypertable +--------------------------- + (7,public,sample_table,t) +(1 row) + +-- create three chunks +INSERT INTO sample_table VALUES ('2017-03-22T09:18:22', 23.5, 1, '{"a": 1, "b": 2}'), + ('2017-03-22T09:18:23', 21.5, 1, '{"a": 1, "b": 2}'), + ('2017-05-22T09:18:22', 36.2, 2, '{"c": 3, "b": 2}'), + ('2017-05-22T09:18:23', 15.2, 2, '{"c": 3}'), + ('2017-08-22T09:18:22', 34.1, 3, '{"c": 4}'); +ALTER TABLE sample_table SET (timescaledb.compress, + timescaledb.compress_segmentby = 'time'); +NOTICE: default order by for hypertable "sample_table" is set to "" +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_7_13_chunk + _timescaledb_internal._hyper_7_14_chunk + _timescaledb_internal._hyper_7_15_chunk +(3 rows) + +-- ensure all chunks are compressed +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------- + 1 | _hyper_7_13_chunk + 1 | _hyper_7_14_chunk + 1 | _hyper_7_15_chunk +(3 rows) + +-- report 0 rows +SELECT * FROM sample_table WHERE time > now_s() + '-1 month' AND colorid = 4; + time | temp | colorid | attr +------+------+---------+------ +(0 rows) + +-- update 1 row +UPDATE sample_table SET colorid = 4 WHERE time > now_s() + '-1 month'; +-- report 1 row +SELECT * FROM sample_table WHERE time > now_s() + '-1 month' AND colorid = 4; + time | temp | colorid | attr +------------------------------+------+---------+---------- + Tue Aug 22 09:18:22 2017 PDT | 34.1 | 4 | {"c": 4} +(1 row) + +-- ensure that 1 chunk is partially compressed +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------- + 1 | _hyper_7_13_chunk + 1 | _hyper_7_14_chunk + 9 | _hyper_7_15_chunk +(3 rows) + +DROP TABLE sample_table; +-- test for NULL values in SEGMENTBY column +CREATE TABLE sample_table( + time INT, + device_id INT, + val INT); +SELECT * FROM create_hypertable('sample_table', 'time', chunk_time_interval => 10); +NOTICE: adding not-null constraint to column "time" + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 9 | public | sample_table | t +(1 row) + +ALTER TABLE sample_table SET ( + timescaledb.compress, + timescaledb.compress_orderby = 'time', + timescaledb.compress_segmentby = 'device_id'); +INSERT INTO sample_table VALUES (1, 1, 1), (2, NULL, 1), (3, NULL, 1), (10, NULL, 2), (11, NULL, 2), (11, 1, 2); +-- compress all chunks +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_9_19_chunk + _timescaledb_internal._hyper_9_20_chunk +(2 rows) + +-- get FIRST compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id LIMIT 1 \gset +-- get SECOND compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_2" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id DESC LIMIT 1 \gset +-- get total rowcount from compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 2 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 2 +(1 row) + +-- get rowcount from compressed chunks where device_id IS NULL +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE device_id IS NULL; + count +------- + 1 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE device_id IS NULL; + count +------- + 1 +(1 row) + +-- get total count of SEGMENTBY column with NULL values +SELECT COUNT(*) FROM sample_table WHERE device_id IS NULL; + count +------- + 4 +(1 row) + +-- delete NULL values in SEGMENTBY column +DELETE FROM sample_table WHERE device_id IS NULL; +-- ensure that not all rows are moved to staging area +-- should have few compressed rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 1 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 1 +(1 row) + +-- get rowcount from compressed chunks where device_id IS NULL +-- should report 0 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE device_id IS NULL; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE device_id IS NULL; + count +------- + 0 +(1 row) + +-- check chunk compression status after DELETE +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------- + 9 | _hyper_9_19_chunk + 9 | _hyper_9_20_chunk +(2 rows) + +DROP TABLE sample_table; +-- test for IS NOT NULL values in SEGMENTBY column +CREATE TABLE sample_table( + time INT, + device_id INT, + val INT); +SELECT * FROM create_hypertable('sample_table', 'time', chunk_time_interval => 10); +NOTICE: adding not-null constraint to column "time" + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 11 | public | sample_table | t +(1 row) + +ALTER TABLE sample_table SET ( + timescaledb.compress, + timescaledb.compress_orderby = 'time', + timescaledb.compress_segmentby = 'device_id'); +INSERT INTO sample_table VALUES (1, NULL, 1), (2, NULL, 1), (3, NULL, 1), (10, 3, 2), (11, 2, 2), (11, 1, 2); +-- compress all chunks +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_11_23_chunk + _timescaledb_internal._hyper_11_24_chunk +(2 rows) + +-- get FIRST compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id LIMIT 1 \gset +-- get SECOND compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_2" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id DESC LIMIT 1 \gset +SELECT COUNT(*) FROM sample_table WHERE val = 1234; + count +------- + 0 +(1 row) + +-- UPDATE based on IS NOT NULL condition on SEGMENTBY column +UPDATE sample_table SET val = 1234 WHERE device_id IS NOT NULL; +-- get total count of SEGMENTBY column with NULL values +SELECT COUNT(*) FROM sample_table WHERE val = 1234; + count +------- + 3 +(1 row) + +-- check chunk compression status after DELETE +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+-------------------- + 1 | _hyper_11_23_chunk + 9 | _hyper_11_24_chunk +(2 rows) + +DROP TABLE sample_table; +-- test to for <= AND >= on SEGMENTBY column +CREATE TABLE sample_table( + time INT, + device_id INT, + val INT); +SELECT * FROM create_hypertable('sample_table', 'time', chunk_time_interval => 10); +NOTICE: adding not-null constraint to column "time" + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 13 | public | sample_table | t +(1 row) + +ALTER TABLE sample_table SET ( + timescaledb.compress, + timescaledb.compress_orderby = 'time', + timescaledb.compress_segmentby = 'device_id, val'); +INSERT INTO sample_table VALUES (1, 1, 1), (2, NULL, 1), (3, 4, 1), (10, NULL, 2), (11, NULL, 2), (11, 1, 2), (13, 5, 3); +INSERT INTO sample_table VALUES (4, 3, NULL), (6, NULL, NULL), (12, NULL, NULL), (13, 4, NULL); +-- compress all chunks +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_13_27_chunk + _timescaledb_internal._hyper_13_28_chunk +(2 rows) + +-- test will multiple NULL/NOT NULL columns +BEGIN; +-- report 0 row +SELECT * FROM sample_table WHERE device_id IS NULL AND val = 987; + time | device_id | val +------+-----------+----- +(0 rows) + +-- these 3 rows will be affected by below UPDATE +SELECT * FROM sample_table WHERE device_id IS NULL AND val IS NOT NULL ORDER BY 1; + time | device_id | val +------+-----------+----- + 2 | | 1 + 10 | | 2 + 11 | | 2 +(3 rows) + +-- update 3 rows +UPDATE sample_table SET val = 987 WHERE device_id IS NULL AND val IS NOT NULL; +-- report 3 row +SELECT * FROM sample_table WHERE device_id IS NULL AND val = 987; + time | device_id | val +------+-----------+----- + 2 | | 987 + 10 | | 987 + 11 | | 987 +(3 rows) + +ROLLBACK; +-- test will multiple columns +BEGIN; +-- report 2 rows +SELECT * FROM sample_table WHERE device_id IS NULL AND val = 2; + time | device_id | val +------+-----------+----- + 10 | | 2 + 11 | | 2 +(2 rows) + +-- delete 2 rows +DELETE from sample_table WHERE device_id IS NULL AND val = 2; +-- report 0 rows +SELECT * FROM sample_table WHERE device_id IS NULL AND val = 2; + time | device_id | val +------+-----------+----- +(0 rows) + +ROLLBACK; +BEGIN; +-- report 1 row +SELECT * FROM sample_table WHERE device_id = 3 AND val IS NULL; + time | device_id | val +------+-----------+----- + 4 | 3 | +(1 row) + +-- delete 1 rows +DELETE from sample_table WHERE device_id = 3 AND val IS NULL; +-- report 0 rows +SELECT * FROM sample_table WHERE device_id = 3 AND val IS NULL; + time | device_id | val +------+-----------+----- +(0 rows) + +ROLLBACK; +-- get FIRST compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id LIMIT 1 \gset +-- get SECOND compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_2" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id DESC LIMIT 1 \gset +-- get total rowcount from compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 5 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 5 +(1 row) + +-- get rowcount from compressed chunks where device_id >= 4 AND val <= 1 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE device_id >= 4 AND val <= 1; + count +------- + 1 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE device_id >= 4 AND val <= 1; + count +------- + 0 +(1 row) + +-- get total count of SEGMENTBY column with device_id >= 4 AND val <= 1 +SELECT COUNT(*) FROM sample_table WHERE device_id >= 4 AND val <= 1; + count +------- + 1 +(1 row) + +-- delete NULL values in SEGMENTBY column +DELETE FROM sample_table WHERE device_id >= 4 AND val <= 1; +-- ensure that not all rows are moved to staging area +-- should have few compressed rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 4 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 5 +(1 row) + +-- get rowcount from compressed chunks where device_id IS NULL +-- should report 0 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE device_id >= 4 AND val <= 1; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE device_id >= 4 AND val <= 1; + count +------- + 0 +(1 row) + +-- check chunk compression status after DELETE +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+-------------------- + 1 | _hyper_13_27_chunk + 1 | _hyper_13_28_chunk +(2 rows) + +-- added tests for code coverage +UPDATE sample_table SET time = 21 WHERE (device_id) in ( 30, 51, 72, 53); +UPDATE sample_table SET time = 21 WHERE device_id + 365 = 8765; +DROP TABLE sample_table; +-- test with different physical layout +CREATE TABLE sample_table( + time INT, + device_id INT, + val INT default 8, + a INT default 10, + b INT default 11, + c INT default 12, + d INT, + e INT default 13); +SELECT * FROM create_hypertable('sample_table', 'time', chunk_time_interval => 5); +NOTICE: adding not-null constraint to column "time" + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 15 | public | sample_table | t +(1 row) + +INSERT INTO sample_table (time, device_id, d) VALUES (1, 1, 1), (2, NULL, 1), (3, 4, 1), (10, NULL, 2), (11, NULL, 2), (11, 1, 2), (13, 5, 3); +ALTER TABLE sample_table DROP COLUMN c; +ALTER TABLE sample_table SET ( + timescaledb.compress, + timescaledb.compress_segmentby = 'device_id, d'); +NOTICE: default order by for hypertable "sample_table" is set to ""time" DESC" +-- compress all chunks +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_15_31_chunk + _timescaledb_internal._hyper_15_32_chunk +(2 rows) + +ALTER TABLE sample_table ADD COLUMN c int default 23; +-- check chunk compression status +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+-------------------- + 1 | _hyper_15_31_chunk + 1 | _hyper_15_32_chunk +(2 rows) + +-- get FIRST uncompressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "UNCOMPRESS_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE '_hyper_%' +ORDER BY ch1.id LIMIT 1 \gset +-- get SECOND uncompressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "UNCOMPRESS_CHUNK_2" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE '_hyper_%' +ORDER BY ch1.id DESC LIMIT 1 \gset +-- get FIRST compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id LIMIT 1 \gset +-- get SECOND compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_2" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id DESC LIMIT 1 \gset +-- ensure segment by column index position in compressed and uncompressed +-- chunk is different +SELECT attname, attnum +FROM pg_attribute +WHERE attrelid IN (:'COMPRESS_CHUNK_1'::regclass, :'UNCOMPRESS_CHUNK_1'::regclass) AND attname = 'd' +ORDER BY attnum; + attname | attnum +---------+-------- + d | 4 + d | 7 +(2 rows) + +SELECT attname, attnum +FROM pg_attribute +WHERE attrelid IN (:'COMPRESS_CHUNK_2'::regclass, :'UNCOMPRESS_CHUNK_2'::regclass) AND attname = 'd' +ORDER BY attnum; + attname | attnum +---------+-------- + d | 4 + d | 7 +(2 rows) + +-- get total rowcount from compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 3 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 3 +(1 row) + +-- get rowcount from compressed chunks where d = 3 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE d = 3; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE d = 3; + count +------- + 1 +(1 row) + +-- get total count of SEGMENTBY column with d = 3 +SELECT COUNT(*) FROM sample_table WHERE d = 3; + count +------- + 1 +(1 row) + +-- delete based on SEGMENTBY column +DELETE FROM sample_table WHERE d = 3; +-- ensure that not all rows are moved to staging area +-- should have few compressed rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 3 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 2 +(1 row) + +-- get rowcount from compressed chunks where d = 3 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE d = 3; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE d = 3; + count +------- + 0 +(1 row) + +-- check chunk compression status after DELETE +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+-------------------- + 1 | _hyper_15_31_chunk + 1 | _hyper_15_32_chunk +(2 rows) + +-- get rowcount from compressed chunks where device_id IS NULL +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE device_id IS NULL; + count +------- + 1 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE device_id IS NULL; + count +------- + 1 +(1 row) + +BEGIN; +-- report 0 row +SELECT * FROM sample_table WHERE a = 247; + time | device_id | val | a | b | d | e | c +------+-----------+-----+---+---+---+---+--- +(0 rows) + +-- delete 1 row +UPDATE sample_table SET a = 247 WHERE device_id IS NULL; +-- ensure rows are visible +SELECT * FROM sample_table WHERE a = 247; + time | device_id | val | a | b | d | e | c +------+-----------+-----+-----+----+---+----+---- + 2 | | 8 | 247 | 11 | 1 | 13 | 23 + 11 | | 8 | 247 | 11 | 2 | 13 | 23 + 10 | | 8 | 247 | 11 | 2 | 13 | 23 +(3 rows) + +ROLLBACK; +-- report 0 rows +SELECT COUNT(*) FROM sample_table WHERE a = 247; + count +------- + 0 +(1 row) + +-- UPDATE based on NULL values in SEGMENTBY column +UPDATE sample_table SET a = 247 WHERE device_id IS NULL; +-- report 3 rows +SELECT COUNT(*) FROM sample_table WHERE a = 247; + count +------- + 3 +(1 row) + +-- ensure that not all rows are moved to staging area +-- should have few compressed rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 2 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 1 +(1 row) + +-- get rowcount from compressed chunks where device_id IS NULL +-- should report 0 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE device_id IS NULL; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE device_id IS NULL; + count +------- + 0 +(1 row) + +DROP TABLE sample_table; +-- test with different physical layout +CREATE TABLE sample_table(time timestamptz, c1 text, c2 text, c3 text); +SELECT create_hypertable('sample_table','time'); +NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------- + (17,public,sample_table,t) +(1 row) + +INSERT INTO sample_table SELECT '2000-01-01'; +ALTER TABLE sample_table DROP column c3; +ALTER TABLE sample_table ADD column c4 text; +INSERT INTO sample_table SELECT '2000-01-01', '1', '2', '3'; +ALTER TABLE sample_table SET (timescaledb.compress,timescaledb.compress_segmentby='c4'); +NOTICE: default order by for hypertable "sample_table" is set to ""time" DESC" +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_17_35_chunk +(1 row) + +BEGIN; +-- report 1 row +SELECT * FROM sample_table WHERE c4 IS NULL; + time | c1 | c2 | c4 +------------------------------+----+----+---- + Sat Jan 01 00:00:00 2000 PST | | | +(1 row) + +-- delete 1 row +DELETE FROM sample_table WHERE c4 IS NULL; +-- report 0 rows +SELECT * FROM sample_table WHERE c4 IS NULL; + time | c1 | c2 | c4 +------+----+----+---- +(0 rows) + +ROLLBACK; +-- get FIRST compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id \gset +-- report 2 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 2 +(1 row) + +-- report 1 row +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 IS NULL; + count +------- + 1 +(1 row) + +-- report 1 row +SELECT * FROM sample_table WHERE c4 IS NULL; + time | c1 | c2 | c4 +------------------------------+----+----+---- + Sat Jan 01 00:00:00 2000 PST | | | +(1 row) + +-- delete 1 row +DELETE FROM sample_table WHERE c4 IS NULL; +-- report 0 row +SELECT * FROM sample_table WHERE c4 IS NULL; + time | c1 | c2 | c4 +------+----+----+---- +(0 rows) + +-- report 1 row which ensure that only required row is moved and deleted +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 1 +(1 row) + +-- report 0 row +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 IS NULL; + count +------- + 0 +(1 row) + +DROP TABLE sample_table; +-- test filtering with ORDER BY columns +CREATE TABLE sample_table(time timestamptz, c1 int, c2 int, c3 int, c4 int); +SELECT create_hypertable('sample_table','time',chunk_time_interval=>'1 day'::interval); +NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------- + (19,public,sample_table,t) +(1 row) + +ALTER TABLE sample_table SET (timescaledb.compress,timescaledb.compress_segmentby='c4', timescaledb.compress_orderby='c1,c2,time'); +INSERT INTO sample_table +SELECT t, c1, c2, c3, c4 +FROM generate_series(:'start_date'::timestamptz - INTERVAL '9 hours', + :'start_date'::timestamptz, + INTERVAL '1 hour') t, + generate_series(0,9,1) c1, + generate_series(0,9,1) c2, + generate_series(0,9,1) c3, + generate_series(0,9,1) c4; +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_19_37_chunk +(1 row) + +-- get FIRST chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE '_hyper_%' +ORDER BY ch1.id LIMIT 1 \gset +-- get FIRST compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id LIMIT 1 \gset +-- check that you uncompress and delete only for exact SEGMENTBY value +SET timescaledb.debug_compression_path_info TO true; +BEGIN; +-- report 10 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 = 5; + count +------- + 10 +(1 row) + +-- report 10k rows +SELECT COUNT(*) FROM sample_table WHERE c4 = 5; + count +------- + 10000 +(1 row) + +-- fetch total and number of affected rows +SELECT COUNT(*) AS "total_rows" FROM sample_table \gset +SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c4 = 5 \gset +-- delete 10k rows +DELETE FROM sample_table WHERE c4 = 5; +INFO: Index "compress_hyper_20_38_chunk_c4__ts_meta_sequence_num_idx" is used for scan. +INFO: Number of compressed rows fetched from index: 10. Number of compressed rows filtered by heap filters: 0. +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c4 = 5; + count +------- + 0 +(1 row) + +-- report 0 rows in uncompressed chunk +SELECT COUNT(*) FROM ONLY :CHUNK_1; + count +------- + 0 +(1 row) + +-- report 0 rows in compressed chunk +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 = 5; + count +------- + 0 +(1 row) + +-- validate correct number of rows was deleted +-- report true +SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table; + ?column? +---------- + t +(1 row) + +ROLLBACK; +-- check that you uncompress and delete only for less than SEGMENTBY value +BEGIN; +-- report 50 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 < 5; + count +------- + 50 +(1 row) + +-- report 50k rows +SELECT COUNT(*) FROM sample_table WHERE c4 < 5; + count +------- + 50000 +(1 row) + +-- fetch total and number of affected rows +SELECT COUNT(*) AS "total_rows" FROM sample_table \gset +SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c4 < 5 \gset +-- delete 50k rows +DELETE FROM sample_table WHERE c4 < 5; +INFO: Index "compress_hyper_20_38_chunk_c4__ts_meta_sequence_num_idx" is used for scan. +INFO: Number of compressed rows fetched from index: 50. Number of compressed rows filtered by heap filters: 0. +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c4 < 5; + count +------- + 0 +(1 row) + +-- report 0 rows in uncompressed chunk +SELECT COUNT(*) FROM ONLY :CHUNK_1; + count +------- + 0 +(1 row) + +-- report 0 rows in compressed chunk +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 < 5; + count +------- + 0 +(1 row) + +-- validate correct number of rows was deleted +-- report true +SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table; + ?column? +---------- + t +(1 row) + +ROLLBACK; +-- check that you uncompress and delete only for greater and equal than SEGMENTBY value +BEGIN; +-- report 50 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 >= 5; + count +------- + 50 +(1 row) + +-- report 50k rows +SELECT COUNT(*) FROM sample_table WHERE c4 >= 5; + count +------- + 50000 +(1 row) + +-- fetch total and number of affected rows +SELECT COUNT(*) AS "total_rows" FROM sample_table \gset +SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c4 >= 5 \gset +-- delete 50k rows +DELETE FROM sample_table WHERE c4 >= 5; +INFO: Index "compress_hyper_20_38_chunk_c4__ts_meta_sequence_num_idx" is used for scan. +INFO: Number of compressed rows fetched from index: 50. Number of compressed rows filtered by heap filters: 0. +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c4 >= 5; + count +------- + 0 +(1 row) + +-- report 0 rows in uncompressed chunk +SELECT COUNT(*) FROM ONLY :CHUNK_1; + count +------- + 0 +(1 row) + +-- report 0 rows in compressed chunk +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 >= 5; + count +------- + 0 +(1 row) + +-- validate correct number of rows was deleted +-- report true +SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table; + ?column? +---------- + t +(1 row) + +ROLLBACK; +-- check that you uncompress and delete only for exact ORDERBY value +-- this will uncompress segments which have min <= value and max >= value +BEGIN; +-- report 10k rows +SELECT COUNT(*) FROM sample_table WHERE c2 = 3; + count +------- + 10000 +(1 row) + +-- report 100 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_min_2 <= 3 and _ts_meta_max_2 >= 3; + count +------- + 100 +(1 row) + +-- fetch total and number of affected rows +SELECT COUNT(*) AS "total_rows" FROM sample_table \gset +SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c2 = 3 \gset +-- delete 10k rows +DELETE FROM sample_table WHERE c2 = 3; +INFO: Number of compressed rows fetched from table scan: 100. Number of compressed rows filtered: 0. +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c2 = 3; + count +------- + 0 +(1 row) + +-- report 90k rows in uncompressed chunk +SELECT COUNT(*) FROM ONLY :CHUNK_1; + count +------- + 90000 +(1 row) + +-- report 0 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_min_2 <= 3 and _ts_meta_max_2 >= 3; + count +------- + 0 +(1 row) + +-- validate correct number of rows was deleted +-- report true +SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table; + ?column? +---------- + t +(1 row) + +ROLLBACK; +-- check that you uncompress and delete only for less then ORDERBY value +-- this will uncompress segments which have min < value +BEGIN; +-- report 20k rows +SELECT COUNT(*) FROM sample_table WHERE c1 < 2; + count +------- + 20000 +(1 row) + +-- report 20 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_max_1 < 2; + count +------- + 20 +(1 row) + +-- fetch total and number of affected rows +SELECT COUNT(*) AS "total_rows" FROM sample_table \gset +SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c1 < 2 \gset +-- delete 20k rows +DELETE FROM sample_table WHERE c1 < 2; +INFO: Number of compressed rows fetched from table scan: 20. Number of compressed rows filtered: 0. +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c1 < 2; + count +------- + 0 +(1 row) + +-- report 0 rows in uncompressed chunk +SELECT COUNT(*) FROM ONLY :CHUNK_1; + count +------- + 0 +(1 row) + +-- report 0 rows in compressed chunk +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_max_1 < 2; + count +------- + 0 +(1 row) + +-- validate correct number of rows was deleted +-- report true +SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table; + ?column? +---------- + t +(1 row) + +ROLLBACK; +-- check that you uncompress and delete only for greater or equal then ORDERBY value +-- this will uncompress segments which have max >= value +BEGIN; +-- report 30k rows +SELECT COUNT(*) FROM sample_table WHERE c1 >= 7; + count +------- + 30000 +(1 row) + +-- report 30 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_min_1 >= 7; + count +------- + 30 +(1 row) + +-- fetch total and number of affected rows +SELECT COUNT(*) AS "total_rows" FROM sample_table \gset +SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c1 >= 7 \gset +-- delete 30k rows +DELETE FROM sample_table WHERE c1 >= 7; +INFO: Number of compressed rows fetched from table scan: 30. Number of compressed rows filtered: 0. +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c1 >= 7; + count +------- + 0 +(1 row) + +-- report 0 rows in uncompressed chunk +SELECT COUNT(*) FROM ONLY :CHUNK_1; + count +------- + 0 +(1 row) + +-- report 0 rows in compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_min_1 >= 7; + count +------- + 0 +(1 row) + +-- validate correct number of rows was deleted +-- report true +SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table; + ?column? +---------- + t +(1 row) + +ROLLBACK; +-- check that you uncompress and delete only tuples which satisfy SEGMENTBY +-- and ORDERBY qualifiers, segments only contain one distinct value for +-- these qualifiers, everything should be deleted that was decompressed +BEGIN; +-- report 1k rows +SELECT COUNT(*) FROM sample_table WHERE c4 = 5 and c1 = 5; + count +------- + 1000 +(1 row) + +-- report 1 row in compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 = 5 AND _ts_meta_min_1 <= 5 and _ts_meta_max_1 >= 5; + count +------- + 1 +(1 row) + +-- fetch total and number of affected rows +SELECT COUNT(*) AS "total_rows" FROM sample_table \gset +SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c4 = 5 and c1 = 5 \gset +-- delete 1k rows +DELETE FROM sample_table WHERE c4 = 5 and c1 = 5; +INFO: Index "compress_hyper_20_38_chunk_c4__ts_meta_sequence_num_idx" is used for scan. +INFO: Number of compressed rows fetched from index: 10. Number of compressed rows filtered by heap filters: 9. +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c4 = 5 and c1 = 5; + count +------- + 0 +(1 row) + +-- report 0 rows in uncompressed chunk +SELECT COUNT(*) FROM ONLY :CHUNK_1; + count +------- + 0 +(1 row) + +-- report 0 rows in compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 = 5 AND _ts_meta_min_1 <= 5 and _ts_meta_max_1 >= 5; + count +------- + 0 +(1 row) + +-- validate correct number of rows was deleted +-- report true +SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table; + ?column? +---------- + t +(1 row) + +ROLLBACK; +-- check that you uncompress and delete only tuples which satisfy SEGMENTBY +-- and ORDERBY qualifiers, segments contain more than one distinct value for +-- these qualifiers, not everything should be deleted that was decompressed +BEGIN; +-- report 4k rows +SELECT COUNT(*) FROM sample_table WHERE c4 > 5 and c2 = 5; + count +------- + 4000 +(1 row) + +-- report 40 rows in compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 > 5 AND _ts_meta_min_2 <= 5 and _ts_meta_max_2 >= 5; + count +------- + 40 +(1 row) + +-- fetch total and number of affected rows +SELECT COUNT(*) AS "total_rows" FROM sample_table \gset +SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c4 > 5 and c2 = 5 \gset +-- delete 4k rows +DELETE FROM sample_table WHERE c4 > 5 and c2 = 5; +INFO: Index "compress_hyper_20_38_chunk_c4__ts_meta_sequence_num_idx" is used for scan. +INFO: Number of compressed rows fetched from index: 40. Number of compressed rows filtered by heap filters: 0. +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c4 > 5 and c2 = 5; + count +------- + 0 +(1 row) + +-- report 36k rows in uncompressed chunk +SELECT COUNT(*) FROM ONLY :CHUNK_1; + count +------- + 36000 +(1 row) + +-- report 0 rows in compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 > 5 AND _ts_meta_min_2 <= 5 and _ts_meta_max_2 >= 5; + count +------- + 0 +(1 row) + +-- validate correct number of rows was deleted +-- report true +SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table; + ?column? +---------- + t +(1 row) + +ROLLBACK; +-- check that you uncompress and delete only tuples which satisfy SEGMENTBY +-- and ORDERBY qualifiers. +-- no: of rows satisfying SEGMENTBY qualifiers is 10 +-- no: of rows satisfying ORDERBY qualifiers is 3 +-- Once both qualifiers are applied ensure that only 7 rows are present in +-- compressed chunk +BEGIN; +-- report 0 rows in uncompressed chunk +SELECT COUNT(*) FROM ONLY :CHUNK_1; + count +------- + 0 +(1 row) + +-- report 10 compressed rows for given condition c4 = 4 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 = 4; + count +------- + 10 +(1 row) + +-- report 3 compressed rows for given condition c4 = 4 and c1 >= 7 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 = 4 AND _ts_meta_max_1 >= 7; + count +------- + 3 +(1 row) + +SELECT COUNT(*) AS "total_rows" FROM :COMPRESS_CHUNK_1 WHERE c4 = 4 \gset +SELECT COUNT(*) AS "total_affected_rows" FROM :COMPRESS_CHUNK_1 WHERE c4 = 4 AND _ts_meta_max_1 >= 7 \gset +UPDATE sample_table SET c3 = c3 + 0 WHERE c4 = 4 AND c1 >= 7; +INFO: Index "compress_hyper_20_38_chunk_c4__ts_meta_sequence_num_idx" is used for scan. +INFO: Number of compressed rows fetched from index: 10. Number of compressed rows filtered by heap filters: 7. +-- report 7 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 = 4; + count +------- + 7 +(1 row) + +-- ensure correct number of rows are moved from compressed chunk +-- report true +SELECT COUNT(*) = :total_rows - :total_affected_rows FROM :COMPRESS_CHUNK_1 WHERE c4 = 4; + ?column? +---------- + t +(1 row) + +ROLLBACK; +RESET timescaledb.debug_compression_path_info; +--github issue: 5640 +CREATE TABLE tab1(filler_1 int, filler_2 int, filler_3 int, time timestamptz NOT NULL, device_id int, v0 int, v1 int, v2 float, v3 float); +CREATE INDEX ON tab1(time); +CREATE INDEX ON tab1(device_id,time); +SELECT create_hypertable('tab1','time',create_default_indexes:=false); + create_hypertable +-------------------- + (21,public,tab1,t) +(1 row) + +ALTER TABLE tab1 DROP COLUMN filler_1; +INSERT INTO tab1(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-05 23:55:00+0','57m') gtime(time), generate_series(1,1,1) gdevice(device_id); +ALTER TABLE tab1 DROP COLUMN filler_2; +INSERT INTO tab1(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id-1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-06 0:00:00+0'::timestamptz,'2000-01-12 23:55:00+0','58m') gtime(time), generate_series(1,1,1) gdevice(device_id); +ALTER TABLE tab1 DROP COLUMN filler_3; +INSERT INTO tab1(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-13 0:00:00+0'::timestamptz,'2000-01-19 23:55:00+0','59m') gtime(time), generate_series(1,1,1) gdevice(device_id); +ANALYZE tab1; +-- compress chunks +ALTER TABLE tab1 SET (timescaledb.compress, timescaledb.compress_orderby='time DESC', timescaledb.compress_segmentby='device_id'); +SELECT compress_chunk(show_chunks('tab1')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_21_39_chunk + _timescaledb_internal._hyper_21_40_chunk + _timescaledb_internal._hyper_21_41_chunk +(3 rows) + +-- ensure there is an index scan generated for below DELETE query +BEGIN; +SELECT count(*) FROM tab1 WHERE device_id = 1; + count +------- + 472 +(1 row) + +INSERT INTO tab1(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 1000, NULL FROM generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-05 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id); +SELECT count(*) FROM tab1 WHERE device_id = 1; + count +------- + 4070 +(1 row) + +ANALYZE tab1; +EXPLAIN (costs off) DELETE FROM public.tab1 WHERE public.tab1.device_id = 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) + -> Delete on tab1 + Delete on _hyper_21_39_chunk tab1_1 + Delete on _hyper_21_40_chunk tab1_2 + Delete on _hyper_21_41_chunk tab1_3 + -> Append + -> Index Scan using _hyper_21_39_chunk_tab1_device_id_time_idx on _hyper_21_39_chunk tab1_1 + Index Cond: (device_id = 1) + -> Seq Scan on _hyper_21_40_chunk tab1_2 + Filter: (device_id = 1) + -> Seq Scan on _hyper_21_41_chunk tab1_3 + Filter: (device_id = 1) +(12 rows) + +DELETE FROM tab1 WHERE tab1.device_id = 1; +SELECT count(*) FROM tab1 WHERE device_id = 1; + count +------- + 0 +(1 row) + +ROLLBACK; +-- github issue 5658 +-- verify that bitmap heap scans work on all the correct data and +-- none of it left over after the dml command +BEGIN; +SELECT count(*) FROM tab1 WHERE device_id = 1; + count +------- + 472 +(1 row) + +INSERT INTO tab1(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 1000, NULL FROM generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-05 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id); +SELECT count(*) FROM tab1 WHERE device_id = 1; + count +------- + 4070 +(1 row) + +ANALYZE tab1; +SET enable_seqscan = off; +SET enable_indexscan = off; +EXPLAIN (costs off) DELETE FROM tab1 WHERE tab1.device_id = 1; + QUERY PLAN +----------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Delete on tab1 + Delete on _hyper_21_39_chunk tab1_1 + Delete on _hyper_21_40_chunk tab1_2 + Delete on _hyper_21_41_chunk tab1_3 + -> Append + -> Bitmap Heap Scan on _hyper_21_39_chunk tab1_1 + Recheck Cond: (device_id = 1) + -> Bitmap Index Scan on _hyper_21_39_chunk_tab1_device_id_time_idx + Index Cond: (device_id = 1) + -> Seq Scan on _hyper_21_40_chunk tab1_2 + Filter: (device_id = 1) + -> Seq Scan on _hyper_21_41_chunk tab1_3 + Filter: (device_id = 1) +(14 rows) + +DELETE FROM tab1 WHERE tab1.device_id = 1; +SELECT count(*) FROM tab1 WHERE device_id = 1; + count +------- + 0 +(1 row) + +ROLLBACK; +-- create hypertable with space partitioning and compression +CREATE TABLE tab2(filler_1 int, filler_2 int, filler_3 int, time timestamptz NOT NULL, device_id int, v0 int, v1 int, v2 float, v3 float); +CREATE INDEX ON tab2(time); +CREATE INDEX ON tab2(device_id,time); +SELECT create_hypertable('tab2','time','device_id',3,create_default_indexes:=false); + create_hypertable +-------------------- + (23,public,tab2,t) +(1 row) + +ALTER TABLE tab2 DROP COLUMN filler_1; +INSERT INTO tab2(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-05 23:55:00+0','35m') gtime(time), generate_series(1,1,1) gdevice(device_id); +ALTER TABLE tab2 DROP COLUMN filler_2; +INSERT INTO tab2(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-06 0:00:00+0'::timestamptz,'2000-01-12 23:55:00+0','45m') gtime(time), generate_series(1,1,1) gdevice(device_id); +ALTER TABLE tab2 DROP COLUMN filler_3; +INSERT INTO tab2(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-13 0:00:00+0'::timestamptz,'2000-01-19 23:55:00+0','55m') gtime(time), generate_series(1,1,1) gdevice(device_id); +ANALYZE tab2; +-- compress chunks +ALTER TABLE tab2 SET (timescaledb.compress, timescaledb.compress_orderby='time DESC', timescaledb.compress_segmentby='device_id'); +SELECT compress_chunk(show_chunks('tab2')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_23_45_chunk + _timescaledb_internal._hyper_23_46_chunk + _timescaledb_internal._hyper_23_47_chunk +(3 rows) + +-- below test will cause chunks of tab2 to get decompressed +-- without fix for issue #5460 +SET timescaledb.enable_optimizations = OFF; +BEGIN; +DELETE FROM tab1 t1 USING tab2 t2 WHERE t1.device_id = t2.device_id AND t2.time > '2000-01-01'; +ROLLBACK; +--cleanup +RESET timescaledb.enable_optimizations; +DROP table tab1; +DROP table tab2; +-- test joins with UPDATE/DELETE on compression chunks +CREATE TABLE join_test1(time timestamptz NOT NULL,device text, value float); +CREATE TABLE join_test2(time timestamptz NOT NULL,device text, value float); +CREATE VIEW chunk_status AS SELECT ht.table_name AS hypertable, ch.table_name AS chunk,ch.status from _timescaledb_catalog.chunk ch INNER JOIN _timescaledb_catalog.hypertable ht ON ht.id=ch.hypertable_id AND ht.table_name IN ('join_test1','join_test2') ORDER BY ht.id, ch.id; +SELECT table_name FROM create_hypertable('join_test1', 'time'); + table_name +------------ + join_test1 +(1 row) + +SELECT table_name FROM create_hypertable('join_test2', 'time'); + table_name +------------ + join_test2 +(1 row) + +ALTER TABLE join_test1 SET (timescaledb.compress, timescaledb.compress_segmentby='device'); +NOTICE: default order by for hypertable "join_test1" is set to ""time" DESC" +ALTER TABLE join_test2 SET (timescaledb.compress, timescaledb.compress_segmentby='device'); +NOTICE: default order by for hypertable "join_test2" is set to ""time" DESC" +INSERT INTO join_test1 VALUES ('2000-01-01','d1',0.1), ('2000-02-01','d1',0.1), ('2000-03-01','d1',0.1); +INSERT INTO join_test2 VALUES ('2000-02-01','d1',0.1), ('2000-02-01','d2',0.1), ('2000-02-01','d3',0.1); +SELECT compress_chunk(show_chunks('join_test1')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_25_51_chunk + _timescaledb_internal._hyper_25_52_chunk + _timescaledb_internal._hyper_25_53_chunk +(3 rows) + +SELECT compress_chunk(show_chunks('join_test2')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_26_54_chunk +(1 row) + +SELECT * FROM chunk_status; + hypertable | chunk | status +------------+--------------------+-------- + join_test1 | _hyper_25_51_chunk | 1 + join_test1 | _hyper_25_52_chunk | 1 + join_test1 | _hyper_25_53_chunk | 1 + join_test2 | _hyper_26_54_chunk | 1 +(4 rows) + +BEGIN; +DELETE FROM join_test1 USING join_test2; +-- only join_test1 chunks should have status 9 +SELECT * FROM chunk_status; + hypertable | chunk | status +------------+--------------------+-------- + join_test1 | _hyper_25_51_chunk | 9 + join_test1 | _hyper_25_52_chunk | 9 + join_test1 | _hyper_25_53_chunk | 9 + join_test2 | _hyper_26_54_chunk | 1 +(4 rows) + +ROLLBACK; +BEGIN; +DELETE FROM join_test2 USING join_test1; +-- only join_test2 chunks should have status 9 +SELECT * FROM chunk_status; + hypertable | chunk | status +------------+--------------------+-------- + join_test1 | _hyper_25_51_chunk | 1 + join_test1 | _hyper_25_52_chunk | 1 + join_test1 | _hyper_25_53_chunk | 1 + join_test2 | _hyper_26_54_chunk | 9 +(4 rows) + +ROLLBACK; +BEGIN; +DELETE FROM join_test1 t1 USING join_test1 t2 WHERE t1.time = '2000-01-01'; +-- only first chunk of join_test1 should have status change +SELECT * FROM chunk_status; + hypertable | chunk | status +------------+--------------------+-------- + join_test1 | _hyper_25_51_chunk | 9 + join_test1 | _hyper_25_52_chunk | 1 + join_test1 | _hyper_25_53_chunk | 1 + join_test2 | _hyper_26_54_chunk | 1 +(4 rows) + +ROLLBACK; +BEGIN; +DELETE FROM join_test1 t1 USING join_test1 t2 WHERE t2.time = '2000-01-01'; +-- all chunks of join_test1 should have status 9 +SELECT * FROM chunk_status; + hypertable | chunk | status +------------+--------------------+-------- + join_test1 | _hyper_25_51_chunk | 9 + join_test1 | _hyper_25_52_chunk | 9 + join_test1 | _hyper_25_53_chunk | 9 + join_test2 | _hyper_26_54_chunk | 1 +(4 rows) + +ROLLBACK; +BEGIN; +UPDATE join_test1 t1 SET value = t1.value + 1 FROM join_test2 t2; +-- only join_test1 chunks should have status 9 +SELECT * FROM chunk_status; + hypertable | chunk | status +------------+--------------------+-------- + join_test1 | _hyper_25_51_chunk | 9 + join_test1 | _hyper_25_52_chunk | 9 + join_test1 | _hyper_25_53_chunk | 9 + join_test2 | _hyper_26_54_chunk | 1 +(4 rows) + +ROLLBACK; +BEGIN; +UPDATE join_test2 t1 SET value = t1.value + 1 FROM join_test1 t2; +-- only join_test2 chunks should have status 9 +SELECT * FROM chunk_status; + hypertable | chunk | status +------------+--------------------+-------- + join_test1 | _hyper_25_51_chunk | 1 + join_test1 | _hyper_25_52_chunk | 1 + join_test1 | _hyper_25_53_chunk | 1 + join_test2 | _hyper_26_54_chunk | 9 +(4 rows) + +ROLLBACK; +BEGIN; +UPDATE join_test1 t1 SET value = t1.value + 1 FROM join_test1 t2 WHERE t1.time = '2000-01-01'; +-- only first chunk of join_test1 should have status 9 +SELECT * FROM chunk_status; + hypertable | chunk | status +------------+--------------------+-------- + join_test1 | _hyper_25_51_chunk | 9 + join_test1 | _hyper_25_52_chunk | 1 + join_test1 | _hyper_25_53_chunk | 1 + join_test2 | _hyper_26_54_chunk | 1 +(4 rows) + +ROLLBACK; +BEGIN; +UPDATE join_test1 t1 SET value = t1.value + 1 FROM join_test1 t2 WHERE t2.time = '2000-01-01'; +-- all chunks of join_test1 should have status 9 +SELECT * FROM chunk_status; + hypertable | chunk | status +------------+--------------------+-------- + join_test1 | _hyper_25_51_chunk | 9 + join_test1 | _hyper_25_52_chunk | 9 + join_test1 | _hyper_25_53_chunk | 9 + join_test2 | _hyper_26_54_chunk | 1 +(4 rows) + +ROLLBACK; +DROP TABLE join_test1; +DROP TABLE join_test2; +-- test if index scan qualifiers are properly used +CREATE TABLE index_scan_test(time timestamptz NOT NULL, device_id int, value float); +SELECT create_hypertable('index_scan_test','time',create_default_indexes:=false); + create_hypertable +------------------------------- + (29,public,index_scan_test,t) +(1 row) + +INSERT INTO index_scan_test(time,device_id,value) SELECT time, device_id, device_id + 0.5 FROM generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-01 23:55:00+0','1m') gtime(time), generate_series(1,5,1) gdevice(device_id); +-- compress chunks +ALTER TABLE index_scan_test SET (timescaledb.compress, timescaledb.compress_orderby='time DESC', timescaledb.compress_segmentby='device_id'); +SELECT compress_chunk(show_chunks('index_scan_test')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_29_59_chunk +(1 row) + +ANALYZE index_scan_test; +SELECT ch1.schema_name|| '.' || ch1.table_name AS "CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ht.table_name = 'index_scan_test' +AND ch1.hypertable_id = ht.id +AND ch1.table_name LIKE '_hyper%' +ORDER BY ch1.id LIMIT 1 \gset +SELECT ch2.schema_name|| '.' || ch2.table_name AS "COMP_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.chunk ch2, _timescaledb_catalog.hypertable ht +WHERE ht.table_name = 'index_scan_test' +AND ch1.hypertable_id = ht.id +AND ch1.compressed_chunk_id = ch2.id +ORDER BY ch2.id LIMIT 1 \gset +INSERT INTO index_scan_test(time,device_id,value) SELECT time, device_id, device_id + 0.5 FROM generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-05 23:55:00+0','1m') gtime(time), generate_series(1,5,1) gdevice(device_id); +-- test index on single column +BEGIN; +SELECT count(*) as "UNCOMP_LEFTOVER" FROM ONLY :CHUNK_1 WHERE device_id != 2 \gset +CREATE INDEX ON index_scan_test(device_id); +EXPLAIN (costs off, verbose) DELETE FROM index_scan_test WHERE device_id = 2; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Delete on public.index_scan_test + Delete on _timescaledb_internal._hyper_29_59_chunk index_scan_test_1 + -> Index Scan using _hyper_29_59_chunk_index_scan_test_device_id_idx on _timescaledb_internal._hyper_29_59_chunk index_scan_test_1 + Output: index_scan_test_1.tableoid, index_scan_test_1.ctid + Index Cond: (index_scan_test_1.device_id = 2) +(6 rows) + +DELETE FROM index_scan_test WHERE device_id = 2; +-- everything should be deleted +SELECT count(*) FROM index_scan_test where device_id = 2; + count +------- + 0 +(1 row) + +-- there shouldn't be anything in the uncompressed chunk where device_id = 2 +SELECT count(*) = :UNCOMP_LEFTOVER FROM ONLY :CHUNK_1; + ?column? +---------- + t +(1 row) + +-- there shouldn't be anything in the compressed chunk from device_id = 2 +SELECT count(*) FROM :COMP_CHUNK_1 where device_id = 2; + count +------- + 0 +(1 row) + +ROLLBACK; +-- test multi column index +BEGIN; +SELECT count(*) as "UNCOMP_LEFTOVER" FROM ONLY :CHUNK_1 WHERE device_id != 2 OR time <= '2000-01-02'::timestamptz \gset +CREATE INDEX ON index_scan_test(device_id, time); +EXPLAIN (costs off, verbose) DELETE FROM index_scan_test WHERE device_id = 2 AND time > '2000-01-02'::timestamptz; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Delete on public.index_scan_test + Delete on _timescaledb_internal._hyper_29_59_chunk index_scan_test_1 + -> Index Scan using _hyper_29_59_chunk_index_scan_test_device_id_time_idx on _timescaledb_internal._hyper_29_59_chunk index_scan_test_1 + Output: index_scan_test_1.tableoid, index_scan_test_1.ctid + Index Cond: ((index_scan_test_1.device_id = 2) AND (index_scan_test_1."time" > 'Sun Jan 02 00:00:00 2000 PST'::timestamp with time zone)) +(6 rows) + +DELETE FROM index_scan_test WHERE device_id = 2 AND time > '2000-01-02'::timestamptz; +-- everything should be deleted +SELECT count(*) FROM index_scan_test WHERE device_id = 2 AND time > '2000-01-02'::timestamptz; + count +------- + 0 +(1 row) + +-- there shouldn't be anything in the uncompressed chunk that matches predicates +SELECT count(*) = :UNCOMP_LEFTOVER FROM ONLY :CHUNK_1; + ?column? +---------- + t +(1 row) + +-- there shouldn't be anything in the compressed chunk that matches predicates +SELECT count(*) FROM :COMP_CHUNK_1 WHERE device_id = 2 AND _ts_meta_max_1 >= '2000-01-02'::timestamptz; + count +------- + 0 +(1 row) + +ROLLBACK; +-- test index with filter condition +BEGIN; +SELECT count(*) as "UNCOMP_LEFTOVER" FROM ONLY :CHUNK_1 WHERE device_id != 2 OR time <= '2000-01-02'::timestamptz \gset +CREATE INDEX ON index_scan_test(device_id); +EXPLAIN (costs off, verbose) DELETE FROM index_scan_test WHERE device_id = 2 AND time > '2000-01-02'::timestamptz; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Delete on public.index_scan_test + Delete on _timescaledb_internal._hyper_29_59_chunk index_scan_test_1 + -> Index Scan using _hyper_29_59_chunk_index_scan_test_device_id_idx on _timescaledb_internal._hyper_29_59_chunk index_scan_test_1 + Output: index_scan_test_1.tableoid, index_scan_test_1.ctid + Index Cond: (index_scan_test_1.device_id = 2) + Filter: (index_scan_test_1."time" > 'Sun Jan 02 00:00:00 2000 PST'::timestamp with time zone) +(7 rows) + +DELETE FROM index_scan_test WHERE device_id = 2 AND time > '2000-01-02'::timestamptz; +-- everything should be deleted +SELECT count(*) FROM index_scan_test WHERE device_id = 2 AND time > '2000-01-02'::timestamptz; + count +------- + 0 +(1 row) + +-- there shouldn't be anything in the uncompressed chunk that matches predicates +SELECT count(*) = :UNCOMP_LEFTOVER FROM ONLY :CHUNK_1; + ?column? +---------- + t +(1 row) + +-- there shouldn't be anything in the compressed chunk that matches predicates +SELECT count(*) FROM :COMP_CHUNK_1 WHERE device_id = 2 AND _ts_meta_max_1 >= '2000-01-02'::timestamptz; + count +------- + 0 +(1 row) + +ROLLBACK; +-- test for disabling DML decompression +SHOW timescaledb.enable_dml_decompression; + timescaledb.enable_dml_decompression +-------------------------------------- + on +(1 row) + +SET timescaledb.enable_dml_decompression = false; +\set ON_ERROR_STOP 0 +-- should ERROR both UPDATE/DELETE statements because the DML decompression is disabled +UPDATE sample_table SET c3 = NULL WHERE c4 = 5; +ERROR: UPDATE/DELETE is disabled on compressed chunks +DELETE FROM sample_table WHERE c4 = 5; +ERROR: UPDATE/DELETE is disabled on compressed chunks +\set ON_ERROR_STOP 1 +-- make sure reseting the GUC we will be able to UPDATE/DELETE compressed chunks +RESET timescaledb.enable_dml_decompression; +SHOW timescaledb.enable_dml_decompression; + timescaledb.enable_dml_decompression +-------------------------------------- + on +(1 row) + +BEGIN; +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c4 = 5 AND c3 IS NULL; + count +------- + 0 +(1 row) + +UPDATE sample_table SET c3 = NULL WHERE c4 = 5; +-- report 10k rows +SELECT count(*) FROM sample_table WHERE c4 = 5 AND c3 IS NULL; + count +------- + 10000 +(1 row) + +ROLLBACK; +BEGIN; +-- report 10k rows +SELECT count(*) FROM sample_table WHERE c4 = 5; + count +------- + 10000 +(1 row) + +DELETE FROM sample_table WHERE c4 = 5; +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c4 = 5; + count +------- + 0 +(1 row) + +ROLLBACK; +-- create new uncompressed chunk +INSERT INTO sample_table +SELECT t, 1, 1, 1, 1 +FROM generate_series('2023-05-04 00:00:00-00'::timestamptz, + '2023-05-04 00:00:00-00'::timestamptz + INTERVAL '2 hours', + INTERVAL '1 hour') t; +-- check chunk compression status +SELECT chunk_name, is_compressed +FROM timescaledb_information.chunks +WHERE hypertable_name = 'sample_table' +ORDER BY chunk_name; + chunk_name | is_compressed +--------------------+--------------- + _hyper_19_37_chunk | t + _hyper_19_61_chunk | f +(2 rows) + +-- test for uncompressed and compressed chunks +SHOW timescaledb.enable_dml_decompression; + timescaledb.enable_dml_decompression +-------------------------------------- + on +(1 row) + +SET timescaledb.enable_dml_decompression = false; +BEGIN; +-- report 3 rows +SELECT count(*) FROM sample_table WHERE time >= '2023-05-04 00:00:00-00'::timestamptz; + count +------- + 3 +(1 row) + +-- delete from uncompressed chunk should work +DELETE FROM sample_table WHERE time >= '2023-05-04 00:00:00-00'::timestamptz; +-- report 0 rows +SELECT count(*) FROM sample_table WHERE time >= '2023-05-04 00:00:00-00'::timestamptz; + count +------- + 0 +(1 row) + +ROLLBACK; +BEGIN; +-- report 0 rows +SELECT count(*) FROM sample_table WHERE time >= '2023-05-04 00:00:00-00'::timestamptz AND c3 IS NULL; + count +------- + 0 +(1 row) + +UPDATE sample_table SET c3 = NULL WHERE time >= '2023-05-04 00:00:00-00'::timestamptz; +-- report 3 rows +SELECT count(*) FROM sample_table WHERE time >= '2023-05-04 00:00:00-00'::timestamptz AND c3 IS NULL; + count +------- + 3 +(1 row) + +ROLLBACK; +\set ON_ERROR_STOP 0 +-- should ERROR both UPDATE/DELETE statements because the DML decompression is disabled +-- and both statements we're touching compressed and uncompressed chunks +UPDATE sample_table SET c3 = NULL WHERE time >= '2023-03-17 00:00:00-00'::timestamptz AND c3 IS NULL; +ERROR: UPDATE/DELETE is disabled on compressed chunks +DELETE FROM sample_table WHERE time >= '2023-03-17 00:00:00-00'::timestamptz; +ERROR: UPDATE/DELETE is disabled on compressed chunks +\set ON_ERROR_STOP 1 +--github issue: 5586 +--testcase with multiple indexes +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE DATABASE test5586; +\c test5586 :ROLE_SUPERUSER +SET client_min_messages = ERROR; +CREATE EXTENSION timescaledb CASCADE; +CREATE TABLE tab1(filler_1 int, filler_2 int, filler_3 int, time timestamptz NOT NULL, device_id int, v0 int, v1 int, v2 float, v3 float); +SELECT create_hypertable('tab1','time',create_default_indexes:=false); + create_hypertable +------------------- + (1,public,tab1,t) +(1 row) + +INSERT INTO tab1(filler_1, filler_2, filler_3,time,device_id,v0,v1,v2,v3) SELECT device_id, device_id+1, device_id + 2, time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-05 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id); +ALTER TABLE tab1 SET (timescaledb.compress, timescaledb.compress_orderby='time DESC', timescaledb.compress_segmentby='device_id, filler_1, filler_2, filler_3'); +SELECT compress_chunk(show_chunks('tab1')); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +SELECT format('%I.%I', schema_name, table_name) AS "CHUNK" FROM _timescaledb_catalog.chunk WHERE hypertable_id = 2 \gset +-- create multiple indexes on compressed hypertable +DROP INDEX _timescaledb_internal.compress_hyper_2_2_chunk_device_id_filler_1_filler_2_filler_idx; +CREATE INDEX ON :CHUNK (_ts_meta_min_1); +CREATE INDEX ON :CHUNK (_ts_meta_min_1, _ts_meta_sequence_num); +CREATE INDEX ON :CHUNK (_ts_meta_min_1, _ts_meta_max_1, filler_1); +CREATE INDEX filler_1 ON :CHUNK (filler_1); +CREATE INDEX filler_2 ON :CHUNK (filler_2); +CREATE INDEX filler_3 ON :CHUNK (filler_3); +-- below indexes should be selected +CREATE INDEX filler_1_filler_2 ON :CHUNK (filler_1, filler_2); +CREATE INDEX filler_2_filler_3 ON :CHUNK (filler_2, filler_3); +set timescaledb.debug_compression_path_info to on; +BEGIN; +SELECT COUNT(*) FROM tab1 WHERE filler_3 = 5 AND filler_2 = 4; + count +------- + 3598 +(1 row) + +UPDATE tab1 SET v0 = v1 + v2 WHERE filler_3 = 5 AND filler_2 = 4; +INFO: Index "filler_2_filler_3" is used for scan. +INFO: Number of compressed rows fetched from index: 4. Number of compressed rows filtered by heap filters: 0. +ROLLBACK; +BEGIN; +SELECT COUNT(*) FROM tab1 WHERE filler_1 < 5 AND filler_2 = 4; + count +------- + 3598 +(1 row) + +UPDATE tab1 SET v0 = v1 + v2 WHERE filler_1 < 5 AND filler_2 = 4; +INFO: Index "filler_1_filler_2" is used for scan. +INFO: Number of compressed rows fetched from index: 4. Number of compressed rows filtered by heap filters: 0. +ROLLBACK; +-- idealy filler_1 index should be selected, +-- instead first matching index is selected +BEGIN; +SELECT COUNT(*) FROM tab1 WHERE filler_1 < 5; + count +------- + 14392 +(1 row) + +UPDATE tab1 SET v0 = v1 + v2 WHERE filler_1 < 5; +INFO: Index "compress_hyper_2_2_chunk__ts_meta_min_1__ts_meta_max_1_fill_idx" is used for scan. +INFO: Number of compressed rows fetched from index: 16. Number of compressed rows filtered by heap filters: 0. +ROLLBACK; +RESET timescaledb.debug_compression_path_info; +DROP TABLE tab1; +\c :TEST_DBNAME :ROLE_SUPERUSER +DROP DATABASE test5586; +--issue: #6024 +CREATE TABLE t(a integer, b integer); +SELECT create_hypertable('t', 'a', chunk_time_interval=> 10); +NOTICE: adding not-null constraint to column "a" + create_hypertable +------------------- + (31,public,t,t) +(1 row) + +INSERT INTO t values(1, 2); +ALTER TABLE t SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "t" is set to "" +NOTICE: default order by for hypertable "t" is set to "a DESC" +SELECT compress_chunk(show_chunks('t')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_31_62_chunk +(1 row) + +-- should not crash +UPDATE t SET b = 2 WHERE tableoid = 0; +UPDATE t SET b = 2 WHERE tableoid is null; +DROP TABLE t; +-- github issue: 6367 +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE DATABASE test6367; +\c test6367 :ROLE_SUPERUSER +SET client_min_messages = ERROR; +CREATE EXTENSION timescaledb CASCADE; +CREATE TABLE t6367 ( + time timestamptz NOT NULL, + source_id varchar(64) NOT NULL, + label varchar, + data jsonb +); +SELECT table_name FROM create_hypertable('t6367', 'time'); + table_name +------------ + t6367 +(1 row) + +ALTER TABLE t6367 SET(timescaledb.compress, timescaledb.compress_segmentby = 'source_id, label', timescaledb.compress_orderby = 'time'); +INSERT INTO t6367 +SELECT time, source_id, label, '{}' AS data +FROM +generate_series('1990-01-01'::timestamptz, '1990-01-10'::timestamptz, INTERVAL '1 day') AS g1(time), +generate_series(1, 3, 1 ) AS g2(source_id), +generate_series(1, 3, 1 ) AS g3(label); +SELECT compress_chunk(c) FROM show_chunks('t6367') c; + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_2_chunk +(2 rows) + +SELECT format('%I.%I', schema_name, table_name) AS "CHUNK1" FROM _timescaledb_catalog.chunk WHERE hypertable_id = 2 ORDER BY id LIMIT 1 \gset +SELECT format('%I.%I', schema_name, table_name) AS "CHUNK2" FROM _timescaledb_catalog.chunk WHERE hypertable_id = 2 ORDER BY id LIMIT 1 OFFSET 1 \gset +DROP INDEX _timescaledb_internal.compress_hyper_2_3_chunk_source_id_label__ts_meta_sequence__idx; +DROP INDEX _timescaledb_internal.compress_hyper_2_4_chunk_source_id_label__ts_meta_sequence__idx; +-- testcase with no index, should use seq scan +set timescaledb.debug_compression_path_info to on; +BEGIN; +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 10 +(1 row) + +UPDATE t6367 SET source_id = '0' WHERE source_id = '2' AND label = '1'; +INFO: Number of compressed rows fetched from table scan: 1. Number of compressed rows filtered: 0. +INFO: Number of compressed rows fetched from table scan: 1. Number of compressed rows filtered: 0. +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 0 +(1 row) + +ROLLBACK; +-- test case with an index which has only one +-- of the segmentby filters +CREATE INDEX source_id_idx1 ON :CHUNK1 (source_id); +CREATE INDEX source_id_idx2 ON :CHUNK2 (source_id); +BEGIN; +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 10 +(1 row) + +UPDATE t6367 SET source_id = '0' WHERE source_id = '2' AND label = '1'; +INFO: Index "source_id_idx1" is used for scan. +INFO: Number of compressed rows fetched from index: 3. Number of compressed rows filtered by heap filters: 2. +INFO: Index "source_id_idx2" is used for scan. +INFO: Number of compressed rows fetched from index: 3. Number of compressed rows filtered by heap filters: 2. +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 0 +(1 row) + +ROLLBACK; +-- test that we are filtering NULL checks +BEGIN; +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label IS NULL; + count +------- + 0 +(1 row) + +UPDATE t6367 SET source_id = '0' WHERE source_id = '2' AND label IS NULL; +INFO: Index "source_id_idx1" is used for scan. +INFO: Number of compressed rows fetched from index: 3. Number of compressed rows filtered by heap filters: 3. +INFO: Index "source_id_idx2" is used for scan. +INFO: Number of compressed rows fetched from index: 3. Number of compressed rows filtered by heap filters: 3. +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label IS NULL; + count +------- + 0 +(1 row) + +ROLLBACK; +BEGIN; +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label IS NOT NULL; + count +------- + 30 +(1 row) + +UPDATE t6367 SET source_id = '0' WHERE source_id = '2' AND label IS NOT NULL; +INFO: Index "source_id_idx1" is used for scan. +INFO: Number of compressed rows fetched from index: 3. Number of compressed rows filtered by heap filters: 0. +INFO: Index "source_id_idx2" is used for scan. +INFO: Number of compressed rows fetched from index: 3. Number of compressed rows filtered by heap filters: 0. +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label IS NOT NULL; + count +------- + 0 +(1 row) + +ROLLBACK; +DROP INDEX _timescaledb_internal.source_id_idx1; +DROP INDEX _timescaledb_internal.source_id_idx2; +-- test case with an index which has multiple same column +CREATE INDEX source_id_source_id_idx ON :CHUNK1 (source_id, source_id); +BEGIN; +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 10 +(1 row) + +UPDATE t6367 SET source_id = '0' WHERE source_id = '2' AND label = '1'; +INFO: Index "source_id_source_id_idx" is used for scan. +INFO: Number of compressed rows fetched from index: 3. Number of compressed rows filtered by heap filters: 2. +INFO: Number of compressed rows fetched from table scan: 1. Number of compressed rows filtered: 0. +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 0 +(1 row) + +ROLLBACK; +DROP INDEX _timescaledb_internal.source_id_source_id_idx; +-- test using a non-btree index +-- fallback to heap scan +CREATE INDEX brin_source_id_idx ON :CHUNK1 USING brin (source_id); +BEGIN; +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 10 +(1 row) + +UPDATE t6367 SET source_id = '0' WHERE source_id = '2' AND label = '1'; +INFO: Number of compressed rows fetched from table scan: 1. Number of compressed rows filtered: 0. +INFO: Number of compressed rows fetched from table scan: 1. Number of compressed rows filtered: 0. +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 0 +(1 row) + +ROLLBACK; +DROP INDEX _timescaledb_internal.brin_source_id_idx; +-- test using an expression index +-- should fallback to heap scans +CREATE INDEX expr_source_id_idx ON :CHUNK1 (upper(source_id)); +BEGIN; +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 10 +(1 row) + +UPDATE t6367 SET source_id = '0' WHERE source_id = '2' AND label = '1'; +INFO: Number of compressed rows fetched from table scan: 1. Number of compressed rows filtered: 0. +INFO: Number of compressed rows fetched from table scan: 1. Number of compressed rows filtered: 0. +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 0 +(1 row) + +ROLLBACK; +DROP INDEX _timescaledb_internal.expr_source_id_idx; +-- test using a partial index +-- should fallback to heap scans +CREATE INDEX partial_source_id_idx ON :CHUNK1 (source_id) +WHERE _ts_meta_min_1 > '1990-01-01'::timestamptz; +BEGIN; +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 10 +(1 row) + +UPDATE t6367 SET source_id = '0' WHERE source_id = '2' AND label = '1'; +INFO: Number of compressed rows fetched from table scan: 1. Number of compressed rows filtered: 0. +INFO: Number of compressed rows fetched from table scan: 1. Number of compressed rows filtered: 0. +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 0 +(1 row) + +ROLLBACK; +RESET timescaledb.debug_compression_path_info; +DROP TABLE t6367; +\c :TEST_DBNAME :ROLE_SUPERUSER +DROP DATABASE test6367; +-- Text limitting decompressed tuple during an UPDATE or DELETE +CREATE TABLE test_limit ( + timestamp int not null, + id bigint +); +SELECT * FROM create_hypertable('test_limit', 'timestamp', chunk_time_interval=>10000); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 33 | public | test_limit | t +(1 row) + +INSERT INTO test_limit SELECT t, i FROM generate_series(1,10000,1) t CROSS JOIN generate_series(1,3,1) i; +ALTER TABLE test_limit SET ( + timescaledb.compress, + timescaledb.compress_orderby = 'timestamp' +); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_limit" is set to "" +SELECT count(compress_chunk(ch)) FROM show_chunks('test_limit') ch; + count +------- + 2 +(1 row) + +SET timescaledb.max_tuples_decompressed_per_dml_transaction = 5000; +\set VERBOSITY default +\set ON_ERROR_STOP 0 +-- Updating or deleting everything will break the set limit. +UPDATE test_limit SET id = 0; +ERROR: tuple decompression limit exceeded by operation +DETAIL: current limit: 5000, tuples decompressed: 30000 +HINT: Consider increasing timescaledb.max_tuples_decompressed_per_dml_transaction or set to 0 (unlimited). +DELETE FROM test_limit WHERE id > 0; +ERROR: tuple decompression limit exceeded by operation +DETAIL: current limit: 5000, tuples decompressed: 30000 +HINT: Consider increasing timescaledb.max_tuples_decompressed_per_dml_transaction or set to 0 (unlimited). +-- Setting to 0 should remove the limit. +SET timescaledb.max_tuples_decompressed_per_dml_transaction = 0; +UPDATE test_limit SET id = 0; +DELETE FROM test_limit WHERE id > 0; +\set ON_ERROR_STOP 1 +DROP TABLE test_limit; +-- check partial compression with DML +CREATE TABLE test_partials (time timestamptz NOT NULL, a int, b int); +SELECT create_hypertable('test_partials', 'time'); + create_hypertable +----------------------------- + (35,public,test_partials,t) +(1 row) + +INSERT INTO test_partials +VALUES -- chunk1 + ('2020-01-01 00:00'::timestamptz, 1, 2), + ('2020-01-01 00:01'::timestamptz, 2, 2), + ('2020-01-01 00:04'::timestamptz, 1, 2), + -- chunk2 + ('2021-01-01 00:00'::timestamptz, 1, 2), + ('2021-01-01 00:04'::timestamptz, 1, 2), + -- chunk3 + ('2022-01-01 00:00'::timestamptz, 1, 2), + ('2022-01-01 00:04'::timestamptz, 1, 2); +-- enable compression, compress all chunks +ALTER TABLE test_partials SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_partials" is set to "" +NOTICE: default order by for hypertable "test_partials" is set to ""time" DESC" +SELECT compress_chunk(show_chunks('test_partials')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_35_68_chunk + _timescaledb_internal._hyper_35_69_chunk + _timescaledb_internal._hyper_35_70_chunk +(3 rows) + +-- fully compressed +EXPLAIN (costs off) SELECT * FROM test_partials ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on test_partials + Order: test_partials."time" + -> Custom Scan (DecompressChunk) on _hyper_35_68_chunk + -> Sort + Sort Key: compress_hyper_36_71_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_36_71_chunk + -> Custom Scan (DecompressChunk) on _hyper_35_69_chunk + -> Sort + Sort Key: compress_hyper_36_72_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_36_72_chunk + -> Custom Scan (DecompressChunk) on _hyper_35_70_chunk + -> Sort + Sort Key: compress_hyper_36_73_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_36_73_chunk +(14 rows) + +-- verify correct results +SELECT * FROM test_partials ORDER BY time; + time | a | b +------------------------------+---+--- + Wed Jan 01 00:00:00 2020 PST | 1 | 2 + Wed Jan 01 00:01:00 2020 PST | 2 | 2 + Wed Jan 01 00:04:00 2020 PST | 1 | 2 + Fri Jan 01 00:00:00 2021 PST | 1 | 2 + Fri Jan 01 00:04:00 2021 PST | 1 | 2 + Sat Jan 01 00:00:00 2022 PST | 1 | 2 + Sat Jan 01 00:04:00 2022 PST | 1 | 2 +(7 rows) + +-- check that DML causes transparent decompression and that +-- data gets shifted to the uncompressed parts +EXPLAIN (costs off) DELETE FROM test_partials WHERE time >= ALL(SELECT time from test_partials); + QUERY PLAN +------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Delete on test_partials + Delete on _hyper_35_68_chunk test_partials_1 + Delete on _hyper_35_69_chunk test_partials_2 + Delete on _hyper_35_70_chunk test_partials_3 + -> Custom Scan (ChunkAppend) on test_partials + -> Seq Scan on _hyper_35_68_chunk test_partials_1 + Filter: (SubPlan 1) + SubPlan 1 + -> Materialize + -> Append + -> Custom Scan (DecompressChunk) on _hyper_35_68_chunk + -> Seq Scan on compress_hyper_36_71_chunk + -> Seq Scan on _hyper_35_68_chunk + -> Custom Scan (DecompressChunk) on _hyper_35_69_chunk + -> Seq Scan on compress_hyper_36_72_chunk + -> Seq Scan on _hyper_35_69_chunk + -> Custom Scan (DecompressChunk) on _hyper_35_70_chunk + -> Seq Scan on compress_hyper_36_73_chunk + -> Seq Scan on _hyper_35_70_chunk + -> Seq Scan on _hyper_35_69_chunk test_partials_2 + Filter: (SubPlan 1) + -> Seq Scan on _hyper_35_70_chunk test_partials_3 + Filter: (SubPlan 1) +(24 rows) + +DELETE FROM test_partials WHERE time >= ALL(SELECT time from test_partials); +-- All 3 chunks will now become partially compressed chunks +EXPLAIN (costs off) SELECT * FROM test_partials ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on test_partials + Order: test_partials."time" + -> Merge Append + Sort Key: _hyper_35_68_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_35_68_chunk + -> Sort + Sort Key: compress_hyper_36_71_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_36_71_chunk + -> Sort + Sort Key: _hyper_35_68_chunk."time" + -> Seq Scan on _hyper_35_68_chunk + -> Merge Append + Sort Key: _hyper_35_69_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_35_69_chunk + -> Sort + Sort Key: compress_hyper_36_72_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_36_72_chunk + -> Sort + Sort Key: _hyper_35_69_chunk."time" + -> Seq Scan on _hyper_35_69_chunk + -> Merge Append + Sort Key: _hyper_35_70_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_35_70_chunk + -> Sort + Sort Key: compress_hyper_36_73_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_36_73_chunk + -> Sort + Sort Key: _hyper_35_70_chunk."time" + -> Seq Scan on _hyper_35_70_chunk +(29 rows) + +-- verify correct results +SELECT * FROM test_partials ORDER BY time; + time | a | b +------------------------------+---+--- + Wed Jan 01 00:00:00 2020 PST | 1 | 2 + Wed Jan 01 00:01:00 2020 PST | 2 | 2 + Wed Jan 01 00:04:00 2020 PST | 1 | 2 + Fri Jan 01 00:00:00 2021 PST | 1 | 2 + Fri Jan 01 00:04:00 2021 PST | 1 | 2 + Sat Jan 01 00:00:00 2022 PST | 1 | 2 +(6 rows) + +SELECT compress_chunk(show_chunks('test_partials')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_35_68_chunk + _timescaledb_internal._hyper_35_69_chunk + _timescaledb_internal._hyper_35_70_chunk +(3 rows) + +-- fully compressed +EXPLAIN (costs off) SELECT * FROM test_partials ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on test_partials + Order: test_partials."time" + -> Custom Scan (DecompressChunk) on _hyper_35_68_chunk + -> Sort + Sort Key: compress_hyper_36_74_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_36_74_chunk + -> Custom Scan (DecompressChunk) on _hyper_35_69_chunk + -> Sort + Sort Key: compress_hyper_36_75_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_36_75_chunk + -> Custom Scan (DecompressChunk) on _hyper_35_70_chunk + -> Sort + Sort Key: compress_hyper_36_76_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_36_76_chunk +(14 rows) + +DROP TABLE test_partials; +CREATE TABLE test_meta_filters(time timestamptz NOT NULL, device text, metric text, v1 float, v2 float); +CREATE INDEX ON test_meta_filters(device, metric, v1); +SELECT create_hypertable('test_meta_filters', 'time'); + create_hypertable +--------------------------------- + (37,public,test_meta_filters,t) +(1 row) + +ALTER TABLE test_meta_filters SET (timescaledb.compress, timescaledb.compress_segmentby='device', timescaledb.compress_orderby='metric,time'); +INSERT INTO test_meta_filters SELECT '2020-01-01'::timestamptz,'d1','m' || metric::text,v1,v2 FROM generate_series(1,3,1) metric, generate_series(1,1000,1) v1, generate_series(1,10,1) v2 ORDER BY 1,2,3,4,5; +SELECT compress_chunk(show_chunks('test_meta_filters')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_37_77_chunk +(1 row) + +EXPLAIN (analyze, timing off, costs off, summary off) DELETE FROM test_meta_filters WHERE device = 'd1' AND metric = 'm1' AND v1 < 100; + QUERY PLAN +------------------------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1000 + -> Delete on test_meta_filters (actual rows=0 loops=1) + Delete on _hyper_37_77_chunk test_meta_filters_1 + -> Seq Scan on _hyper_37_77_chunk test_meta_filters_1 (actual rows=990 loops=1) + Filter: ((v1 < '100'::double precision) AND (device = 'd1'::text) AND (metric = 'm1'::text)) + Rows Removed by Filter: 10 +(8 rows) + +-- test expression pushdown in compressed dml constraints +CREATE TABLE test_pushdown(time timestamptz NOT NULL, device text); +SELECT table_name FROM create_hypertable('test_pushdown', 'time'); + table_name +--------------- + test_pushdown +(1 row) + +INSERT INTO test_pushdown SELECT '2020-01-01', 'a'; +INSERT INTO test_pushdown SELECT '2020-01-01', 'b'; +INSERT INTO test_pushdown SELECT '2020-01-01 05:00', 'c'; +CREATE TABLE devices(device text); +INSERT INTO devices VALUES ('a'), ('b'), ('c'); +CREATE TABLE devices2(device text); +INSERT INTO devices2 VALUES ('d'), ('e'), ('f'); +CREATE TABLE devices3(device text); +INSERT INTO devices3 VALUES ('b'), ('d'), ('g'); +ALTER TABLE test_pushdown SET (timescaledb.compress, timescaledb.compress_segmentby='device'); +NOTICE: default order by for hypertable "test_pushdown" is set to ""time" DESC" +SELECT compress_chunk(show_chunks('test_pushdown')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_39_79_chunk +(1 row) + +-- 3 batch decompressions means pushdown is not working so we expect less than 3 for all these queries +SET timescaledb.enable_compressed_direct_batch_delete TO false; +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE 'a' = device; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=1 loops=1) + Filter: ('a'::text = device) +(7 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device < 'c' ; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 2 + Tuples decompressed: 2 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=2 loops=1) + Filter: (device < 'c'::text) +(7 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE 'c' > device; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 2 + Tuples decompressed: 2 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=2 loops=1) + Filter: ('c'::text > device) +(7 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE 'c' >= device; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 3 + Tuples decompressed: 3 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=3 loops=1) + Filter: ('c'::text >= device) +(7 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device > 'b'; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=1 loops=1) + Filter: (device > 'b'::text) +(7 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device = CURRENT_USER; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Custom Scan (ChunkAppend) on test_pushdown (actual rows=0 loops=1) + Chunks excluded during startup: 0 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=0 loops=1) + Filter: (device = CURRENT_USER) +(7 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE 'b' < device; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=1 loops=1) + Filter: ('b'::text < device) +(7 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE 'b' <= device; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 2 + Tuples decompressed: 2 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=2 loops=1) + Filter: ('b'::text <= device) +(7 rows) + +RESET timescaledb.enable_compressed_direct_batch_delete; +-- cant pushdown OR atm +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device = 'a' OR device = 'b'; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 3 + Tuples decompressed: 3 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=2 loops=1) + Filter: ((device = 'a'::text) OR (device = 'b'::text)) + Rows Removed by Filter: 1 +(8 rows) + +-- test stable function +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE time = timestamptz('2020-01-01 05:00'); ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=1 loops=1) + Filter: ("time" = 'Wed Jan 01 05:00:00 2020 PST'::timestamp with time zone) +(7 rows) + +-- test sqlvaluefunction +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device = substring(CURRENT_USER,length(CURRENT_USER)+1) || 'c'; ROLLBACK; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Custom Scan (ChunkAppend) on test_pushdown (actual rows=1 loops=1) + Chunks excluded during startup: 0 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=1 loops=1) + Filter: (device = ("substring"((CURRENT_USER)::text, (length((CURRENT_USER)::text) + 1)) || 'c'::text)) +(9 rows) + +-- JOIN tests +-- no filtering in decompression +SET timescaledb.enable_compressed_direct_batch_delete TO false; +BEGIN; :EXPLAIN DELETE FROM test_pushdown p USING devices3 d WHERE p.device=d.device; SELECT * FROM test_pushdown p ORDER BY p; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 3 + Tuples decompressed: 3 + -> Delete on test_pushdown p (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk p_1 + -> Merge Join (actual rows=1 loops=1) + Merge Cond: (p_1.device = d.device) + -> Sort (actual rows=3 loops=1) + Sort Key: p_1.device + Sort Method: quicksort + -> Seq Scan on _hyper_39_79_chunk p_1 (actual rows=3 loops=1) + -> Sort (actual rows=2 loops=1) + Sort Key: d.device + Sort Method: quicksort + -> Seq Scan on devices3 d (actual rows=3 loops=1) +(15 rows) + + time | device +------------------------------+-------- + Wed Jan 01 00:00:00 2020 PST | a + Wed Jan 01 05:00:00 2020 PST | c +(2 rows) + +RESET timescaledb.enable_compressed_direct_batch_delete; +BEGIN; :EXPLAIN DELETE FROM test_pushdown p USING devices3 d WHERE p.device=d.device; SELECT * FROM test_pushdown p ORDER BY p; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 3 + Tuples decompressed: 3 + -> Delete on test_pushdown p (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk p_1 + -> Merge Join (actual rows=1 loops=1) + Merge Cond: (p_1.device = d.device) + -> Sort (actual rows=3 loops=1) + Sort Key: p_1.device + Sort Method: quicksort + -> Seq Scan on _hyper_39_79_chunk p_1 (actual rows=3 loops=1) + -> Sort (actual rows=2 loops=1) + Sort Key: d.device + Sort Method: quicksort + -> Seq Scan on devices3 d (actual rows=3 loops=1) +(15 rows) + + time | device +------------------------------+-------- + Wed Jan 01 00:00:00 2020 PST | a + Wed Jan 01 05:00:00 2020 PST | c +(2 rows) + +-- can filter in decompression even before executing join +SET timescaledb.enable_compressed_direct_batch_delete TO false; +BEGIN; :EXPLAIN DELETE FROM test_pushdown p USING devices d WHERE p.device=d.device AND d.device ='b'; SELECT * FROM test_pushdown p ORDER BY p; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1 + -> Delete on test_pushdown p (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk p_1 + -> Nested Loop (actual rows=1 loops=1) + -> Seq Scan on devices d (actual rows=1 loops=1) + Filter: (device = 'b'::text) + Rows Removed by Filter: 2 + -> Materialize (actual rows=1 loops=1) + -> Seq Scan on _hyper_39_79_chunk p_1 (actual rows=1 loops=1) + Filter: (device = 'b'::text) +(12 rows) + + time | device +------------------------------+-------- + Wed Jan 01 00:00:00 2020 PST | a + Wed Jan 01 05:00:00 2020 PST | c +(2 rows) + +RESET timescaledb.enable_compressed_direct_batch_delete; +BEGIN; :EXPLAIN DELETE FROM test_pushdown p USING devices d WHERE p.device=d.device AND d.device ='b'; SELECT * FROM test_pushdown p ORDER BY p; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1 + -> Delete on test_pushdown p (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk p_1 + -> Nested Loop (actual rows=1 loops=1) + -> Seq Scan on devices d (actual rows=1 loops=1) + Filter: (device = 'b'::text) + Rows Removed by Filter: 2 + -> Materialize (actual rows=1 loops=1) + -> Seq Scan on _hyper_39_79_chunk p_1 (actual rows=1 loops=1) + Filter: (device = 'b'::text) +(12 rows) + + time | device +------------------------------+-------- + Wed Jan 01 00:00:00 2020 PST | a + Wed Jan 01 05:00:00 2020 PST | c +(2 rows) + +-- test prepared statement +PREPARE q1(text) AS DELETE FROM test_pushdown WHERE device = $1; +SET timescaledb.enable_compressed_direct_batch_delete TO false; +BEGIN; :EXPLAIN EXECUTE q1('a'); ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=1 loops=1) + Filter: (device = 'a'::text) +(7 rows) + +RESET timescaledb.enable_compressed_direct_batch_delete; +BEGIN; :EXPLAIN EXECUTE q1('a'); ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches deleted: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=0 loops=1) + Filter: (device = 'a'::text) +(6 rows) + +BEGIN; :EXPLAIN EXECUTE q1('not here'); ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=0 loops=1) + Filter: (device = 'not here'::text) +(5 rows) + +-- test arrayop pushdown less than 3 decompressions are expected for successful pushdown +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device IN ('a','d'); ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches deleted: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=0 loops=1) + Filter: (device = ANY ('{a,d}'::text[])) +(6 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device = ANY('{a,d}'); ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches deleted: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=0 loops=1) + Filter: (device = ANY ('{a,d}'::text[])) +(6 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device IN ('a',CURRENT_USER); ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Custom Scan (ChunkAppend) on test_pushdown (actual rows=1 loops=1) + Chunks excluded during startup: 0 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=1 loops=1) + Filter: (device = ANY (ARRAY['a'::text, (CURRENT_USER)::text])) +(9 rows) + +-- arroyop pushdown only works for segmentby columns atm so 3 decompressions are expected for now +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE time IN ('2020-01-01','2020-01-02'); ROLLBACK; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 3 + Tuples decompressed: 3 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=2 loops=1) + Filter: ("time" = ANY ('{"Wed Jan 01 00:00:00 2020 PST","Thu Jan 02 00:00:00 2020 PST"}'::timestamp with time zone[])) + Rows Removed by Filter: 1 +(8 rows) + +-- no pushdown for volatile functions +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device = current_query(); ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 3 + Tuples decompressed: 3 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Custom Scan (ChunkAppend) on test_pushdown (actual rows=0 loops=1) + Chunks excluded during startup: 0 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=0 loops=1) + Filter: (device = current_query()) + Rows Removed by Filter: 3 +(10 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device IN ('a',current_query()); ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 3 + Tuples decompressed: 3 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Custom Scan (ChunkAppend) on test_pushdown (actual rows=1 loops=1) + Chunks excluded during startup: 0 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=1 loops=1) + Filter: (device = ANY (ARRAY['a'::text, current_query()])) + Rows Removed by Filter: 2 +(10 rows) + +-- github issue #6858 +-- check update triggers work correctly both on uncompressed and compressed chunks +CREATE TABLE update_trigger_test ( + "entity_id" "uuid" NOT NULL, + "effective_date_time" timestamp with time zone NOT NULL, + "measurement" numeric NOT NULL, + "modified_at" timestamp with time zone DEFAULT "now"() NOT NULL +); +SELECT create_hypertable('update_trigger_test', 'effective_date_time'); + create_hypertable +----------------------------------- + (41,public,update_trigger_test,t) +(1 row) + +CREATE OR REPLACE FUNCTION update_modified_at_test() +RETURNS TRIGGER +LANGUAGE PLPGSQL AS $$ +BEGIN + NEW.modified_at = NOW(); + RETURN NEW; +END; $$; +CREATE TRIGGER update_trigger_test__before_update_sync_modified_at +BEFORE UPDATE ON update_trigger_test +FOR EACH ROW +EXECUTE PROCEDURE update_modified_at_test(); +INSERT INTO update_trigger_test +SELECT 'f2ca7073-1395-5770-8378-7d0339804580', '2024-04-16 04:50:00+02', +1100.00, '2024-04-23 11:56:38.494095+02' FROM generate_series(1,2500,1) c; +VACUUM FULL update_trigger_test; +BEGIN; +UPDATE update_trigger_test SET measurement = measurement + 2 +WHERE update_trigger_test.effective_date_time >= '2020-01-01T00:00:00'::timestamp AT TIME ZONE 'UTC'; +ROLLBACK; +-- try with default compression +ALTER TABLE update_trigger_test SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "update_trigger_test" is set to "" +NOTICE: default order by for hypertable "update_trigger_test" is set to "effective_date_time DESC" +SELECT compress_chunk(show_chunks('update_trigger_test')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_41_81_chunk +(1 row) + +BEGIN; +UPDATE update_trigger_test SET measurement = measurement + 2 +WHERE update_trigger_test.effective_date_time >= '2020-01-01T00:00:00'::timestamp AT TIME ZONE 'UTC'; +ROLLBACK; +-- lets try with segmentby +SELECT decompress_chunk(show_chunks('update_trigger_test')); + decompress_chunk +------------------------------------------ + _timescaledb_internal._hyper_41_81_chunk +(1 row) + +ALTER TABLE update_trigger_test SET (timescaledb.compress, timescaledb.compress_segmentby='entity_id'); +SELECT compress_chunk(show_chunks('update_trigger_test')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_41_81_chunk +(1 row) + +BEGIN; +UPDATE update_trigger_test SET measurement = measurement + 2 +WHERE update_trigger_test.effective_date_time >= '2020-01-01T00:00:00'::timestamp AT TIME ZONE 'UTC'; +ROLLBACK; diff --git a/tsl/test/expected/compression_update_delete-17.out b/tsl/test/expected/compression_update_delete-17.out new file mode 100644 index 00000000000..58d56c91f37 --- /dev/null +++ b/tsl/test/expected/compression_update_delete-17.out @@ -0,0 +1,3405 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set EXPLAIN 'EXPLAIN (costs off, timing off, summary off, analyze)' +CREATE OR REPLACE VIEW compressed_chunk_info_view AS +SELECT + h.schema_name AS hypertable_schema, + h.table_name AS hypertable_name, + c.schema_name as chunk_schema, + c.table_name as chunk_name, + c.status as chunk_status, + comp.schema_name as compressed_chunk_schema, + comp.table_name as compressed_chunk_name +FROM + _timescaledb_catalog.hypertable h JOIN + _timescaledb_catalog.chunk c ON h.id = c.hypertable_id + LEFT JOIN _timescaledb_catalog.chunk comp +ON comp.id = c.compressed_chunk_id +; +CREATE TABLE sample_table ( + time TIMESTAMP WITH TIME ZONE NOT NULL, + sensor_id INTEGER NOT NULL, + cpu double precision null, + temperature double precision null, + name varchar(100) default 'this is a default string value' +); +SELECT * FROM create_hypertable('sample_table', 'time', + chunk_time_interval => INTERVAL '2 months'); +WARNING: column type "character varying" used for "name" does not follow best practices + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 1 | public | sample_table | t +(1 row) + +\set start_date '2022-01-28 01:09:53.583252+05:30' +INSERT INTO sample_table + SELECT + time + (INTERVAL '1 minute' * random()) AS time, + sensor_id, + random() AS cpu, + random()* 100 AS temperature + FROM + generate_series(:'start_date'::timestamptz - INTERVAL '1 months', + :'start_date'::timestamptz - INTERVAL '1 week', + INTERVAL '1 hour') AS g1(time), + generate_series(1, 8, 1 ) AS g2(sensor_id) + ORDER BY + time; +\set start_date '2023-03-17 17:51:11.322998+05:30' +-- insert into new chunks +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 12, 21.98, 33.123, 'new row1'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 12, 17.66, 13.875, 'new row1'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 13, 21.98, 33.123, 'new row2'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 1, 21.98, 33.123, 'new row2'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 4, 21.98, 33.123, 'new row2'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 5, 0.988, 33.123, 'new row3'); +INSERT INTO sample_table VALUES (:'start_date'::timestamptz, 6, 4.6554, 47, 'new row3'); +-- enable compression +ALTER TABLE sample_table SET ( + timescaledb.compress, + timescaledb.compress_segmentby = 'sensor_id' +); +NOTICE: default order by for hypertable "sample_table" is set to ""time" DESC" +-- compress all chunks +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_2_chunk +(2 rows) + +-- check chunk compression status +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------ + 1 | _hyper_1_1_chunk + 1 | _hyper_1_2_chunk +(2 rows) + +-- test rows visibility +BEGIN; +-- report 0 rows +SELECT count(*) FROM sample_table WHERE name = 'updated row'; + count +------- + 0 +(1 row) + +-- update 4 rows +UPDATE sample_table SET name = 'updated row' WHERE cpu = 21.98 AND temperature = 33.123; +-- report 0 rows +SELECT count(*) FROM sample_table WHERE name = 'updated row'; + count +------- + 4 +(1 row) + +ROLLBACK; +-- get count of affected rows +SELECT count(*) FROM sample_table WHERE cpu = 21.98 AND temperature = 33.123; + count +------- + 4 +(1 row) + +-- do update +UPDATE sample_table SET name = 'updated row' WHERE cpu = 21.98 AND temperature = 33.123; +-- get count of updated rows +SELECT count(*) FROM sample_table WHERE name = 'updated row'; + count +------- + 4 +(1 row) + +-- check chunk compression status +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------ + 1 | _hyper_1_1_chunk + 9 | _hyper_1_2_chunk +(2 rows) + +-- recompress the partial chunks +SELECT compress_chunk('_timescaledb_internal._hyper_1_1_chunk'); +NOTICE: chunk "_hyper_1_1_chunk" is already compressed + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +SELECT compress_chunk('_timescaledb_internal._hyper_1_2_chunk'); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_2_chunk +(1 row) + +-- check chunk compression status +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------ + 1 | _hyper_1_1_chunk + 1 | _hyper_1_2_chunk +(2 rows) + +-- get count of affected rows +SELECT count(*) FROM sample_table WHERE name = 'updated row'; + count +------- + 4 +(1 row) + +-- do delete +DELETE FROM sample_table WHERE name = 'updated row'; +-- get count of updated rows +SELECT count(*) FROM sample_table WHERE name = 'updated row'; + count +------- + 0 +(1 row) + +-- check chunk compression status +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------ + 1 | _hyper_1_1_chunk + 9 | _hyper_1_2_chunk +(2 rows) + +-- recompress the paritial chunks +SELECT compress_chunk('_timescaledb_internal._hyper_1_1_chunk'); +NOTICE: chunk "_hyper_1_1_chunk" is already compressed + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +SELECT compress_chunk('_timescaledb_internal._hyper_1_2_chunk'); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_2_chunk +(1 row) + +-- test for IS NULL checks +-- should not UPDATE any rows +UPDATE sample_table SET temperature = 34.21 WHERE sensor_id IS NULL; +-- check chunk compression status +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------ + 1 | _hyper_1_1_chunk + 1 | _hyper_1_2_chunk +(2 rows) + +-- test for IS NOT NULL checks +-- should UPDATE all rows +UPDATE sample_table SET temperature = 34.21 WHERE sensor_id IS NOT NULL; +-- check chunk compression status +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------ + 9 | _hyper_1_1_chunk + 9 | _hyper_1_2_chunk +(2 rows) + +-- recompress the paritial chunks +SELECT compress_chunk('_timescaledb_internal._hyper_1_1_chunk'); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +SELECT compress_chunk('_timescaledb_internal._hyper_1_2_chunk'); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_2_chunk +(1 row) + +-- check chunk compression status +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------ + 1 | _hyper_1_1_chunk + 1 | _hyper_1_2_chunk +(2 rows) + +-- report 0 rows +SELECT COUNT(*) FROM sample_table WHERE name = 'updated row based on < OR > comparison'; + count +------- + 0 +(1 row) + +-- get total count of rows which satifies the condition +SELECT COUNT(*) as "total_affected_rows" FROM sample_table WHERE + time > '2022-01-20 19:10:00.101514+05:30' and + time < '2022-01-20 21:10:43.855297+05:30' \gset +-- perform UPDATE with < and > comparison on SEGMENTBY column +UPDATE sample_table SET name = 'updated row based on < OR > comparison' WHERE + time > '2022-01-20 19:10:00.101514+05:30' and time < '2022-01-20 21:10:43.855297+05:30'; +-- check chunk compression status after UPDATE +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------ + 9 | _hyper_1_1_chunk + 1 | _hyper_1_2_chunk +(2 rows) + +-- count should be same +SELECT COUNT(*) = (:total_affected_rows) FROM sample_table WHERE name = 'updated row based on < OR > comparison'; + ?column? +---------- + t +(1 row) + +DROP TABLE sample_table; +-- test to ensure that only required rows from compressed chunks +-- are extracted if SEGMENTBY column is used in WHERE condition +CREATE TABLE sample_table( + time INT NOT NULL, + device_id INT, + val INT); +SELECT * FROM create_hypertable('sample_table', 'time', chunk_time_interval => 10); + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 3 | public | sample_table | t +(1 row) + +ALTER TABLE sample_table SET ( + timescaledb.compress, + timescaledb.compress_orderby = 'time', + timescaledb.compress_segmentby = 'device_id'); +INSERT INTO sample_table VALUES (1, 1, 1), (2, 2, 1), (3, 3, 1), (10, 3, 2), (11, 4, 2), (11, 1, 2); +-- compress all chunks +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_3_5_chunk + _timescaledb_internal._hyper_3_6_chunk +(2 rows) + +-- get FIRST compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id LIMIT 1 \gset +-- get SECOND compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_2" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id DESC LIMIT 1 \gset +-- there should 2 rows matching the conditions coming from 2 chunks +SELECT * FROM sample_table WHERE device_id = 3 ORDER BY time, device_id; + time | device_id | val +------+-----------+----- + 3 | 3 | 1 + 10 | 3 | 2 +(2 rows) + +-- get total rowcount from compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 3 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 3 +(1 row) + +-- get rowcount from compressed chunks where device_id = 3 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE device_id = 3; + count +------- + 1 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE device_id = 3; + count +------- + 1 +(1 row) + +-- delete rows with device_id = 3 +DELETE FROM sample_table WHERE device_id = 3; +-- get total rowcount from compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 2 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 2 +(1 row) + +-- get rowcount from compressed chunks where device_id = 3 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE device_id = 3; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE device_id = 3; + count +------- + 0 +(1 row) + +-- there should be no rows +SELECT * FROM sample_table WHERE device_id = 3 ORDER BY time, device_id; + time | device_id | val +------+-----------+----- +(0 rows) + +-- there should 2 rows matching the conditions coming from 2 chunks +SELECT val FROM sample_table WHERE 1 = device_id ORDER BY time, device_id; + val +----- + 1 + 2 +(2 rows) + +-- get total rowcount from compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 2 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 2 +(1 row) + +-- get rowcount from compressed chunks where device_id = 1 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE 1 = device_id; + count +------- + 1 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE 1 = device_id; + count +------- + 1 +(1 row) + +-- update rows with device_id = 1 +UPDATE sample_table SET val = 200 WHERE 1 = device_id; +-- get total rowcount from compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 1 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 1 +(1 row) + +-- get rowcount from compressed chunks where device_id = 1 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE 1 = device_id; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE 1 = device_id; + count +------- + 0 +(1 row) + +-- there should be 2 rows +SELECT val FROM sample_table WHERE 1 = device_id ORDER BY time, device_id; + val +----- + 200 + 200 +(2 rows) + +DROP TABLE sample_table; +CREATE TABLE sample_table( + time INT NOT NULL, + device_id INT, + val INT); +SELECT * FROM create_hypertable('sample_table', 'time', chunk_time_interval => 10); + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 5 | public | sample_table | t +(1 row) + +ALTER TABLE sample_table SET ( + timescaledb.compress, + timescaledb.compress_segmentby = 'time, val'); +NOTICE: default order by for hypertable "sample_table" is set to "" +INSERT INTO sample_table VALUES (1, 1, 1), (2, 2, 1), (3, 3, 1), (1, 3, 2), (11, 4, 2), (1, 1, 2); +-- compress all chunks +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_5_9_chunk + _timescaledb_internal._hyper_5_10_chunk +(2 rows) + +-- get FIRST compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id LIMIT 1 \gset +-- get SECOND compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_2" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id DESC LIMIT 1 \gset +-- there should 2 rows matching the conditions coming from 2 chunks +SELECT * FROM sample_table WHERE time = 1 AND val = 2 ORDER BY time, device_id; + time | device_id | val +------+-----------+----- + 1 | 1 | 2 + 1 | 3 | 2 +(2 rows) + +-- get total rowcount from compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 4 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 1 +(1 row) + +-- get rowcount from compressed chunks where time = 1 AND val = 2 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE time = 1 AND val = 2; + count +------- + 1 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE time = 1 AND val = 2; + count +------- + 0 +(1 row) + +-- delete rows with time = 1 AND val = 2 +EXPLAIN (costs off, verbose) DELETE FROM sample_table WHERE time = 1 AND 2 = val; + QUERY PLAN +---------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Delete on public.sample_table + Delete on _timescaledb_internal._hyper_5_9_chunk sample_table_1 + -> Seq Scan on _timescaledb_internal._hyper_5_9_chunk sample_table_1 + Output: sample_table_1.tableoid, sample_table_1.ctid + Filter: ((sample_table_1."time" = 1) AND (2 = sample_table_1.val)) +(6 rows) + +-- should delete rows from 1 of the compressed chunks +DELETE FROM sample_table WHERE time = 1 AND 2 = val; +-- get total rowcount from compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 3 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 1 +(1 row) + +-- get rowcount from compressed chunks wheretime = 1 AND val = 2 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE time = 1 AND val = 2; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE time = 1 AND val = 2; + count +------- + 0 +(1 row) + +-- there should be no rows +SELECT * FROM sample_table WHERE time = 1 AND val = 2 ORDER BY time, device_id; + time | device_id | val +------+-----------+----- +(0 rows) + +DROP TABLE sample_table; +-- Test chunk compile time startup exclusion +CREATE OR REPLACE FUNCTION now_s() +RETURNS timestamptz LANGUAGE PLPGSQL STABLE AS +$BODY$ +BEGIN + RETURN '2017-08-22T10:00:00'::timestamptz; +END; +$BODY$; +CREATE TABLE sample_table(time timestamptz NOT NULL, temp float, colorid integer, attr jsonb); +SELECT create_hypertable('sample_table', 'time', chunk_time_interval => 2628000000000); + create_hypertable +--------------------------- + (7,public,sample_table,t) +(1 row) + +-- create three chunks +INSERT INTO sample_table VALUES ('2017-03-22T09:18:22', 23.5, 1, '{"a": 1, "b": 2}'), + ('2017-03-22T09:18:23', 21.5, 1, '{"a": 1, "b": 2}'), + ('2017-05-22T09:18:22', 36.2, 2, '{"c": 3, "b": 2}'), + ('2017-05-22T09:18:23', 15.2, 2, '{"c": 3}'), + ('2017-08-22T09:18:22', 34.1, 3, '{"c": 4}'); +ALTER TABLE sample_table SET (timescaledb.compress, + timescaledb.compress_segmentby = 'time'); +NOTICE: default order by for hypertable "sample_table" is set to "" +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_7_13_chunk + _timescaledb_internal._hyper_7_14_chunk + _timescaledb_internal._hyper_7_15_chunk +(3 rows) + +-- ensure all chunks are compressed +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------- + 1 | _hyper_7_13_chunk + 1 | _hyper_7_14_chunk + 1 | _hyper_7_15_chunk +(3 rows) + +-- report 0 rows +SELECT * FROM sample_table WHERE time > now_s() + '-1 month' AND colorid = 4; + time | temp | colorid | attr +------+------+---------+------ +(0 rows) + +-- update 1 row +UPDATE sample_table SET colorid = 4 WHERE time > now_s() + '-1 month'; +-- report 1 row +SELECT * FROM sample_table WHERE time > now_s() + '-1 month' AND colorid = 4; + time | temp | colorid | attr +------------------------------+------+---------+---------- + Tue Aug 22 09:18:22 2017 PDT | 34.1 | 4 | {"c": 4} +(1 row) + +-- ensure that 1 chunk is partially compressed +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------- + 1 | _hyper_7_13_chunk + 1 | _hyper_7_14_chunk + 9 | _hyper_7_15_chunk +(3 rows) + +DROP TABLE sample_table; +-- test for NULL values in SEGMENTBY column +CREATE TABLE sample_table( + time INT, + device_id INT, + val INT); +SELECT * FROM create_hypertable('sample_table', 'time', chunk_time_interval => 10); +NOTICE: adding not-null constraint to column "time" + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 9 | public | sample_table | t +(1 row) + +ALTER TABLE sample_table SET ( + timescaledb.compress, + timescaledb.compress_orderby = 'time', + timescaledb.compress_segmentby = 'device_id'); +INSERT INTO sample_table VALUES (1, 1, 1), (2, NULL, 1), (3, NULL, 1), (10, NULL, 2), (11, NULL, 2), (11, 1, 2); +-- compress all chunks +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_9_19_chunk + _timescaledb_internal._hyper_9_20_chunk +(2 rows) + +-- get FIRST compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id LIMIT 1 \gset +-- get SECOND compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_2" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id DESC LIMIT 1 \gset +-- get total rowcount from compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 2 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 2 +(1 row) + +-- get rowcount from compressed chunks where device_id IS NULL +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE device_id IS NULL; + count +------- + 1 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE device_id IS NULL; + count +------- + 1 +(1 row) + +-- get total count of SEGMENTBY column with NULL values +SELECT COUNT(*) FROM sample_table WHERE device_id IS NULL; + count +------- + 4 +(1 row) + +-- delete NULL values in SEGMENTBY column +DELETE FROM sample_table WHERE device_id IS NULL; +-- ensure that not all rows are moved to staging area +-- should have few compressed rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 1 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 1 +(1 row) + +-- get rowcount from compressed chunks where device_id IS NULL +-- should report 0 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE device_id IS NULL; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE device_id IS NULL; + count +------- + 0 +(1 row) + +-- check chunk compression status after DELETE +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+------------------- + 9 | _hyper_9_19_chunk + 9 | _hyper_9_20_chunk +(2 rows) + +DROP TABLE sample_table; +-- test for IS NOT NULL values in SEGMENTBY column +CREATE TABLE sample_table( + time INT, + device_id INT, + val INT); +SELECT * FROM create_hypertable('sample_table', 'time', chunk_time_interval => 10); +NOTICE: adding not-null constraint to column "time" + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 11 | public | sample_table | t +(1 row) + +ALTER TABLE sample_table SET ( + timescaledb.compress, + timescaledb.compress_orderby = 'time', + timescaledb.compress_segmentby = 'device_id'); +INSERT INTO sample_table VALUES (1, NULL, 1), (2, NULL, 1), (3, NULL, 1), (10, 3, 2), (11, 2, 2), (11, 1, 2); +-- compress all chunks +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_11_23_chunk + _timescaledb_internal._hyper_11_24_chunk +(2 rows) + +-- get FIRST compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id LIMIT 1 \gset +-- get SECOND compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_2" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id DESC LIMIT 1 \gset +SELECT COUNT(*) FROM sample_table WHERE val = 1234; + count +------- + 0 +(1 row) + +-- UPDATE based on IS NOT NULL condition on SEGMENTBY column +UPDATE sample_table SET val = 1234 WHERE device_id IS NOT NULL; +-- get total count of SEGMENTBY column with NULL values +SELECT COUNT(*) FROM sample_table WHERE val = 1234; + count +------- + 3 +(1 row) + +-- check chunk compression status after DELETE +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+-------------------- + 1 | _hyper_11_23_chunk + 9 | _hyper_11_24_chunk +(2 rows) + +DROP TABLE sample_table; +-- test to for <= AND >= on SEGMENTBY column +CREATE TABLE sample_table( + time INT, + device_id INT, + val INT); +SELECT * FROM create_hypertable('sample_table', 'time', chunk_time_interval => 10); +NOTICE: adding not-null constraint to column "time" + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 13 | public | sample_table | t +(1 row) + +ALTER TABLE sample_table SET ( + timescaledb.compress, + timescaledb.compress_orderby = 'time', + timescaledb.compress_segmentby = 'device_id, val'); +INSERT INTO sample_table VALUES (1, 1, 1), (2, NULL, 1), (3, 4, 1), (10, NULL, 2), (11, NULL, 2), (11, 1, 2), (13, 5, 3); +INSERT INTO sample_table VALUES (4, 3, NULL), (6, NULL, NULL), (12, NULL, NULL), (13, 4, NULL); +-- compress all chunks +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_13_27_chunk + _timescaledb_internal._hyper_13_28_chunk +(2 rows) + +-- test will multiple NULL/NOT NULL columns +BEGIN; +-- report 0 row +SELECT * FROM sample_table WHERE device_id IS NULL AND val = 987; + time | device_id | val +------+-----------+----- +(0 rows) + +-- these 3 rows will be affected by below UPDATE +SELECT * FROM sample_table WHERE device_id IS NULL AND val IS NOT NULL ORDER BY 1; + time | device_id | val +------+-----------+----- + 2 | | 1 + 10 | | 2 + 11 | | 2 +(3 rows) + +-- update 3 rows +UPDATE sample_table SET val = 987 WHERE device_id IS NULL AND val IS NOT NULL; +-- report 3 row +SELECT * FROM sample_table WHERE device_id IS NULL AND val = 987; + time | device_id | val +------+-----------+----- + 2 | | 987 + 10 | | 987 + 11 | | 987 +(3 rows) + +ROLLBACK; +-- test will multiple columns +BEGIN; +-- report 2 rows +SELECT * FROM sample_table WHERE device_id IS NULL AND val = 2; + time | device_id | val +------+-----------+----- + 10 | | 2 + 11 | | 2 +(2 rows) + +-- delete 2 rows +DELETE from sample_table WHERE device_id IS NULL AND val = 2; +-- report 0 rows +SELECT * FROM sample_table WHERE device_id IS NULL AND val = 2; + time | device_id | val +------+-----------+----- +(0 rows) + +ROLLBACK; +BEGIN; +-- report 1 row +SELECT * FROM sample_table WHERE device_id = 3 AND val IS NULL; + time | device_id | val +------+-----------+----- + 4 | 3 | +(1 row) + +-- delete 1 rows +DELETE from sample_table WHERE device_id = 3 AND val IS NULL; +-- report 0 rows +SELECT * FROM sample_table WHERE device_id = 3 AND val IS NULL; + time | device_id | val +------+-----------+----- +(0 rows) + +ROLLBACK; +-- get FIRST compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id LIMIT 1 \gset +-- get SECOND compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_2" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id DESC LIMIT 1 \gset +-- get total rowcount from compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 5 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 5 +(1 row) + +-- get rowcount from compressed chunks where device_id >= 4 AND val <= 1 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE device_id >= 4 AND val <= 1; + count +------- + 1 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE device_id >= 4 AND val <= 1; + count +------- + 0 +(1 row) + +-- get total count of SEGMENTBY column with device_id >= 4 AND val <= 1 +SELECT COUNT(*) FROM sample_table WHERE device_id >= 4 AND val <= 1; + count +------- + 1 +(1 row) + +-- delete NULL values in SEGMENTBY column +DELETE FROM sample_table WHERE device_id >= 4 AND val <= 1; +-- ensure that not all rows are moved to staging area +-- should have few compressed rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 4 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 5 +(1 row) + +-- get rowcount from compressed chunks where device_id IS NULL +-- should report 0 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE device_id >= 4 AND val <= 1; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE device_id >= 4 AND val <= 1; + count +------- + 0 +(1 row) + +-- check chunk compression status after DELETE +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+-------------------- + 1 | _hyper_13_27_chunk + 1 | _hyper_13_28_chunk +(2 rows) + +-- added tests for code coverage +UPDATE sample_table SET time = 21 WHERE (device_id) in ( 30, 51, 72, 53); +UPDATE sample_table SET time = 21 WHERE device_id + 365 = 8765; +DROP TABLE sample_table; +-- test with different physical layout +CREATE TABLE sample_table( + time INT, + device_id INT, + val INT default 8, + a INT default 10, + b INT default 11, + c INT default 12, + d INT, + e INT default 13); +SELECT * FROM create_hypertable('sample_table', 'time', chunk_time_interval => 5); +NOTICE: adding not-null constraint to column "time" + hypertable_id | schema_name | table_name | created +---------------+-------------+--------------+--------- + 15 | public | sample_table | t +(1 row) + +INSERT INTO sample_table (time, device_id, d) VALUES (1, 1, 1), (2, NULL, 1), (3, 4, 1), (10, NULL, 2), (11, NULL, 2), (11, 1, 2), (13, 5, 3); +ALTER TABLE sample_table DROP COLUMN c; +ALTER TABLE sample_table SET ( + timescaledb.compress, + timescaledb.compress_segmentby = 'device_id, d'); +NOTICE: default order by for hypertable "sample_table" is set to ""time" DESC" +-- compress all chunks +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_15_31_chunk + _timescaledb_internal._hyper_15_32_chunk +(2 rows) + +ALTER TABLE sample_table ADD COLUMN c int default 23; +-- check chunk compression status +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+-------------------- + 1 | _hyper_15_31_chunk + 1 | _hyper_15_32_chunk +(2 rows) + +-- get FIRST uncompressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "UNCOMPRESS_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE '_hyper_%' +ORDER BY ch1.id LIMIT 1 \gset +-- get SECOND uncompressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "UNCOMPRESS_CHUNK_2" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE '_hyper_%' +ORDER BY ch1.id DESC LIMIT 1 \gset +-- get FIRST compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id LIMIT 1 \gset +-- get SECOND compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_2" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id DESC LIMIT 1 \gset +-- ensure segment by column index position in compressed and uncompressed +-- chunk is different +SELECT attname, attnum +FROM pg_attribute +WHERE attrelid IN (:'COMPRESS_CHUNK_1'::regclass, :'UNCOMPRESS_CHUNK_1'::regclass) AND attname = 'd' +ORDER BY attnum; + attname | attnum +---------+-------- + d | 4 + d | 7 +(2 rows) + +SELECT attname, attnum +FROM pg_attribute +WHERE attrelid IN (:'COMPRESS_CHUNK_2'::regclass, :'UNCOMPRESS_CHUNK_2'::regclass) AND attname = 'd' +ORDER BY attnum; + attname | attnum +---------+-------- + d | 4 + d | 7 +(2 rows) + +-- get total rowcount from compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 3 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 3 +(1 row) + +-- get rowcount from compressed chunks where d = 3 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE d = 3; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE d = 3; + count +------- + 1 +(1 row) + +-- get total count of SEGMENTBY column with d = 3 +SELECT COUNT(*) FROM sample_table WHERE d = 3; + count +------- + 1 +(1 row) + +-- delete based on SEGMENTBY column +DELETE FROM sample_table WHERE d = 3; +-- ensure that not all rows are moved to staging area +-- should have few compressed rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 3 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 2 +(1 row) + +-- get rowcount from compressed chunks where d = 3 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE d = 3; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE d = 3; + count +------- + 0 +(1 row) + +-- check chunk compression status after DELETE +SELECT chunk_status, + chunk_name as "CHUNK_NAME" +FROM compressed_chunk_info_view +WHERE hypertable_name = 'sample_table' ORDER BY chunk_name; + chunk_status | CHUNK_NAME +--------------+-------------------- + 1 | _hyper_15_31_chunk + 1 | _hyper_15_32_chunk +(2 rows) + +-- get rowcount from compressed chunks where device_id IS NULL +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE device_id IS NULL; + count +------- + 1 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE device_id IS NULL; + count +------- + 1 +(1 row) + +BEGIN; +-- report 0 row +SELECT * FROM sample_table WHERE a = 247; + time | device_id | val | a | b | d | e | c +------+-----------+-----+---+---+---+---+--- +(0 rows) + +-- delete 1 row +UPDATE sample_table SET a = 247 WHERE device_id IS NULL; +-- ensure rows are visible +SELECT * FROM sample_table WHERE a = 247; + time | device_id | val | a | b | d | e | c +------+-----------+-----+-----+----+---+----+---- + 2 | | 8 | 247 | 11 | 1 | 13 | 23 + 11 | | 8 | 247 | 11 | 2 | 13 | 23 + 10 | | 8 | 247 | 11 | 2 | 13 | 23 +(3 rows) + +ROLLBACK; +-- report 0 rows +SELECT COUNT(*) FROM sample_table WHERE a = 247; + count +------- + 0 +(1 row) + +-- UPDATE based on NULL values in SEGMENTBY column +UPDATE sample_table SET a = 247 WHERE device_id IS NULL; +-- report 3 rows +SELECT COUNT(*) FROM sample_table WHERE a = 247; + count +------- + 3 +(1 row) + +-- ensure that not all rows are moved to staging area +-- should have few compressed rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 2 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2; + count +------- + 1 +(1 row) + +-- get rowcount from compressed chunks where device_id IS NULL +-- should report 0 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE device_id IS NULL; + count +------- + 0 +(1 row) + +SELECT COUNT(*) FROM :COMPRESS_CHUNK_2 WHERE device_id IS NULL; + count +------- + 0 +(1 row) + +DROP TABLE sample_table; +-- test with different physical layout +CREATE TABLE sample_table(time timestamptz, c1 text, c2 text, c3 text); +SELECT create_hypertable('sample_table','time'); +NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------- + (17,public,sample_table,t) +(1 row) + +INSERT INTO sample_table SELECT '2000-01-01'; +ALTER TABLE sample_table DROP column c3; +ALTER TABLE sample_table ADD column c4 text; +INSERT INTO sample_table SELECT '2000-01-01', '1', '2', '3'; +ALTER TABLE sample_table SET (timescaledb.compress,timescaledb.compress_segmentby='c4'); +NOTICE: default order by for hypertable "sample_table" is set to ""time" DESC" +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_17_35_chunk +(1 row) + +BEGIN; +-- report 1 row +SELECT * FROM sample_table WHERE c4 IS NULL; + time | c1 | c2 | c4 +------------------------------+----+----+---- + Sat Jan 01 00:00:00 2000 PST | | | +(1 row) + +-- delete 1 row +DELETE FROM sample_table WHERE c4 IS NULL; +-- report 0 rows +SELECT * FROM sample_table WHERE c4 IS NULL; + time | c1 | c2 | c4 +------+----+----+---- +(0 rows) + +ROLLBACK; +-- get FIRST compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id \gset +-- report 2 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 2 +(1 row) + +-- report 1 row +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 IS NULL; + count +------- + 1 +(1 row) + +-- report 1 row +SELECT * FROM sample_table WHERE c4 IS NULL; + time | c1 | c2 | c4 +------------------------------+----+----+---- + Sat Jan 01 00:00:00 2000 PST | | | +(1 row) + +-- delete 1 row +DELETE FROM sample_table WHERE c4 IS NULL; +-- report 0 row +SELECT * FROM sample_table WHERE c4 IS NULL; + time | c1 | c2 | c4 +------+----+----+---- +(0 rows) + +-- report 1 row which ensure that only required row is moved and deleted +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1; + count +------- + 1 +(1 row) + +-- report 0 row +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 IS NULL; + count +------- + 0 +(1 row) + +DROP TABLE sample_table; +-- test filtering with ORDER BY columns +CREATE TABLE sample_table(time timestamptz, c1 int, c2 int, c3 int, c4 int); +SELECT create_hypertable('sample_table','time',chunk_time_interval=>'1 day'::interval); +NOTICE: adding not-null constraint to column "time" + create_hypertable +---------------------------- + (19,public,sample_table,t) +(1 row) + +ALTER TABLE sample_table SET (timescaledb.compress,timescaledb.compress_segmentby='c4', timescaledb.compress_orderby='c1,c2,time'); +INSERT INTO sample_table +SELECT t, c1, c2, c3, c4 +FROM generate_series(:'start_date'::timestamptz - INTERVAL '9 hours', + :'start_date'::timestamptz, + INTERVAL '1 hour') t, + generate_series(0,9,1) c1, + generate_series(0,9,1) c2, + generate_series(0,9,1) c3, + generate_series(0,9,1) c4; +SELECT compress_chunk(show_chunks('sample_table')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_19_37_chunk +(1 row) + +-- get FIRST chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE '_hyper_%' +ORDER BY ch1.id LIMIT 1 \gset +-- get FIRST compressed chunk +SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%' +ORDER BY ch1.id LIMIT 1 \gset +-- check that you uncompress and delete only for exact SEGMENTBY value +SET timescaledb.debug_compression_path_info TO true; +BEGIN; +-- report 10 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 = 5; + count +------- + 10 +(1 row) + +-- report 10k rows +SELECT COUNT(*) FROM sample_table WHERE c4 = 5; + count +------- + 10000 +(1 row) + +-- fetch total and number of affected rows +SELECT COUNT(*) AS "total_rows" FROM sample_table \gset +SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c4 = 5 \gset +-- delete 10k rows +DELETE FROM sample_table WHERE c4 = 5; +INFO: Index "compress_hyper_20_38_chunk_c4__ts_meta_sequence_num_idx" is used for scan. +INFO: Number of compressed rows fetched from index: 10. Number of compressed rows filtered by heap filters: 0. +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c4 = 5; + count +------- + 0 +(1 row) + +-- report 0 rows in uncompressed chunk +SELECT COUNT(*) FROM ONLY :CHUNK_1; + count +------- + 0 +(1 row) + +-- report 0 rows in compressed chunk +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 = 5; + count +------- + 0 +(1 row) + +-- validate correct number of rows was deleted +-- report true +SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table; + ?column? +---------- + t +(1 row) + +ROLLBACK; +-- check that you uncompress and delete only for less than SEGMENTBY value +BEGIN; +-- report 50 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 < 5; + count +------- + 50 +(1 row) + +-- report 50k rows +SELECT COUNT(*) FROM sample_table WHERE c4 < 5; + count +------- + 50000 +(1 row) + +-- fetch total and number of affected rows +SELECT COUNT(*) AS "total_rows" FROM sample_table \gset +SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c4 < 5 \gset +-- delete 50k rows +DELETE FROM sample_table WHERE c4 < 5; +INFO: Index "compress_hyper_20_38_chunk_c4__ts_meta_sequence_num_idx" is used for scan. +INFO: Number of compressed rows fetched from index: 50. Number of compressed rows filtered by heap filters: 0. +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c4 < 5; + count +------- + 0 +(1 row) + +-- report 0 rows in uncompressed chunk +SELECT COUNT(*) FROM ONLY :CHUNK_1; + count +------- + 0 +(1 row) + +-- report 0 rows in compressed chunk +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 < 5; + count +------- + 0 +(1 row) + +-- validate correct number of rows was deleted +-- report true +SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table; + ?column? +---------- + t +(1 row) + +ROLLBACK; +-- check that you uncompress and delete only for greater and equal than SEGMENTBY value +BEGIN; +-- report 50 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 >= 5; + count +------- + 50 +(1 row) + +-- report 50k rows +SELECT COUNT(*) FROM sample_table WHERE c4 >= 5; + count +------- + 50000 +(1 row) + +-- fetch total and number of affected rows +SELECT COUNT(*) AS "total_rows" FROM sample_table \gset +SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c4 >= 5 \gset +-- delete 50k rows +DELETE FROM sample_table WHERE c4 >= 5; +INFO: Index "compress_hyper_20_38_chunk_c4__ts_meta_sequence_num_idx" is used for scan. +INFO: Number of compressed rows fetched from index: 50. Number of compressed rows filtered by heap filters: 0. +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c4 >= 5; + count +------- + 0 +(1 row) + +-- report 0 rows in uncompressed chunk +SELECT COUNT(*) FROM ONLY :CHUNK_1; + count +------- + 0 +(1 row) + +-- report 0 rows in compressed chunk +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 >= 5; + count +------- + 0 +(1 row) + +-- validate correct number of rows was deleted +-- report true +SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table; + ?column? +---------- + t +(1 row) + +ROLLBACK; +-- check that you uncompress and delete only for exact ORDERBY value +-- this will uncompress segments which have min <= value and max >= value +BEGIN; +-- report 10k rows +SELECT COUNT(*) FROM sample_table WHERE c2 = 3; + count +------- + 10000 +(1 row) + +-- report 100 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_min_2 <= 3 and _ts_meta_max_2 >= 3; + count +------- + 100 +(1 row) + +-- fetch total and number of affected rows +SELECT COUNT(*) AS "total_rows" FROM sample_table \gset +SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c2 = 3 \gset +-- delete 10k rows +DELETE FROM sample_table WHERE c2 = 3; +INFO: Number of compressed rows fetched from table scan: 100. Number of compressed rows filtered: 0. +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c2 = 3; + count +------- + 0 +(1 row) + +-- report 90k rows in uncompressed chunk +SELECT COUNT(*) FROM ONLY :CHUNK_1; + count +------- + 90000 +(1 row) + +-- report 0 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_min_2 <= 3 and _ts_meta_max_2 >= 3; + count +------- + 0 +(1 row) + +-- validate correct number of rows was deleted +-- report true +SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table; + ?column? +---------- + t +(1 row) + +ROLLBACK; +-- check that you uncompress and delete only for less then ORDERBY value +-- this will uncompress segments which have min < value +BEGIN; +-- report 20k rows +SELECT COUNT(*) FROM sample_table WHERE c1 < 2; + count +------- + 20000 +(1 row) + +-- report 20 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_max_1 < 2; + count +------- + 20 +(1 row) + +-- fetch total and number of affected rows +SELECT COUNT(*) AS "total_rows" FROM sample_table \gset +SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c1 < 2 \gset +-- delete 20k rows +DELETE FROM sample_table WHERE c1 < 2; +INFO: Number of compressed rows fetched from table scan: 20. Number of compressed rows filtered: 0. +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c1 < 2; + count +------- + 0 +(1 row) + +-- report 0 rows in uncompressed chunk +SELECT COUNT(*) FROM ONLY :CHUNK_1; + count +------- + 0 +(1 row) + +-- report 0 rows in compressed chunk +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_max_1 < 2; + count +------- + 0 +(1 row) + +-- validate correct number of rows was deleted +-- report true +SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table; + ?column? +---------- + t +(1 row) + +ROLLBACK; +-- check that you uncompress and delete only for greater or equal then ORDERBY value +-- this will uncompress segments which have max >= value +BEGIN; +-- report 30k rows +SELECT COUNT(*) FROM sample_table WHERE c1 >= 7; + count +------- + 30000 +(1 row) + +-- report 30 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_min_1 >= 7; + count +------- + 30 +(1 row) + +-- fetch total and number of affected rows +SELECT COUNT(*) AS "total_rows" FROM sample_table \gset +SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c1 >= 7 \gset +-- delete 30k rows +DELETE FROM sample_table WHERE c1 >= 7; +INFO: Number of compressed rows fetched from table scan: 30. Number of compressed rows filtered: 0. +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c1 >= 7; + count +------- + 0 +(1 row) + +-- report 0 rows in uncompressed chunk +SELECT COUNT(*) FROM ONLY :CHUNK_1; + count +------- + 0 +(1 row) + +-- report 0 rows in compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_min_1 >= 7; + count +------- + 0 +(1 row) + +-- validate correct number of rows was deleted +-- report true +SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table; + ?column? +---------- + t +(1 row) + +ROLLBACK; +-- check that you uncompress and delete only tuples which satisfy SEGMENTBY +-- and ORDERBY qualifiers, segments only contain one distinct value for +-- these qualifiers, everything should be deleted that was decompressed +BEGIN; +-- report 1k rows +SELECT COUNT(*) FROM sample_table WHERE c4 = 5 and c1 = 5; + count +------- + 1000 +(1 row) + +-- report 1 row in compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 = 5 AND _ts_meta_min_1 <= 5 and _ts_meta_max_1 >= 5; + count +------- + 1 +(1 row) + +-- fetch total and number of affected rows +SELECT COUNT(*) AS "total_rows" FROM sample_table \gset +SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c4 = 5 and c1 = 5 \gset +-- delete 1k rows +DELETE FROM sample_table WHERE c4 = 5 and c1 = 5; +INFO: Index "compress_hyper_20_38_chunk_c4__ts_meta_sequence_num_idx" is used for scan. +INFO: Number of compressed rows fetched from index: 10. Number of compressed rows filtered by heap filters: 9. +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c4 = 5 and c1 = 5; + count +------- + 0 +(1 row) + +-- report 0 rows in uncompressed chunk +SELECT COUNT(*) FROM ONLY :CHUNK_1; + count +------- + 0 +(1 row) + +-- report 0 rows in compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 = 5 AND _ts_meta_min_1 <= 5 and _ts_meta_max_1 >= 5; + count +------- + 0 +(1 row) + +-- validate correct number of rows was deleted +-- report true +SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table; + ?column? +---------- + t +(1 row) + +ROLLBACK; +-- check that you uncompress and delete only tuples which satisfy SEGMENTBY +-- and ORDERBY qualifiers, segments contain more than one distinct value for +-- these qualifiers, not everything should be deleted that was decompressed +BEGIN; +-- report 4k rows +SELECT COUNT(*) FROM sample_table WHERE c4 > 5 and c2 = 5; + count +------- + 4000 +(1 row) + +-- report 40 rows in compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 > 5 AND _ts_meta_min_2 <= 5 and _ts_meta_max_2 >= 5; + count +------- + 40 +(1 row) + +-- fetch total and number of affected rows +SELECT COUNT(*) AS "total_rows" FROM sample_table \gset +SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c4 > 5 and c2 = 5 \gset +-- delete 4k rows +DELETE FROM sample_table WHERE c4 > 5 and c2 = 5; +INFO: Index "compress_hyper_20_38_chunk_c4__ts_meta_sequence_num_idx" is used for scan. +INFO: Number of compressed rows fetched from index: 40. Number of compressed rows filtered by heap filters: 0. +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c4 > 5 and c2 = 5; + count +------- + 0 +(1 row) + +-- report 36k rows in uncompressed chunk +SELECT COUNT(*) FROM ONLY :CHUNK_1; + count +------- + 36000 +(1 row) + +-- report 0 rows in compressed chunks +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 > 5 AND _ts_meta_min_2 <= 5 and _ts_meta_max_2 >= 5; + count +------- + 0 +(1 row) + +-- validate correct number of rows was deleted +-- report true +SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table; + ?column? +---------- + t +(1 row) + +ROLLBACK; +-- check that you uncompress and delete only tuples which satisfy SEGMENTBY +-- and ORDERBY qualifiers. +-- no: of rows satisfying SEGMENTBY qualifiers is 10 +-- no: of rows satisfying ORDERBY qualifiers is 3 +-- Once both qualifiers are applied ensure that only 7 rows are present in +-- compressed chunk +BEGIN; +-- report 0 rows in uncompressed chunk +SELECT COUNT(*) FROM ONLY :CHUNK_1; + count +------- + 0 +(1 row) + +-- report 10 compressed rows for given condition c4 = 4 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 = 4; + count +------- + 10 +(1 row) + +-- report 3 compressed rows for given condition c4 = 4 and c1 >= 7 +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 = 4 AND _ts_meta_max_1 >= 7; + count +------- + 3 +(1 row) + +SELECT COUNT(*) AS "total_rows" FROM :COMPRESS_CHUNK_1 WHERE c4 = 4 \gset +SELECT COUNT(*) AS "total_affected_rows" FROM :COMPRESS_CHUNK_1 WHERE c4 = 4 AND _ts_meta_max_1 >= 7 \gset +UPDATE sample_table SET c3 = c3 + 0 WHERE c4 = 4 AND c1 >= 7; +INFO: Index "compress_hyper_20_38_chunk_c4__ts_meta_sequence_num_idx" is used for scan. +INFO: Number of compressed rows fetched from index: 10. Number of compressed rows filtered by heap filters: 7. +-- report 7 rows +SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 = 4; + count +------- + 7 +(1 row) + +-- ensure correct number of rows are moved from compressed chunk +-- report true +SELECT COUNT(*) = :total_rows - :total_affected_rows FROM :COMPRESS_CHUNK_1 WHERE c4 = 4; + ?column? +---------- + t +(1 row) + +ROLLBACK; +RESET timescaledb.debug_compression_path_info; +--github issue: 5640 +CREATE TABLE tab1(filler_1 int, filler_2 int, filler_3 int, time timestamptz NOT NULL, device_id int, v0 int, v1 int, v2 float, v3 float); +CREATE INDEX ON tab1(time); +CREATE INDEX ON tab1(device_id,time); +SELECT create_hypertable('tab1','time',create_default_indexes:=false); + create_hypertable +-------------------- + (21,public,tab1,t) +(1 row) + +ALTER TABLE tab1 DROP COLUMN filler_1; +INSERT INTO tab1(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-05 23:55:00+0','57m') gtime(time), generate_series(1,1,1) gdevice(device_id); +ALTER TABLE tab1 DROP COLUMN filler_2; +INSERT INTO tab1(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id-1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-06 0:00:00+0'::timestamptz,'2000-01-12 23:55:00+0','58m') gtime(time), generate_series(1,1,1) gdevice(device_id); +ALTER TABLE tab1 DROP COLUMN filler_3; +INSERT INTO tab1(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-13 0:00:00+0'::timestamptz,'2000-01-19 23:55:00+0','59m') gtime(time), generate_series(1,1,1) gdevice(device_id); +ANALYZE tab1; +-- compress chunks +ALTER TABLE tab1 SET (timescaledb.compress, timescaledb.compress_orderby='time DESC', timescaledb.compress_segmentby='device_id'); +SELECT compress_chunk(show_chunks('tab1')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_21_39_chunk + _timescaledb_internal._hyper_21_40_chunk + _timescaledb_internal._hyper_21_41_chunk +(3 rows) + +-- ensure there is an index scan generated for below DELETE query +BEGIN; +SELECT count(*) FROM tab1 WHERE device_id = 1; + count +------- + 472 +(1 row) + +INSERT INTO tab1(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 1000, NULL FROM generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-05 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id); +SELECT count(*) FROM tab1 WHERE device_id = 1; + count +------- + 4070 +(1 row) + +ANALYZE tab1; +EXPLAIN (costs off) DELETE FROM public.tab1 WHERE public.tab1.device_id = 1; + QUERY PLAN +------------------------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) + -> Delete on tab1 + Delete on _hyper_21_39_chunk tab1_1 + Delete on _hyper_21_40_chunk tab1_2 + Delete on _hyper_21_41_chunk tab1_3 + -> Append + -> Index Scan using _hyper_21_39_chunk_tab1_device_id_time_idx on _hyper_21_39_chunk tab1_1 + Index Cond: (device_id = 1) + -> Seq Scan on _hyper_21_40_chunk tab1_2 + Filter: (device_id = 1) + -> Seq Scan on _hyper_21_41_chunk tab1_3 + Filter: (device_id = 1) +(12 rows) + +DELETE FROM tab1 WHERE tab1.device_id = 1; +SELECT count(*) FROM tab1 WHERE device_id = 1; + count +------- + 0 +(1 row) + +ROLLBACK; +-- github issue 5658 +-- verify that bitmap heap scans work on all the correct data and +-- none of it left over after the dml command +BEGIN; +SELECT count(*) FROM tab1 WHERE device_id = 1; + count +------- + 472 +(1 row) + +INSERT INTO tab1(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 1000, NULL FROM generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-05 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id); +SELECT count(*) FROM tab1 WHERE device_id = 1; + count +------- + 4070 +(1 row) + +ANALYZE tab1; +SET enable_seqscan = off; +SET enable_indexscan = off; +EXPLAIN (costs off) DELETE FROM tab1 WHERE tab1.device_id = 1; + QUERY PLAN +----------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Delete on tab1 + Delete on _hyper_21_39_chunk tab1_1 + Delete on _hyper_21_40_chunk tab1_2 + Delete on _hyper_21_41_chunk tab1_3 + -> Append + -> Bitmap Heap Scan on _hyper_21_39_chunk tab1_1 + Recheck Cond: (device_id = 1) + -> Bitmap Index Scan on _hyper_21_39_chunk_tab1_device_id_time_idx + Index Cond: (device_id = 1) + -> Seq Scan on _hyper_21_40_chunk tab1_2 + Filter: (device_id = 1) + -> Seq Scan on _hyper_21_41_chunk tab1_3 + Filter: (device_id = 1) +(14 rows) + +DELETE FROM tab1 WHERE tab1.device_id = 1; +SELECT count(*) FROM tab1 WHERE device_id = 1; + count +------- + 0 +(1 row) + +ROLLBACK; +-- create hypertable with space partitioning and compression +CREATE TABLE tab2(filler_1 int, filler_2 int, filler_3 int, time timestamptz NOT NULL, device_id int, v0 int, v1 int, v2 float, v3 float); +CREATE INDEX ON tab2(time); +CREATE INDEX ON tab2(device_id,time); +SELECT create_hypertable('tab2','time','device_id',3,create_default_indexes:=false); + create_hypertable +-------------------- + (23,public,tab2,t) +(1 row) + +ALTER TABLE tab2 DROP COLUMN filler_1; +INSERT INTO tab2(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-05 23:55:00+0','35m') gtime(time), generate_series(1,1,1) gdevice(device_id); +ALTER TABLE tab2 DROP COLUMN filler_2; +INSERT INTO tab2(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-06 0:00:00+0'::timestamptz,'2000-01-12 23:55:00+0','45m') gtime(time), generate_series(1,1,1) gdevice(device_id); +ALTER TABLE tab2 DROP COLUMN filler_3; +INSERT INTO tab2(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-13 0:00:00+0'::timestamptz,'2000-01-19 23:55:00+0','55m') gtime(time), generate_series(1,1,1) gdevice(device_id); +ANALYZE tab2; +-- compress chunks +ALTER TABLE tab2 SET (timescaledb.compress, timescaledb.compress_orderby='time DESC', timescaledb.compress_segmentby='device_id'); +SELECT compress_chunk(show_chunks('tab2')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_23_45_chunk + _timescaledb_internal._hyper_23_46_chunk + _timescaledb_internal._hyper_23_47_chunk +(3 rows) + +-- below test will cause chunks of tab2 to get decompressed +-- without fix for issue #5460 +SET timescaledb.enable_optimizations = OFF; +BEGIN; +DELETE FROM tab1 t1 USING tab2 t2 WHERE t1.device_id = t2.device_id AND t2.time > '2000-01-01'; +ROLLBACK; +--cleanup +RESET timescaledb.enable_optimizations; +DROP table tab1; +DROP table tab2; +-- test joins with UPDATE/DELETE on compression chunks +CREATE TABLE join_test1(time timestamptz NOT NULL,device text, value float); +CREATE TABLE join_test2(time timestamptz NOT NULL,device text, value float); +CREATE VIEW chunk_status AS SELECT ht.table_name AS hypertable, ch.table_name AS chunk,ch.status from _timescaledb_catalog.chunk ch INNER JOIN _timescaledb_catalog.hypertable ht ON ht.id=ch.hypertable_id AND ht.table_name IN ('join_test1','join_test2') ORDER BY ht.id, ch.id; +SELECT table_name FROM create_hypertable('join_test1', 'time'); + table_name +------------ + join_test1 +(1 row) + +SELECT table_name FROM create_hypertable('join_test2', 'time'); + table_name +------------ + join_test2 +(1 row) + +ALTER TABLE join_test1 SET (timescaledb.compress, timescaledb.compress_segmentby='device'); +NOTICE: default order by for hypertable "join_test1" is set to ""time" DESC" +ALTER TABLE join_test2 SET (timescaledb.compress, timescaledb.compress_segmentby='device'); +NOTICE: default order by for hypertable "join_test2" is set to ""time" DESC" +INSERT INTO join_test1 VALUES ('2000-01-01','d1',0.1), ('2000-02-01','d1',0.1), ('2000-03-01','d1',0.1); +INSERT INTO join_test2 VALUES ('2000-02-01','d1',0.1), ('2000-02-01','d2',0.1), ('2000-02-01','d3',0.1); +SELECT compress_chunk(show_chunks('join_test1')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_25_51_chunk + _timescaledb_internal._hyper_25_52_chunk + _timescaledb_internal._hyper_25_53_chunk +(3 rows) + +SELECT compress_chunk(show_chunks('join_test2')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_26_54_chunk +(1 row) + +SELECT * FROM chunk_status; + hypertable | chunk | status +------------+--------------------+-------- + join_test1 | _hyper_25_51_chunk | 1 + join_test1 | _hyper_25_52_chunk | 1 + join_test1 | _hyper_25_53_chunk | 1 + join_test2 | _hyper_26_54_chunk | 1 +(4 rows) + +BEGIN; +DELETE FROM join_test1 USING join_test2; +-- only join_test1 chunks should have status 9 +SELECT * FROM chunk_status; + hypertable | chunk | status +------------+--------------------+-------- + join_test1 | _hyper_25_51_chunk | 9 + join_test1 | _hyper_25_52_chunk | 9 + join_test1 | _hyper_25_53_chunk | 9 + join_test2 | _hyper_26_54_chunk | 1 +(4 rows) + +ROLLBACK; +BEGIN; +DELETE FROM join_test2 USING join_test1; +-- only join_test2 chunks should have status 9 +SELECT * FROM chunk_status; + hypertable | chunk | status +------------+--------------------+-------- + join_test1 | _hyper_25_51_chunk | 1 + join_test1 | _hyper_25_52_chunk | 1 + join_test1 | _hyper_25_53_chunk | 1 + join_test2 | _hyper_26_54_chunk | 9 +(4 rows) + +ROLLBACK; +BEGIN; +DELETE FROM join_test1 t1 USING join_test1 t2 WHERE t1.time = '2000-01-01'; +-- only first chunk of join_test1 should have status change +SELECT * FROM chunk_status; + hypertable | chunk | status +------------+--------------------+-------- + join_test1 | _hyper_25_51_chunk | 9 + join_test1 | _hyper_25_52_chunk | 1 + join_test1 | _hyper_25_53_chunk | 1 + join_test2 | _hyper_26_54_chunk | 1 +(4 rows) + +ROLLBACK; +BEGIN; +DELETE FROM join_test1 t1 USING join_test1 t2 WHERE t2.time = '2000-01-01'; +-- all chunks of join_test1 should have status 9 +SELECT * FROM chunk_status; + hypertable | chunk | status +------------+--------------------+-------- + join_test1 | _hyper_25_51_chunk | 9 + join_test1 | _hyper_25_52_chunk | 9 + join_test1 | _hyper_25_53_chunk | 9 + join_test2 | _hyper_26_54_chunk | 1 +(4 rows) + +ROLLBACK; +BEGIN; +UPDATE join_test1 t1 SET value = t1.value + 1 FROM join_test2 t2; +-- only join_test1 chunks should have status 9 +SELECT * FROM chunk_status; + hypertable | chunk | status +------------+--------------------+-------- + join_test1 | _hyper_25_51_chunk | 9 + join_test1 | _hyper_25_52_chunk | 9 + join_test1 | _hyper_25_53_chunk | 9 + join_test2 | _hyper_26_54_chunk | 1 +(4 rows) + +ROLLBACK; +BEGIN; +UPDATE join_test2 t1 SET value = t1.value + 1 FROM join_test1 t2; +-- only join_test2 chunks should have status 9 +SELECT * FROM chunk_status; + hypertable | chunk | status +------------+--------------------+-------- + join_test1 | _hyper_25_51_chunk | 1 + join_test1 | _hyper_25_52_chunk | 1 + join_test1 | _hyper_25_53_chunk | 1 + join_test2 | _hyper_26_54_chunk | 9 +(4 rows) + +ROLLBACK; +BEGIN; +UPDATE join_test1 t1 SET value = t1.value + 1 FROM join_test1 t2 WHERE t1.time = '2000-01-01'; +-- only first chunk of join_test1 should have status 9 +SELECT * FROM chunk_status; + hypertable | chunk | status +------------+--------------------+-------- + join_test1 | _hyper_25_51_chunk | 9 + join_test1 | _hyper_25_52_chunk | 1 + join_test1 | _hyper_25_53_chunk | 1 + join_test2 | _hyper_26_54_chunk | 1 +(4 rows) + +ROLLBACK; +BEGIN; +UPDATE join_test1 t1 SET value = t1.value + 1 FROM join_test1 t2 WHERE t2.time = '2000-01-01'; +-- all chunks of join_test1 should have status 9 +SELECT * FROM chunk_status; + hypertable | chunk | status +------------+--------------------+-------- + join_test1 | _hyper_25_51_chunk | 9 + join_test1 | _hyper_25_52_chunk | 9 + join_test1 | _hyper_25_53_chunk | 9 + join_test2 | _hyper_26_54_chunk | 1 +(4 rows) + +ROLLBACK; +DROP TABLE join_test1; +DROP TABLE join_test2; +-- test if index scan qualifiers are properly used +CREATE TABLE index_scan_test(time timestamptz NOT NULL, device_id int, value float); +SELECT create_hypertable('index_scan_test','time',create_default_indexes:=false); + create_hypertable +------------------------------- + (29,public,index_scan_test,t) +(1 row) + +INSERT INTO index_scan_test(time,device_id,value) SELECT time, device_id, device_id + 0.5 FROM generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-01 23:55:00+0','1m') gtime(time), generate_series(1,5,1) gdevice(device_id); +-- compress chunks +ALTER TABLE index_scan_test SET (timescaledb.compress, timescaledb.compress_orderby='time DESC', timescaledb.compress_segmentby='device_id'); +SELECT compress_chunk(show_chunks('index_scan_test')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_29_59_chunk +(1 row) + +ANALYZE index_scan_test; +SELECT ch1.schema_name|| '.' || ch1.table_name AS "CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht +WHERE ht.table_name = 'index_scan_test' +AND ch1.hypertable_id = ht.id +AND ch1.table_name LIKE '_hyper%' +ORDER BY ch1.id LIMIT 1 \gset +SELECT ch2.schema_name|| '.' || ch2.table_name AS "COMP_CHUNK_1" +FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.chunk ch2, _timescaledb_catalog.hypertable ht +WHERE ht.table_name = 'index_scan_test' +AND ch1.hypertable_id = ht.id +AND ch1.compressed_chunk_id = ch2.id +ORDER BY ch2.id LIMIT 1 \gset +INSERT INTO index_scan_test(time,device_id,value) SELECT time, device_id, device_id + 0.5 FROM generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-05 23:55:00+0','1m') gtime(time), generate_series(1,5,1) gdevice(device_id); +-- test index on single column +BEGIN; +SELECT count(*) as "UNCOMP_LEFTOVER" FROM ONLY :CHUNK_1 WHERE device_id != 2 \gset +CREATE INDEX ON index_scan_test(device_id); +EXPLAIN (costs off, verbose) DELETE FROM index_scan_test WHERE device_id = 2; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Delete on public.index_scan_test + Delete on _timescaledb_internal._hyper_29_59_chunk index_scan_test_1 + -> Index Scan using _hyper_29_59_chunk_index_scan_test_device_id_idx on _timescaledb_internal._hyper_29_59_chunk index_scan_test_1 + Output: index_scan_test_1.tableoid, index_scan_test_1.ctid + Index Cond: (index_scan_test_1.device_id = 2) +(6 rows) + +DELETE FROM index_scan_test WHERE device_id = 2; +-- everything should be deleted +SELECT count(*) FROM index_scan_test where device_id = 2; + count +------- + 0 +(1 row) + +-- there shouldn't be anything in the uncompressed chunk where device_id = 2 +SELECT count(*) = :UNCOMP_LEFTOVER FROM ONLY :CHUNK_1; + ?column? +---------- + t +(1 row) + +-- there shouldn't be anything in the compressed chunk from device_id = 2 +SELECT count(*) FROM :COMP_CHUNK_1 where device_id = 2; + count +------- + 0 +(1 row) + +ROLLBACK; +-- test multi column index +BEGIN; +SELECT count(*) as "UNCOMP_LEFTOVER" FROM ONLY :CHUNK_1 WHERE device_id != 2 OR time <= '2000-01-02'::timestamptz \gset +CREATE INDEX ON index_scan_test(device_id, time); +EXPLAIN (costs off, verbose) DELETE FROM index_scan_test WHERE device_id = 2 AND time > '2000-01-02'::timestamptz; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Delete on public.index_scan_test + Delete on _timescaledb_internal._hyper_29_59_chunk index_scan_test_1 + -> Index Scan using _hyper_29_59_chunk_index_scan_test_device_id_time_idx on _timescaledb_internal._hyper_29_59_chunk index_scan_test_1 + Output: index_scan_test_1.tableoid, index_scan_test_1.ctid + Index Cond: ((index_scan_test_1.device_id = 2) AND (index_scan_test_1."time" > 'Sun Jan 02 00:00:00 2000 PST'::timestamp with time zone)) +(6 rows) + +DELETE FROM index_scan_test WHERE device_id = 2 AND time > '2000-01-02'::timestamptz; +-- everything should be deleted +SELECT count(*) FROM index_scan_test WHERE device_id = 2 AND time > '2000-01-02'::timestamptz; + count +------- + 0 +(1 row) + +-- there shouldn't be anything in the uncompressed chunk that matches predicates +SELECT count(*) = :UNCOMP_LEFTOVER FROM ONLY :CHUNK_1; + ?column? +---------- + t +(1 row) + +-- there shouldn't be anything in the compressed chunk that matches predicates +SELECT count(*) FROM :COMP_CHUNK_1 WHERE device_id = 2 AND _ts_meta_max_1 >= '2000-01-02'::timestamptz; + count +------- + 0 +(1 row) + +ROLLBACK; +-- test index with filter condition +BEGIN; +SELECT count(*) as "UNCOMP_LEFTOVER" FROM ONLY :CHUNK_1 WHERE device_id != 2 OR time <= '2000-01-02'::timestamptz \gset +CREATE INDEX ON index_scan_test(device_id); +EXPLAIN (costs off, verbose) DELETE FROM index_scan_test WHERE device_id = 2 AND time > '2000-01-02'::timestamptz; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Delete on public.index_scan_test + Delete on _timescaledb_internal._hyper_29_59_chunk index_scan_test_1 + -> Index Scan using _hyper_29_59_chunk_index_scan_test_device_id_idx on _timescaledb_internal._hyper_29_59_chunk index_scan_test_1 + Output: index_scan_test_1.tableoid, index_scan_test_1.ctid + Index Cond: (index_scan_test_1.device_id = 2) + Filter: (index_scan_test_1."time" > 'Sun Jan 02 00:00:00 2000 PST'::timestamp with time zone) +(7 rows) + +DELETE FROM index_scan_test WHERE device_id = 2 AND time > '2000-01-02'::timestamptz; +-- everything should be deleted +SELECT count(*) FROM index_scan_test WHERE device_id = 2 AND time > '2000-01-02'::timestamptz; + count +------- + 0 +(1 row) + +-- there shouldn't be anything in the uncompressed chunk that matches predicates +SELECT count(*) = :UNCOMP_LEFTOVER FROM ONLY :CHUNK_1; + ?column? +---------- + t +(1 row) + +-- there shouldn't be anything in the compressed chunk that matches predicates +SELECT count(*) FROM :COMP_CHUNK_1 WHERE device_id = 2 AND _ts_meta_max_1 >= '2000-01-02'::timestamptz; + count +------- + 0 +(1 row) + +ROLLBACK; +-- test for disabling DML decompression +SHOW timescaledb.enable_dml_decompression; + timescaledb.enable_dml_decompression +-------------------------------------- + on +(1 row) + +SET timescaledb.enable_dml_decompression = false; +\set ON_ERROR_STOP 0 +-- should ERROR both UPDATE/DELETE statements because the DML decompression is disabled +UPDATE sample_table SET c3 = NULL WHERE c4 = 5; +ERROR: UPDATE/DELETE is disabled on compressed chunks +DELETE FROM sample_table WHERE c4 = 5; +ERROR: UPDATE/DELETE is disabled on compressed chunks +\set ON_ERROR_STOP 1 +-- make sure reseting the GUC we will be able to UPDATE/DELETE compressed chunks +RESET timescaledb.enable_dml_decompression; +SHOW timescaledb.enable_dml_decompression; + timescaledb.enable_dml_decompression +-------------------------------------- + on +(1 row) + +BEGIN; +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c4 = 5 AND c3 IS NULL; + count +------- + 0 +(1 row) + +UPDATE sample_table SET c3 = NULL WHERE c4 = 5; +-- report 10k rows +SELECT count(*) FROM sample_table WHERE c4 = 5 AND c3 IS NULL; + count +------- + 10000 +(1 row) + +ROLLBACK; +BEGIN; +-- report 10k rows +SELECT count(*) FROM sample_table WHERE c4 = 5; + count +------- + 10000 +(1 row) + +DELETE FROM sample_table WHERE c4 = 5; +-- report 0 rows +SELECT count(*) FROM sample_table WHERE c4 = 5; + count +------- + 0 +(1 row) + +ROLLBACK; +-- create new uncompressed chunk +INSERT INTO sample_table +SELECT t, 1, 1, 1, 1 +FROM generate_series('2023-05-04 00:00:00-00'::timestamptz, + '2023-05-04 00:00:00-00'::timestamptz + INTERVAL '2 hours', + INTERVAL '1 hour') t; +-- check chunk compression status +SELECT chunk_name, is_compressed +FROM timescaledb_information.chunks +WHERE hypertable_name = 'sample_table' +ORDER BY chunk_name; + chunk_name | is_compressed +--------------------+--------------- + _hyper_19_37_chunk | t + _hyper_19_61_chunk | f +(2 rows) + +-- test for uncompressed and compressed chunks +SHOW timescaledb.enable_dml_decompression; + timescaledb.enable_dml_decompression +-------------------------------------- + on +(1 row) + +SET timescaledb.enable_dml_decompression = false; +BEGIN; +-- report 3 rows +SELECT count(*) FROM sample_table WHERE time >= '2023-05-04 00:00:00-00'::timestamptz; + count +------- + 3 +(1 row) + +-- delete from uncompressed chunk should work +DELETE FROM sample_table WHERE time >= '2023-05-04 00:00:00-00'::timestamptz; +-- report 0 rows +SELECT count(*) FROM sample_table WHERE time >= '2023-05-04 00:00:00-00'::timestamptz; + count +------- + 0 +(1 row) + +ROLLBACK; +BEGIN; +-- report 0 rows +SELECT count(*) FROM sample_table WHERE time >= '2023-05-04 00:00:00-00'::timestamptz AND c3 IS NULL; + count +------- + 0 +(1 row) + +UPDATE sample_table SET c3 = NULL WHERE time >= '2023-05-04 00:00:00-00'::timestamptz; +-- report 3 rows +SELECT count(*) FROM sample_table WHERE time >= '2023-05-04 00:00:00-00'::timestamptz AND c3 IS NULL; + count +------- + 3 +(1 row) + +ROLLBACK; +\set ON_ERROR_STOP 0 +-- should ERROR both UPDATE/DELETE statements because the DML decompression is disabled +-- and both statements we're touching compressed and uncompressed chunks +UPDATE sample_table SET c3 = NULL WHERE time >= '2023-03-17 00:00:00-00'::timestamptz AND c3 IS NULL; +ERROR: UPDATE/DELETE is disabled on compressed chunks +DELETE FROM sample_table WHERE time >= '2023-03-17 00:00:00-00'::timestamptz; +ERROR: UPDATE/DELETE is disabled on compressed chunks +\set ON_ERROR_STOP 1 +--github issue: 5586 +--testcase with multiple indexes +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE DATABASE test5586; +\c test5586 :ROLE_SUPERUSER +SET client_min_messages = ERROR; +CREATE EXTENSION timescaledb CASCADE; +CREATE TABLE tab1(filler_1 int, filler_2 int, filler_3 int, time timestamptz NOT NULL, device_id int, v0 int, v1 int, v2 float, v3 float); +SELECT create_hypertable('tab1','time',create_default_indexes:=false); + create_hypertable +------------------- + (1,public,tab1,t) +(1 row) + +INSERT INTO tab1(filler_1, filler_2, filler_3,time,device_id,v0,v1,v2,v3) SELECT device_id, device_id+1, device_id + 2, time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-05 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id); +ALTER TABLE tab1 SET (timescaledb.compress, timescaledb.compress_orderby='time DESC', timescaledb.compress_segmentby='device_id, filler_1, filler_2, filler_3'); +SELECT compress_chunk(show_chunks('tab1')); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +SELECT format('%I.%I', schema_name, table_name) AS "CHUNK" FROM _timescaledb_catalog.chunk WHERE hypertable_id = 2 \gset +-- create multiple indexes on compressed hypertable +DROP INDEX _timescaledb_internal.compress_hyper_2_2_chunk_device_id_filler_1_filler_2_filler_idx; +CREATE INDEX ON :CHUNK (_ts_meta_min_1); +CREATE INDEX ON :CHUNK (_ts_meta_min_1, _ts_meta_sequence_num); +CREATE INDEX ON :CHUNK (_ts_meta_min_1, _ts_meta_max_1, filler_1); +CREATE INDEX filler_1 ON :CHUNK (filler_1); +CREATE INDEX filler_2 ON :CHUNK (filler_2); +CREATE INDEX filler_3 ON :CHUNK (filler_3); +-- below indexes should be selected +CREATE INDEX filler_1_filler_2 ON :CHUNK (filler_1, filler_2); +CREATE INDEX filler_2_filler_3 ON :CHUNK (filler_2, filler_3); +set timescaledb.debug_compression_path_info to on; +BEGIN; +SELECT COUNT(*) FROM tab1 WHERE filler_3 = 5 AND filler_2 = 4; + count +------- + 3598 +(1 row) + +UPDATE tab1 SET v0 = v1 + v2 WHERE filler_3 = 5 AND filler_2 = 4; +INFO: Index "filler_2_filler_3" is used for scan. +INFO: Number of compressed rows fetched from index: 4. Number of compressed rows filtered by heap filters: 0. +ROLLBACK; +BEGIN; +SELECT COUNT(*) FROM tab1 WHERE filler_1 < 5 AND filler_2 = 4; + count +------- + 3598 +(1 row) + +UPDATE tab1 SET v0 = v1 + v2 WHERE filler_1 < 5 AND filler_2 = 4; +INFO: Index "filler_1_filler_2" is used for scan. +INFO: Number of compressed rows fetched from index: 4. Number of compressed rows filtered by heap filters: 0. +ROLLBACK; +-- idealy filler_1 index should be selected, +-- instead first matching index is selected +BEGIN; +SELECT COUNT(*) FROM tab1 WHERE filler_1 < 5; + count +------- + 14392 +(1 row) + +UPDATE tab1 SET v0 = v1 + v2 WHERE filler_1 < 5; +INFO: Index "compress_hyper_2_2_chunk__ts_meta_min_1__ts_meta_max_1_fill_idx" is used for scan. +INFO: Number of compressed rows fetched from index: 16. Number of compressed rows filtered by heap filters: 0. +ROLLBACK; +RESET timescaledb.debug_compression_path_info; +DROP TABLE tab1; +\c :TEST_DBNAME :ROLE_SUPERUSER +DROP DATABASE test5586; +--issue: #6024 +CREATE TABLE t(a integer, b integer); +SELECT create_hypertable('t', 'a', chunk_time_interval=> 10); +NOTICE: adding not-null constraint to column "a" + create_hypertable +------------------- + (31,public,t,t) +(1 row) + +INSERT INTO t values(1, 2); +ALTER TABLE t SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "t" is set to "" +NOTICE: default order by for hypertable "t" is set to "a DESC" +SELECT compress_chunk(show_chunks('t')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_31_62_chunk +(1 row) + +-- should not crash +UPDATE t SET b = 2 WHERE tableoid = 0; +UPDATE t SET b = 2 WHERE tableoid is null; +DROP TABLE t; +-- github issue: 6367 +\c :TEST_DBNAME :ROLE_SUPERUSER +CREATE DATABASE test6367; +\c test6367 :ROLE_SUPERUSER +SET client_min_messages = ERROR; +CREATE EXTENSION timescaledb CASCADE; +CREATE TABLE t6367 ( + time timestamptz NOT NULL, + source_id varchar(64) NOT NULL, + label varchar, + data jsonb +); +SELECT table_name FROM create_hypertable('t6367', 'time'); + table_name +------------ + t6367 +(1 row) + +ALTER TABLE t6367 SET(timescaledb.compress, timescaledb.compress_segmentby = 'source_id, label', timescaledb.compress_orderby = 'time'); +INSERT INTO t6367 +SELECT time, source_id, label, '{}' AS data +FROM +generate_series('1990-01-01'::timestamptz, '1990-01-10'::timestamptz, INTERVAL '1 day') AS g1(time), +generate_series(1, 3, 1 ) AS g2(source_id), +generate_series(1, 3, 1 ) AS g3(label); +SELECT compress_chunk(c) FROM show_chunks('t6367') c; + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_2_chunk +(2 rows) + +SELECT format('%I.%I', schema_name, table_name) AS "CHUNK1" FROM _timescaledb_catalog.chunk WHERE hypertable_id = 2 ORDER BY id LIMIT 1 \gset +SELECT format('%I.%I', schema_name, table_name) AS "CHUNK2" FROM _timescaledb_catalog.chunk WHERE hypertable_id = 2 ORDER BY id LIMIT 1 OFFSET 1 \gset +DROP INDEX _timescaledb_internal.compress_hyper_2_3_chunk_source_id_label__ts_meta_sequence__idx; +DROP INDEX _timescaledb_internal.compress_hyper_2_4_chunk_source_id_label__ts_meta_sequence__idx; +-- testcase with no index, should use seq scan +set timescaledb.debug_compression_path_info to on; +BEGIN; +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 10 +(1 row) + +UPDATE t6367 SET source_id = '0' WHERE source_id = '2' AND label = '1'; +INFO: Number of compressed rows fetched from table scan: 1. Number of compressed rows filtered: 0. +INFO: Number of compressed rows fetched from table scan: 1. Number of compressed rows filtered: 0. +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 0 +(1 row) + +ROLLBACK; +-- test case with an index which has only one +-- of the segmentby filters +CREATE INDEX source_id_idx1 ON :CHUNK1 (source_id); +CREATE INDEX source_id_idx2 ON :CHUNK2 (source_id); +BEGIN; +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 10 +(1 row) + +UPDATE t6367 SET source_id = '0' WHERE source_id = '2' AND label = '1'; +INFO: Index "source_id_idx1" is used for scan. +INFO: Number of compressed rows fetched from index: 3. Number of compressed rows filtered by heap filters: 2. +INFO: Index "source_id_idx2" is used for scan. +INFO: Number of compressed rows fetched from index: 3. Number of compressed rows filtered by heap filters: 2. +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 0 +(1 row) + +ROLLBACK; +-- test that we are filtering NULL checks +BEGIN; +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label IS NULL; + count +------- + 0 +(1 row) + +UPDATE t6367 SET source_id = '0' WHERE source_id = '2' AND label IS NULL; +INFO: Index "source_id_idx1" is used for scan. +INFO: Number of compressed rows fetched from index: 3. Number of compressed rows filtered by heap filters: 3. +INFO: Index "source_id_idx2" is used for scan. +INFO: Number of compressed rows fetched from index: 3. Number of compressed rows filtered by heap filters: 3. +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label IS NULL; + count +------- + 0 +(1 row) + +ROLLBACK; +BEGIN; +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label IS NOT NULL; + count +------- + 30 +(1 row) + +UPDATE t6367 SET source_id = '0' WHERE source_id = '2' AND label IS NOT NULL; +INFO: Index "source_id_idx1" is used for scan. +INFO: Number of compressed rows fetched from index: 3. Number of compressed rows filtered by heap filters: 0. +INFO: Index "source_id_idx2" is used for scan. +INFO: Number of compressed rows fetched from index: 3. Number of compressed rows filtered by heap filters: 0. +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label IS NOT NULL; + count +------- + 0 +(1 row) + +ROLLBACK; +DROP INDEX _timescaledb_internal.source_id_idx1; +DROP INDEX _timescaledb_internal.source_id_idx2; +-- test case with an index which has multiple same column +CREATE INDEX source_id_source_id_idx ON :CHUNK1 (source_id, source_id); +BEGIN; +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 10 +(1 row) + +UPDATE t6367 SET source_id = '0' WHERE source_id = '2' AND label = '1'; +INFO: Index "source_id_source_id_idx" is used for scan. +INFO: Number of compressed rows fetched from index: 3. Number of compressed rows filtered by heap filters: 2. +INFO: Number of compressed rows fetched from table scan: 1. Number of compressed rows filtered: 0. +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 0 +(1 row) + +ROLLBACK; +DROP INDEX _timescaledb_internal.source_id_source_id_idx; +-- test using a non-btree index +-- fallback to heap scan +CREATE INDEX brin_source_id_idx ON :CHUNK1 USING brin (source_id); +BEGIN; +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 10 +(1 row) + +UPDATE t6367 SET source_id = '0' WHERE source_id = '2' AND label = '1'; +INFO: Number of compressed rows fetched from table scan: 1. Number of compressed rows filtered: 0. +INFO: Number of compressed rows fetched from table scan: 1. Number of compressed rows filtered: 0. +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 0 +(1 row) + +ROLLBACK; +DROP INDEX _timescaledb_internal.brin_source_id_idx; +-- test using an expression index +-- should fallback to heap scans +CREATE INDEX expr_source_id_idx ON :CHUNK1 (upper(source_id)); +BEGIN; +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 10 +(1 row) + +UPDATE t6367 SET source_id = '0' WHERE source_id = '2' AND label = '1'; +INFO: Number of compressed rows fetched from table scan: 1. Number of compressed rows filtered: 0. +INFO: Number of compressed rows fetched from table scan: 1. Number of compressed rows filtered: 0. +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 0 +(1 row) + +ROLLBACK; +DROP INDEX _timescaledb_internal.expr_source_id_idx; +-- test using a partial index +-- should fallback to heap scans +CREATE INDEX partial_source_id_idx ON :CHUNK1 (source_id) +WHERE _ts_meta_min_1 > '1990-01-01'::timestamptz; +BEGIN; +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 10 +(1 row) + +UPDATE t6367 SET source_id = '0' WHERE source_id = '2' AND label = '1'; +INFO: Number of compressed rows fetched from table scan: 1. Number of compressed rows filtered: 0. +INFO: Number of compressed rows fetched from table scan: 1. Number of compressed rows filtered: 0. +SELECT count(*) FROM t6367 WHERE source_id = '2' AND label = '1'; + count +------- + 0 +(1 row) + +ROLLBACK; +RESET timescaledb.debug_compression_path_info; +DROP TABLE t6367; +\c :TEST_DBNAME :ROLE_SUPERUSER +DROP DATABASE test6367; +-- Text limitting decompressed tuple during an UPDATE or DELETE +CREATE TABLE test_limit ( + timestamp int not null, + id bigint +); +SELECT * FROM create_hypertable('test_limit', 'timestamp', chunk_time_interval=>10000); + hypertable_id | schema_name | table_name | created +---------------+-------------+------------+--------- + 33 | public | test_limit | t +(1 row) + +INSERT INTO test_limit SELECT t, i FROM generate_series(1,10000,1) t CROSS JOIN generate_series(1,3,1) i; +ALTER TABLE test_limit SET ( + timescaledb.compress, + timescaledb.compress_orderby = 'timestamp' +); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_limit" is set to "" +SELECT count(compress_chunk(ch)) FROM show_chunks('test_limit') ch; + count +------- + 2 +(1 row) + +SET timescaledb.max_tuples_decompressed_per_dml_transaction = 5000; +\set VERBOSITY default +\set ON_ERROR_STOP 0 +-- Updating or deleting everything will break the set limit. +UPDATE test_limit SET id = 0; +ERROR: tuple decompression limit exceeded by operation +DETAIL: current limit: 5000, tuples decompressed: 30000 +HINT: Consider increasing timescaledb.max_tuples_decompressed_per_dml_transaction or set to 0 (unlimited). +DELETE FROM test_limit WHERE id > 0; +ERROR: tuple decompression limit exceeded by operation +DETAIL: current limit: 5000, tuples decompressed: 30000 +HINT: Consider increasing timescaledb.max_tuples_decompressed_per_dml_transaction or set to 0 (unlimited). +-- Setting to 0 should remove the limit. +SET timescaledb.max_tuples_decompressed_per_dml_transaction = 0; +UPDATE test_limit SET id = 0; +DELETE FROM test_limit WHERE id > 0; +\set ON_ERROR_STOP 1 +DROP TABLE test_limit; +-- check partial compression with DML +CREATE TABLE test_partials (time timestamptz NOT NULL, a int, b int); +SELECT create_hypertable('test_partials', 'time'); + create_hypertable +----------------------------- + (35,public,test_partials,t) +(1 row) + +INSERT INTO test_partials +VALUES -- chunk1 + ('2020-01-01 00:00'::timestamptz, 1, 2), + ('2020-01-01 00:01'::timestamptz, 2, 2), + ('2020-01-01 00:04'::timestamptz, 1, 2), + -- chunk2 + ('2021-01-01 00:00'::timestamptz, 1, 2), + ('2021-01-01 00:04'::timestamptz, 1, 2), + -- chunk3 + ('2022-01-01 00:00'::timestamptz, 1, 2), + ('2022-01-01 00:04'::timestamptz, 1, 2); +-- enable compression, compress all chunks +ALTER TABLE test_partials SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "test_partials" is set to "" +NOTICE: default order by for hypertable "test_partials" is set to ""time" DESC" +SELECT compress_chunk(show_chunks('test_partials')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_35_68_chunk + _timescaledb_internal._hyper_35_69_chunk + _timescaledb_internal._hyper_35_70_chunk +(3 rows) + +-- fully compressed +EXPLAIN (costs off) SELECT * FROM test_partials ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on test_partials + Order: test_partials."time" + -> Custom Scan (DecompressChunk) on _hyper_35_68_chunk + -> Sort + Sort Key: compress_hyper_36_71_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_36_71_chunk + -> Custom Scan (DecompressChunk) on _hyper_35_69_chunk + -> Sort + Sort Key: compress_hyper_36_72_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_36_72_chunk + -> Custom Scan (DecompressChunk) on _hyper_35_70_chunk + -> Sort + Sort Key: compress_hyper_36_73_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_36_73_chunk +(14 rows) + +-- verify correct results +SELECT * FROM test_partials ORDER BY time; + time | a | b +------------------------------+---+--- + Wed Jan 01 00:00:00 2020 PST | 1 | 2 + Wed Jan 01 00:01:00 2020 PST | 2 | 2 + Wed Jan 01 00:04:00 2020 PST | 1 | 2 + Fri Jan 01 00:00:00 2021 PST | 1 | 2 + Fri Jan 01 00:04:00 2021 PST | 1 | 2 + Sat Jan 01 00:00:00 2022 PST | 1 | 2 + Sat Jan 01 00:04:00 2022 PST | 1 | 2 +(7 rows) + +-- check that DML causes transparent decompression and that +-- data gets shifted to the uncompressed parts +EXPLAIN (costs off) DELETE FROM test_partials WHERE time >= ALL(SELECT time from test_partials); + QUERY PLAN +------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) + -> Delete on test_partials + Delete on _hyper_35_68_chunk test_partials_1 + Delete on _hyper_35_69_chunk test_partials_2 + Delete on _hyper_35_70_chunk test_partials_3 + -> Custom Scan (ChunkAppend) on test_partials + -> Seq Scan on _hyper_35_68_chunk test_partials_1 + Filter: (ALL ("time" >= (SubPlan 1).col1)) + SubPlan 1 + -> Materialize + -> Append + -> Custom Scan (DecompressChunk) on _hyper_35_68_chunk + -> Seq Scan on compress_hyper_36_71_chunk + -> Seq Scan on _hyper_35_68_chunk + -> Custom Scan (DecompressChunk) on _hyper_35_69_chunk + -> Seq Scan on compress_hyper_36_72_chunk + -> Seq Scan on _hyper_35_69_chunk + -> Custom Scan (DecompressChunk) on _hyper_35_70_chunk + -> Seq Scan on compress_hyper_36_73_chunk + -> Seq Scan on _hyper_35_70_chunk + -> Seq Scan on _hyper_35_69_chunk test_partials_2 + Filter: (ALL ("time" >= (SubPlan 1).col1)) + -> Seq Scan on _hyper_35_70_chunk test_partials_3 + Filter: (ALL ("time" >= (SubPlan 1).col1)) +(24 rows) + +DELETE FROM test_partials WHERE time >= ALL(SELECT time from test_partials); +-- All 3 chunks will now become partially compressed chunks +EXPLAIN (costs off) SELECT * FROM test_partials ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on test_partials + Order: test_partials."time" + -> Merge Append + Sort Key: _hyper_35_68_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_35_68_chunk + -> Sort + Sort Key: compress_hyper_36_71_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_36_71_chunk + -> Sort + Sort Key: _hyper_35_68_chunk."time" + -> Seq Scan on _hyper_35_68_chunk + -> Merge Append + Sort Key: _hyper_35_69_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_35_69_chunk + -> Sort + Sort Key: compress_hyper_36_72_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_36_72_chunk + -> Sort + Sort Key: _hyper_35_69_chunk."time" + -> Seq Scan on _hyper_35_69_chunk + -> Merge Append + Sort Key: _hyper_35_70_chunk."time" + -> Custom Scan (DecompressChunk) on _hyper_35_70_chunk + -> Sort + Sort Key: compress_hyper_36_73_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_36_73_chunk + -> Sort + Sort Key: _hyper_35_70_chunk."time" + -> Seq Scan on _hyper_35_70_chunk +(29 rows) + +-- verify correct results +SELECT * FROM test_partials ORDER BY time; + time | a | b +------------------------------+---+--- + Wed Jan 01 00:00:00 2020 PST | 1 | 2 + Wed Jan 01 00:01:00 2020 PST | 2 | 2 + Wed Jan 01 00:04:00 2020 PST | 1 | 2 + Fri Jan 01 00:00:00 2021 PST | 1 | 2 + Fri Jan 01 00:04:00 2021 PST | 1 | 2 + Sat Jan 01 00:00:00 2022 PST | 1 | 2 +(6 rows) + +SELECT compress_chunk(show_chunks('test_partials')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_35_68_chunk + _timescaledb_internal._hyper_35_69_chunk + _timescaledb_internal._hyper_35_70_chunk +(3 rows) + +-- fully compressed +EXPLAIN (costs off) SELECT * FROM test_partials ORDER BY time; + QUERY PLAN +------------------------------------------------------------------------------- + Custom Scan (ChunkAppend) on test_partials + Order: test_partials."time" + -> Custom Scan (DecompressChunk) on _hyper_35_68_chunk + -> Sort + Sort Key: compress_hyper_36_74_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_36_74_chunk + -> Custom Scan (DecompressChunk) on _hyper_35_69_chunk + -> Sort + Sort Key: compress_hyper_36_75_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_36_75_chunk + -> Custom Scan (DecompressChunk) on _hyper_35_70_chunk + -> Sort + Sort Key: compress_hyper_36_76_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_36_76_chunk +(14 rows) + +DROP TABLE test_partials; +CREATE TABLE test_meta_filters(time timestamptz NOT NULL, device text, metric text, v1 float, v2 float); +CREATE INDEX ON test_meta_filters(device, metric, v1); +SELECT create_hypertable('test_meta_filters', 'time'); + create_hypertable +--------------------------------- + (37,public,test_meta_filters,t) +(1 row) + +ALTER TABLE test_meta_filters SET (timescaledb.compress, timescaledb.compress_segmentby='device', timescaledb.compress_orderby='metric,time'); +INSERT INTO test_meta_filters SELECT '2020-01-01'::timestamptz,'d1','m' || metric::text,v1,v2 FROM generate_series(1,3,1) metric, generate_series(1,1000,1) v1, generate_series(1,10,1) v2 ORDER BY 1,2,3,4,5; +SELECT compress_chunk(show_chunks('test_meta_filters')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_37_77_chunk +(1 row) + +EXPLAIN (analyze, timing off, costs off, summary off) DELETE FROM test_meta_filters WHERE device = 'd1' AND metric = 'm1' AND v1 < 100; + QUERY PLAN +------------------------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1000 + -> Delete on test_meta_filters (actual rows=0 loops=1) + Delete on _hyper_37_77_chunk test_meta_filters_1 + -> Seq Scan on _hyper_37_77_chunk test_meta_filters_1 (actual rows=990 loops=1) + Filter: ((v1 < '100'::double precision) AND (device = 'd1'::text) AND (metric = 'm1'::text)) + Rows Removed by Filter: 10 +(8 rows) + +-- test expression pushdown in compressed dml constraints +CREATE TABLE test_pushdown(time timestamptz NOT NULL, device text); +SELECT table_name FROM create_hypertable('test_pushdown', 'time'); + table_name +--------------- + test_pushdown +(1 row) + +INSERT INTO test_pushdown SELECT '2020-01-01', 'a'; +INSERT INTO test_pushdown SELECT '2020-01-01', 'b'; +INSERT INTO test_pushdown SELECT '2020-01-01 05:00', 'c'; +CREATE TABLE devices(device text); +INSERT INTO devices VALUES ('a'), ('b'), ('c'); +CREATE TABLE devices2(device text); +INSERT INTO devices2 VALUES ('d'), ('e'), ('f'); +CREATE TABLE devices3(device text); +INSERT INTO devices3 VALUES ('b'), ('d'), ('g'); +ALTER TABLE test_pushdown SET (timescaledb.compress, timescaledb.compress_segmentby='device'); +NOTICE: default order by for hypertable "test_pushdown" is set to ""time" DESC" +SELECT compress_chunk(show_chunks('test_pushdown')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_39_79_chunk +(1 row) + +-- 3 batch decompressions means pushdown is not working so we expect less than 3 for all these queries +SET timescaledb.enable_compressed_direct_batch_delete TO false; +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE 'a' = device; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=1 loops=1) + Filter: ('a'::text = device) +(7 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device < 'c' ; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 2 + Tuples decompressed: 2 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=2 loops=1) + Filter: (device < 'c'::text) +(7 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE 'c' > device; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 2 + Tuples decompressed: 2 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=2 loops=1) + Filter: ('c'::text > device) +(7 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE 'c' >= device; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 3 + Tuples decompressed: 3 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=3 loops=1) + Filter: ('c'::text >= device) +(7 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device > 'b'; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=1 loops=1) + Filter: (device > 'b'::text) +(7 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device = CURRENT_USER; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Custom Scan (ChunkAppend) on test_pushdown (actual rows=0 loops=1) + Chunks excluded during startup: 0 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=0 loops=1) + Filter: (device = CURRENT_USER) +(7 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE 'b' < device; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=1 loops=1) + Filter: ('b'::text < device) +(7 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE 'b' <= device; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 2 + Tuples decompressed: 2 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=2 loops=1) + Filter: ('b'::text <= device) +(7 rows) + +RESET timescaledb.enable_compressed_direct_batch_delete; +-- cant pushdown OR atm +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device = 'a' OR device = 'b'; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 3 + Tuples decompressed: 3 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=2 loops=1) + Filter: ((device = 'a'::text) OR (device = 'b'::text)) + Rows Removed by Filter: 1 +(8 rows) + +-- test stable function +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE time = timestamptz('2020-01-01 05:00'); ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=1 loops=1) + Filter: ("time" = 'Wed Jan 01 05:00:00 2020 PST'::timestamp with time zone) +(7 rows) + +-- test sqlvaluefunction +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device = substring(CURRENT_USER,length(CURRENT_USER)+1) || 'c'; ROLLBACK; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Custom Scan (ChunkAppend) on test_pushdown (actual rows=1 loops=1) + Chunks excluded during startup: 0 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=1 loops=1) + Filter: (device = ("substring"((CURRENT_USER)::text, (length((CURRENT_USER)::text) + 1)) || 'c'::text)) +(9 rows) + +-- JOIN tests +-- no filtering in decompression +SET timescaledb.enable_compressed_direct_batch_delete TO false; +BEGIN; :EXPLAIN DELETE FROM test_pushdown p USING devices3 d WHERE p.device=d.device; SELECT * FROM test_pushdown p ORDER BY p; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 3 + Tuples decompressed: 3 + -> Delete on test_pushdown p (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk p_1 + -> Merge Join (actual rows=1 loops=1) + Merge Cond: (p_1.device = d.device) + -> Sort (actual rows=3 loops=1) + Sort Key: p_1.device + Sort Method: quicksort + -> Seq Scan on _hyper_39_79_chunk p_1 (actual rows=3 loops=1) + -> Sort (actual rows=2 loops=1) + Sort Key: d.device + Sort Method: quicksort + -> Seq Scan on devices3 d (actual rows=3 loops=1) +(15 rows) + + time | device +------------------------------+-------- + Wed Jan 01 00:00:00 2020 PST | a + Wed Jan 01 05:00:00 2020 PST | c +(2 rows) + +RESET timescaledb.enable_compressed_direct_batch_delete; +BEGIN; :EXPLAIN DELETE FROM test_pushdown p USING devices3 d WHERE p.device=d.device; SELECT * FROM test_pushdown p ORDER BY p; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 3 + Tuples decompressed: 3 + -> Delete on test_pushdown p (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk p_1 + -> Merge Join (actual rows=1 loops=1) + Merge Cond: (p_1.device = d.device) + -> Sort (actual rows=3 loops=1) + Sort Key: p_1.device + Sort Method: quicksort + -> Seq Scan on _hyper_39_79_chunk p_1 (actual rows=3 loops=1) + -> Sort (actual rows=2 loops=1) + Sort Key: d.device + Sort Method: quicksort + -> Seq Scan on devices3 d (actual rows=3 loops=1) +(15 rows) + + time | device +------------------------------+-------- + Wed Jan 01 00:00:00 2020 PST | a + Wed Jan 01 05:00:00 2020 PST | c +(2 rows) + +-- can filter in decompression even before executing join +SET timescaledb.enable_compressed_direct_batch_delete TO false; +BEGIN; :EXPLAIN DELETE FROM test_pushdown p USING devices d WHERE p.device=d.device AND d.device ='b'; SELECT * FROM test_pushdown p ORDER BY p; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1 + -> Delete on test_pushdown p (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk p_1 + -> Nested Loop (actual rows=1 loops=1) + -> Seq Scan on devices d (actual rows=1 loops=1) + Filter: (device = 'b'::text) + Rows Removed by Filter: 2 + -> Materialize (actual rows=1 loops=1) + -> Seq Scan on _hyper_39_79_chunk p_1 (actual rows=1 loops=1) + Filter: (device = 'b'::text) +(12 rows) + + time | device +------------------------------+-------- + Wed Jan 01 00:00:00 2020 PST | a + Wed Jan 01 05:00:00 2020 PST | c +(2 rows) + +RESET timescaledb.enable_compressed_direct_batch_delete; +BEGIN; :EXPLAIN DELETE FROM test_pushdown p USING devices d WHERE p.device=d.device AND d.device ='b'; SELECT * FROM test_pushdown p ORDER BY p; ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1 + -> Delete on test_pushdown p (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk p_1 + -> Nested Loop (actual rows=1 loops=1) + -> Seq Scan on devices d (actual rows=1 loops=1) + Filter: (device = 'b'::text) + Rows Removed by Filter: 2 + -> Materialize (actual rows=1 loops=1) + -> Seq Scan on _hyper_39_79_chunk p_1 (actual rows=1 loops=1) + Filter: (device = 'b'::text) +(12 rows) + + time | device +------------------------------+-------- + Wed Jan 01 00:00:00 2020 PST | a + Wed Jan 01 05:00:00 2020 PST | c +(2 rows) + +-- test prepared statement +PREPARE q1(text) AS DELETE FROM test_pushdown WHERE device = $1; +SET timescaledb.enable_compressed_direct_batch_delete TO false; +BEGIN; :EXPLAIN EXECUTE q1('a'); ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=1 loops=1) + Filter: (device = 'a'::text) +(7 rows) + +RESET timescaledb.enable_compressed_direct_batch_delete; +BEGIN; :EXPLAIN EXECUTE q1('a'); ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches deleted: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=0 loops=1) + Filter: (device = 'a'::text) +(6 rows) + +BEGIN; :EXPLAIN EXECUTE q1('not here'); ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=0 loops=1) + Filter: (device = 'not here'::text) +(5 rows) + +-- test arrayop pushdown less than 3 decompressions are expected for successful pushdown +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device IN ('a','d'); ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches deleted: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=0 loops=1) + Filter: (device = ANY ('{a,d}'::text[])) +(6 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device = ANY('{a,d}'); ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches deleted: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=0 loops=1) + Filter: (device = ANY ('{a,d}'::text[])) +(6 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device IN ('a',CURRENT_USER); ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 1 + Tuples decompressed: 1 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Custom Scan (ChunkAppend) on test_pushdown (actual rows=1 loops=1) + Chunks excluded during startup: 0 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=1 loops=1) + Filter: (device = ANY (ARRAY['a'::text, (CURRENT_USER)::text])) +(9 rows) + +-- arroyop pushdown only works for segmentby columns atm so 3 decompressions are expected for now +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE time IN ('2020-01-01','2020-01-02'); ROLLBACK; + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------- + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 3 + Tuples decompressed: 3 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=2 loops=1) + Filter: ("time" = ANY ('{"Wed Jan 01 00:00:00 2020 PST","Thu Jan 02 00:00:00 2020 PST"}'::timestamp with time zone[])) + Rows Removed by Filter: 1 +(8 rows) + +-- no pushdown for volatile functions +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device = current_query(); ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 3 + Tuples decompressed: 3 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Custom Scan (ChunkAppend) on test_pushdown (actual rows=0 loops=1) + Chunks excluded during startup: 0 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=0 loops=1) + Filter: (device = current_query()) + Rows Removed by Filter: 3 +(10 rows) + +BEGIN; :EXPLAIN DELETE FROM test_pushdown WHERE device IN ('a',current_query()); ROLLBACK; + QUERY PLAN +------------------------------------------------------------------------------------------ + Custom Scan (HypertableModify) (actual rows=0 loops=1) + Batches decompressed: 3 + Tuples decompressed: 3 + -> Delete on test_pushdown (actual rows=0 loops=1) + Delete on _hyper_39_79_chunk test_pushdown_1 + -> Custom Scan (ChunkAppend) on test_pushdown (actual rows=1 loops=1) + Chunks excluded during startup: 0 + -> Seq Scan on _hyper_39_79_chunk test_pushdown_1 (actual rows=1 loops=1) + Filter: (device = ANY (ARRAY['a'::text, current_query()])) + Rows Removed by Filter: 2 +(10 rows) + +-- github issue #6858 +-- check update triggers work correctly both on uncompressed and compressed chunks +CREATE TABLE update_trigger_test ( + "entity_id" "uuid" NOT NULL, + "effective_date_time" timestamp with time zone NOT NULL, + "measurement" numeric NOT NULL, + "modified_at" timestamp with time zone DEFAULT "now"() NOT NULL +); +SELECT create_hypertable('update_trigger_test', 'effective_date_time'); + create_hypertable +----------------------------------- + (41,public,update_trigger_test,t) +(1 row) + +CREATE OR REPLACE FUNCTION update_modified_at_test() +RETURNS TRIGGER +LANGUAGE PLPGSQL AS $$ +BEGIN + NEW.modified_at = NOW(); + RETURN NEW; +END; $$; +CREATE TRIGGER update_trigger_test__before_update_sync_modified_at +BEFORE UPDATE ON update_trigger_test +FOR EACH ROW +EXECUTE PROCEDURE update_modified_at_test(); +INSERT INTO update_trigger_test +SELECT 'f2ca7073-1395-5770-8378-7d0339804580', '2024-04-16 04:50:00+02', +1100.00, '2024-04-23 11:56:38.494095+02' FROM generate_series(1,2500,1) c; +VACUUM FULL update_trigger_test; +BEGIN; +UPDATE update_trigger_test SET measurement = measurement + 2 +WHERE update_trigger_test.effective_date_time >= '2020-01-01T00:00:00'::timestamp AT TIME ZONE 'UTC'; +ROLLBACK; +-- try with default compression +ALTER TABLE update_trigger_test SET (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "update_trigger_test" is set to "" +NOTICE: default order by for hypertable "update_trigger_test" is set to "effective_date_time DESC" +SELECT compress_chunk(show_chunks('update_trigger_test')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_41_81_chunk +(1 row) + +BEGIN; +UPDATE update_trigger_test SET measurement = measurement + 2 +WHERE update_trigger_test.effective_date_time >= '2020-01-01T00:00:00'::timestamp AT TIME ZONE 'UTC'; +ROLLBACK; +-- lets try with segmentby +SELECT decompress_chunk(show_chunks('update_trigger_test')); + decompress_chunk +------------------------------------------ + _timescaledb_internal._hyper_41_81_chunk +(1 row) + +ALTER TABLE update_trigger_test SET (timescaledb.compress, timescaledb.compress_segmentby='entity_id'); +SELECT compress_chunk(show_chunks('update_trigger_test')); + compress_chunk +------------------------------------------ + _timescaledb_internal._hyper_41_81_chunk +(1 row) + +BEGIN; +UPDATE update_trigger_test SET measurement = measurement + 2 +WHERE update_trigger_test.effective_date_time >= '2020-01-01T00:00:00'::timestamp AT TIME ZONE 'UTC'; +ROLLBACK; diff --git a/tsl/test/expected/merge_append_partially_compressed-17.out b/tsl/test/expected/merge_append_partially_compressed-17.out index 4b2a7e16cbe..9ee91829636 100644 --- a/tsl/test/expected/merge_append_partially_compressed-17.out +++ b/tsl/test/expected/merge_append_partially_compressed-17.out @@ -877,6 +877,31 @@ SELECT * FROM test1 ORDER BY x1, x2, x5, time, x3 DESC LIMIT 10; -> Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1) (12 rows) +-- test append with join column in orderby +-- #6975 +CREATE TABLE join_table ( + x1 integer, + y1 float); +INSERT INTO join_table VALUES (1, 1.0), (2,2.0); +:PREFIX +SELECT * FROM test1 t1 JOIN join_table jt ON t1.x1 = jt.x1 +ORDER BY t1.x1, jt.y1; + QUERY PLAN +-------------------------------------------------------------------------------------------------------- + Sort (actual rows=4 loops=1) + Sort Key: t1_1.x1, jt.y1 + Sort Method: quicksort + -> Hash Join (actual rows=4 loops=1) + Hash Cond: (jt.x1 = t1_1.x1) + -> Seq Scan on join_table jt (actual rows=2 loops=1) + -> Hash (actual rows=5 loops=1) + Buckets: 4096 Batches: 1 + -> Append (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_3_7_chunk t1_1 (actual rows=4 loops=1) + -> Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1) + -> Seq Scan on _hyper_3_7_chunk t1_1 (actual rows=1 loops=1) +(12 rows) + --------------------------------------------------------------------------- -- test queries without ordered append, but still eligible for sort pushdown --------------------------------------------------------------------------- diff --git a/tsl/test/expected/plan_skip_scan-17.out b/tsl/test/expected/plan_skip_scan-17.out index 9891b7bd08b..b2e8ca0290f 100644 --- a/tsl/test/expected/plan_skip_scan-17.out +++ b/tsl/test/expected/plan_skip_scan-17.out @@ -1239,14 +1239,15 @@ UNION SELECT b.* FROM QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------------- Unique (actual rows=10001 loops=1) - -> Sort (actual rows=20002 loops=1) + -> Merge Append (actual rows=20002 loops=1) Sort Key: skip_scan.dev, skip_scan."time" - Sort Method: quicksort - -> Append (actual rows=20002 loops=1) - -> Unique (actual rows=10001 loops=1) - -> Index Only Scan using skip_scan_dev_time_idx on skip_scan (actual rows=10001 loops=1) - Index Cond: (dev IS NOT NULL) - Heap Fetches: 10001 + -> Unique (actual rows=10001 loops=1) + -> Index Only Scan using skip_scan_dev_time_idx on skip_scan (actual rows=10001 loops=1) + Index Cond: (dev IS NOT NULL) + Heap Fetches: 10001 + -> Sort (actual rows=10001 loops=1) + Sort Key: skip_scan_2.dev, skip_scan_2."time" + Sort Method: quicksort -> Nested Loop (actual rows=10001 loops=1) -> Unique (actual rows=12 loops=1) -> Custom Scan (SkipScan) on skip_scan skip_scan_1 (actual rows=12 loops=1) @@ -1258,7 +1259,7 @@ UNION SELECT b.* FROM -> Index Only Scan using skip_scan_dev_time_idx on skip_scan skip_scan_2 (actual rows=833 loops=12) Index Cond: ((dev = skip_scan_1.dev) AND ("time" > NULL::integer)) Heap Fetches: 10001 -(20 rows) +(21 rows) -- SkipScan into INSERT :PREFIX INSERT INTO skip_scan_insert(time, dev, val, query) SELECT time, dev, val, 'q10_1' FROM (SELECT DISTINCT ON (dev) * FROM :TABLE) a; @@ -3632,25 +3633,26 @@ UNION SELECT b.* FROM QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ Unique (actual rows=10000 loops=1) - -> Sort (actual rows=20000 loops=1) + -> Merge Append (actual rows=20000 loops=1) Sort Key: _hyper_1_1_chunk.dev, _hyper_1_1_chunk."time" - Sort Method: quicksort - -> Append (actual rows=20000 loops=1) - -> Unique (actual rows=10000 loops=1) - -> Merge Append (actual rows=10000 loops=1) - Sort Key: _hyper_1_1_chunk.dev, _hyper_1_1_chunk."time" - -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_time_idx on _hyper_1_1_chunk (actual rows=2500 loops=1) - Index Cond: (dev IS NOT NULL) - Heap Fetches: 2500 - -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_time_idx on _hyper_1_2_chunk (actual rows=2500 loops=1) - Index Cond: (dev IS NOT NULL) - Heap Fetches: 2500 - -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_time_idx on _hyper_1_3_chunk (actual rows=2500 loops=1) - Index Cond: (dev IS NOT NULL) - Heap Fetches: 2500 - -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_time_idx on _hyper_1_4_chunk (actual rows=2500 loops=1) - Index Cond: (dev IS NOT NULL) - Heap Fetches: 2500 + -> Unique (actual rows=10000 loops=1) + -> Merge Append (actual rows=10000 loops=1) + Sort Key: _hyper_1_1_chunk.dev, _hyper_1_1_chunk."time" + -> Index Only Scan using _hyper_1_1_chunk_skip_scan_ht_dev_time_idx on _hyper_1_1_chunk (actual rows=2500 loops=1) + Index Cond: (dev IS NOT NULL) + Heap Fetches: 2500 + -> Index Only Scan using _hyper_1_2_chunk_skip_scan_ht_dev_time_idx on _hyper_1_2_chunk (actual rows=2500 loops=1) + Index Cond: (dev IS NOT NULL) + Heap Fetches: 2500 + -> Index Only Scan using _hyper_1_3_chunk_skip_scan_ht_dev_time_idx on _hyper_1_3_chunk (actual rows=2500 loops=1) + Index Cond: (dev IS NOT NULL) + Heap Fetches: 2500 + -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_time_idx on _hyper_1_4_chunk (actual rows=2500 loops=1) + Index Cond: (dev IS NOT NULL) + Heap Fetches: 2500 + -> Sort (actual rows=10000 loops=1) + Sort Key: _hyper_1_1_chunk_2.dev, _hyper_1_1_chunk_2."time" + Sort Method: quicksort -> Nested Loop (actual rows=10000 loops=1) -> Unique (actual rows=11 loops=1) -> Merge Append (actual rows=44 loops=1) @@ -3690,7 +3692,7 @@ UNION SELECT b.* FROM -> Index Only Scan using _hyper_1_4_chunk_skip_scan_ht_dev_time_idx on _hyper_1_4_chunk _hyper_1_4_chunk_2 (actual rows=227 loops=11) Index Cond: ((dev = _hyper_1_1_chunk_1.dev) AND ("time" > NULL::integer)) Heap Fetches: 2500 -(59 rows) +(60 rows) -- SkipScan into INSERT :PREFIX INSERT INTO skip_scan_insert(time, dev, val, query) SELECT time, dev, val, 'q10_1' FROM (SELECT DISTINCT ON (dev) * FROM :TABLE) a; diff --git a/tsl/test/expected/transparent_decompression-17.out b/tsl/test/expected/transparent_decompression-17.out index f16e9896430..f1c98979e6d 100644 --- a/tsl/test/expected/transparent_decompression-17.out +++ b/tsl/test/expected/transparent_decompression-17.out @@ -9298,22 +9298,25 @@ EXPLAIN (costs off) SELECT * FROM metrics ORDER BY time, device_id; (10 rows) EXPLAIN (costs off) SELECT time_bucket('10 minutes', time) bucket, avg(v0) avg_v0 FROM metrics GROUP BY bucket; - QUERY PLAN --------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------- Finalize HashAggregate Group Key: (time_bucket('@ 10 mins'::interval, _hyper_1_1_chunk."time")) -> Gather Workers Planned: 3 - -> Partial HashAggregate - Group Key: time_bucket('@ 10 mins'::interval, _hyper_1_1_chunk."time") - -> Result - -> Parallel Append - -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk - -> Parallel Seq Scan on compress_hyper_5_15_chunk - -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk - -> Parallel Seq Scan on compress_hyper_5_16_chunk - -> Parallel Seq Scan on _hyper_1_2_chunk -(13 rows) + -> Parallel Append + -> Partial HashAggregate + Group Key: time_bucket('@ 10 mins'::interval, _hyper_1_1_chunk."time") + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk + -> Parallel Seq Scan on compress_hyper_5_15_chunk + -> Partial HashAggregate + Group Key: time_bucket('@ 10 mins'::interval, _hyper_1_3_chunk."time") + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk + -> Parallel Seq Scan on compress_hyper_5_16_chunk + -> Partial HashAggregate + Group Key: time_bucket('@ 10 mins'::interval, _hyper_1_2_chunk."time") + -> Parallel Seq Scan on _hyper_1_2_chunk +(16 rows) EXPLAIN (costs off) SELECT * FROM metrics_space ORDER BY time, device_id; QUERY PLAN diff --git a/tsl/test/sql/CMakeLists.txt b/tsl/test/sql/CMakeLists.txt index 2b10a3181e0..b8abd5a5fb1 100644 --- a/tsl/test/sql/CMakeLists.txt +++ b/tsl/test/sql/CMakeLists.txt @@ -10,7 +10,6 @@ set(TEST_FILES cagg_deprecated_bucket_ng.sql cagg_errors.sql cagg_invalidation.sql - cagg_permissions.sql cagg_policy.sql cagg_query.sql cagg_refresh.sql @@ -22,6 +21,7 @@ set(TEST_FILES compress_float8_corrupt.sql compressed_detoaster.sql compressed_collation.sql + compression.sql compression_create_compressed_table.sql compression_conflicts.sql compression_defaults.sql @@ -41,7 +41,8 @@ set(TEST_FILES size_utils_tsl.sql skip_scan.sql transparent_decompression_join_index.sql - vector_agg_param.sql) + vector_agg_param.sql + vectorized_aggregation.sql) if(USE_TELEMETRY) list(APPEND TEST_FILES bgw_telemetry.sql) @@ -63,6 +64,7 @@ if(CMAKE_BUILD_TYPE MATCHES Debug) chunk_api.sql chunk_merge.sql chunk_utils_compression.sql + chunk_utils_internal.sql compression_algos.sql compression_bgw.sql compression_ddl.sql @@ -101,14 +103,6 @@ if(CMAKE_BUILD_TYPE MATCHES Debug) vector_agg_segmentby.sql) endif(CMAKE_BUILD_TYPE MATCHES Debug) -if((${PG_VERSION_MAJOR} GREATER_EQUAL "14")) - if(CMAKE_BUILD_TYPE MATCHES Debug) - list(APPEND TEST_FILES chunk_utils_internal.sql - compression_update_delete.sql) - endif() - list(APPEND TEST_FILES compression.sql compression_permissions.sql) -endif() - if((${PG_VERSION_MAJOR} GREATER_EQUAL "15")) if(CMAKE_BUILD_TYPE MATCHES Debug) list(APPEND TEST_FILES bgw_scheduler_control.sql) @@ -140,24 +134,16 @@ set(SOLO_TESTS set(TEST_TEMPLATES cagg_union_view.sql.in + cagg_permissions.sql.in chunk_column_stats.sql.in + compression_permissions.sql.in compression_sorted_merge.sql.in merge_append_partially_compressed.sql.in + modify_exclusion.sql.in plan_skip_scan.sql.in transparent_decompression.sql.in transparent_decompression_ordered_index.sql.in) -# This test runs only with PG version >= 14 -if((${PG_VERSION_MAJOR} GREATER_EQUAL "14")) - set(TEST_FILES_ON_VERSION_GE_14 modify_exclusion.sql.in) -endif() - -if((${PG_VERSION_MAJOR} GREATER "14")) - # This is a test for a prototype feature that has different outputs on PG14, - # so we disable it on v14 not to have to work with multiple references. - list(APPEND TEST_FILES vectorized_aggregation.sql) -endif() - if(CMAKE_BUILD_TYPE MATCHES Debug) list( APPEND @@ -168,6 +154,7 @@ if(CMAKE_BUILD_TYPE MATCHES Debug) cagg_repair.sql.in cagg_usage.sql.in compression_errors.sql.in + compression_update_delete.sql.in continuous_aggs.sql.in) if(USE_TELEMETRY) list(APPEND TEST_FILES telemetry_stats.sql) @@ -199,16 +186,6 @@ foreach(TEMPLATE_FILE ${TEST_TEMPLATES}) list(APPEND TEST_FILES ${TEST_FILE}) endforeach(TEMPLATE_FILE) -foreach(TEST_FILES_GE_14 ${TEST_FILES_ON_VERSION_GE_14}) - string(LENGTH ${TEST_FILES_GE_14} TEST_FILES_GE_14_NAME_LEN) - math(EXPR TEST_FILES_GE_14_NAME_LEN ${TEST_FILES_GE_14_NAME_LEN}-7) - string(SUBSTRING ${TEST_FILES_GE_14} 0 ${TEST_FILES_GE_14_NAME_LEN} TEMPLATE) - set(TEST_FILE ${TEMPLATE}-${TEST_VERSION_SUFFIX}.sql) - configure_file(${TEST_FILES_GE_14} ${CMAKE_CURRENT_SOURCE_DIR}/${TEST_FILE} - COPYONLY) - list(APPEND TEST_FILES ${TEST_FILE}) -endforeach(TEST_FILES_GE_14) - if(NOT TEST_GROUP_SIZE) set(PARALLEL_GROUP_SIZE 10) else() diff --git a/tsl/test/sql/cagg_permissions.sql b/tsl/test/sql/cagg_permissions.sql.in similarity index 100% rename from tsl/test/sql/cagg_permissions.sql rename to tsl/test/sql/cagg_permissions.sql.in diff --git a/tsl/test/sql/chunk_utils_internal.sql b/tsl/test/sql/chunk_utils_internal.sql index 328d24d37b7..1ac6ed7c817 100644 --- a/tsl/test/sql/chunk_utils_internal.sql +++ b/tsl/test/sql/chunk_utils_internal.sql @@ -10,6 +10,8 @@ -- * attach_foreign_table_chunk -- * hypertable_osm_range_update +\set EXPLAIN 'EXPLAIN (COSTS OFF)' + CREATE OR REPLACE VIEW chunk_view AS SELECT ht.table_name AS hypertable_name, @@ -375,7 +377,7 @@ SELECT * from ht_try WHERE timec > '2020-01-01 01:00' ORDER BY 1; -- test ordered append BEGIN; -- before updating the ranges -EXPLAIN SELECT * FROM ht_try ORDER BY 1; +:EXPLAIN SELECT * FROM ht_try ORDER BY 1; -- range before update SELECT cc.chunk_id, c.table_name, c.status, c.osm_chunk, cc.dimension_slice_id, ds.range_start, ds.range_end FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _timescaledb_catalog.dimension_slice ds @@ -388,11 +390,11 @@ SELECT cc.chunk_id, c.table_name, c.status, c.osm_chunk, cc.dimension_slice_id, FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _timescaledb_catalog.dimension_slice ds WHERE c.table_name = 'child_fdw_table' AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id; -- should be ordered append now -EXPLAIN SELECT * FROM ht_try ORDER BY 1; +:EXPLAIN SELECT * FROM ht_try ORDER BY 1; SELECT * FROM ht_try ORDER BY 1; -- test invalid range - should not be ordered append SELECT _timescaledb_functions.hypertable_osm_range_update('ht_try'); -EXPLAIN SELECT * from ht_try ORDER BY 1; +:EXPLAIN SELECT * from ht_try ORDER BY 1; SELECT * from ht_try ORDER BY 1; ROLLBACK; @@ -408,14 +410,14 @@ SELECT * FROM hypertable_approximate_size('ht_try'); --TEST GUC variable to enable/disable OSM chunk SET timescaledb.enable_tiered_reads=false; -EXPLAIN (COSTS OFF) SELECT * from ht_try; -EXPLAIN (COSTS OFF) SELECT * from ht_try WHERE timec > '2022-01-01 01:00'; -EXPLAIN (COSTS OFF) SELECT * from ht_try WHERE timec < '2023-01-01 01:00'; +:EXPLAIN SELECT * from ht_try; +:EXPLAIN SELECT * from ht_try WHERE timec > '2022-01-01 01:00'; +:EXPLAIN SELECT * from ht_try WHERE timec < '2023-01-01 01:00'; SET timescaledb.enable_tiered_reads=true; -EXPLAIN (COSTS OFF) SELECT * from ht_try; +:EXPLAIN SELECT * from ht_try; -- foreign chunk contains data from Jan 2020, so it is skipped during planning -EXPLAIN (COSTS OFF) SELECT * from ht_try WHERE timec > '2022-01-01 01:00'; -EXPLAIN (COSTS OFF) SELECT * from ht_try WHERE timec < '2023-01-01 01:00'; +:EXPLAIN SELECT * from ht_try WHERE timec > '2022-01-01 01:00'; +:EXPLAIN SELECT * from ht_try WHERE timec < '2023-01-01 01:00'; -- This test verifies that a bugfix regarding the way `ROWID_VAR`s are adjusted -- in the chunks' targetlists on DELETE/UPDATE works (including partially @@ -428,7 +430,7 @@ DO $$ DECLARE r RECORD; BEGIN - EXPLAIN UPDATE ht_try SET value = 2 + EXPLAIN (COSTS OFF) UPDATE ht_try SET value = 2 WHERE acq_id = 10 AND timec > now() - '15 years'::interval INTO r; END $$ LANGUAGE plpgsql; diff --git a/tsl/test/sql/compression.sql b/tsl/test/sql/compression.sql index 2c0ae2083b1..0dbb18fa54c 100644 --- a/tsl/test/sql/compression.sql +++ b/tsl/test/sql/compression.sql @@ -529,7 +529,8 @@ SELECT create_hypertable('stattest', 'time'); INSERT INTO stattest SELECT '2020/02/20 01:00'::TIMESTAMPTZ + ('1 hour'::interval * v), 250 * v FROM generate_series(0,25) v; SELECT table_name INTO TEMPORARY temptable FROM _timescaledb_catalog.chunk WHERE hypertable_id = (SELECT id FROM _timescaledb_catalog.hypertable WHERE table_name = 'stattest'); \set statchunk '(select table_name from temptable)' -SELECT * FROM pg_stats WHERE tablename = :statchunk; +SELECT schemaname, tablename, attname, inherited, null_frac, avg_width, n_distinct, most_common_vals, most_common_freqs, histogram_bounds, correlation, most_common_elems, most_common_elem_freqs, elem_count_histogram +FROM pg_stats WHERE tablename = :statchunk; ALTER TABLE stattest SET (timescaledb.compress); -- check that approximate_row_count works with all normal chunks diff --git a/tsl/test/sql/compression_permissions.sql b/tsl/test/sql/compression_permissions.sql.in similarity index 100% rename from tsl/test/sql/compression_permissions.sql rename to tsl/test/sql/compression_permissions.sql.in diff --git a/tsl/test/sql/compression_update_delete.sql b/tsl/test/sql/compression_update_delete.sql.in similarity index 100% rename from tsl/test/sql/compression_update_delete.sql rename to tsl/test/sql/compression_update_delete.sql.in diff --git a/tsl/test/sql/include/chunk_utils_internal_orderedappend.sql b/tsl/test/sql/include/chunk_utils_internal_orderedappend.sql index d14482d9a3b..0002f6474cf 100644 --- a/tsl/test/sql/include/chunk_utils_internal_orderedappend.sql +++ b/tsl/test/sql/include/chunk_utils_internal_orderedappend.sql @@ -92,7 +92,7 @@ SELECT cc.chunk_id, c.table_name, c.status, c.osm_chunk, cc.dimension_slice_id, FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _timescaledb_catalog.dimension_slice ds WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id ORDER BY cc.chunk_id; -- ordered append should be possible as ranges do not overlap -EXPLAIN SELECT * FROM test_chunkapp ORDER BY 1; +:EXPLAIN SELECT * FROM test_chunkapp ORDER BY 1; SELECT * FROM test_chunkapp ORDER BY 1; -- but, insert should not be possible SELECT ts_setup_osm_hook(); @@ -103,23 +103,23 @@ SELECT ts_undo_osm_hook(); -- reset range to infinity SELECT _timescaledb_functions.hypertable_osm_range_update('test_chunkapp',empty:=false); -- ordered append not possible because range is invalid and empty was not specified -EXPLAIN SELECT * FROM test_chunkapp ORDER BY 1; +:EXPLAIN SELECT * FROM test_chunkapp ORDER BY 1; SELECT * FROM test_chunkapp ORDER BY 1; SELECT cc.chunk_id, c.table_name, c.status, c.osm_chunk, cc.dimension_slice_id, ds.range_start, ds.range_end FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _timescaledb_catalog.dimension_slice ds WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id ORDER BY cc.chunk_id; -- but also, OSM chunk should be included in the scan, since range is invalid and chunk is not empty -EXPLAIN SELECT * FROM test_chunkapp WHERE time < '2023-01-01' ORDER BY 1; +:EXPLAIN SELECT * FROM test_chunkapp WHERE time < '2023-01-01' ORDER BY 1; SELECT * FROM test_chunkapp WHERE time < '2023-01-01' ORDER BY 1; -- now set empty to true, should ordered append \c postgres_fdw_db :ROLE_4; DELETE FROM test_chunkapp_fdw; \c :TEST_DBNAME :ROLE_4; SELECT _timescaledb_functions.hypertable_osm_range_update('test_chunkapp', NULL::timestamptz, NULL, empty => true); -EXPLAIN SELECT * FROM test_chunkapp ORDER BY 1; +:EXPLAIN SELECT * FROM test_chunkapp ORDER BY 1; SELECT * FROM test_chunkapp ORDER BY 1; -- should exclude the OSM chunk this time since it is empty -EXPLAIN SELECT * FROM test_chunkapp WHERE time < '2023-01-01' ORDER BY 1; +:EXPLAIN SELECT * FROM test_chunkapp WHERE time < '2023-01-01' ORDER BY 1; SELECT * FROM test_chunkapp WHERE time < '2023-01-01' ORDER BY 1; \set ON_ERROR_STOP 0 diff --git a/tsl/test/t/002_logrepl_decomp_marker.pl b/tsl/test/t/002_logrepl_decomp_marker.pl index 07b8c3b25d8..b00c6b61614 100644 --- a/tsl/test/t/002_logrepl_decomp_marker.pl +++ b/tsl/test/t/002_logrepl_decomp_marker.pl @@ -7,8 +7,6 @@ use TimescaleNode; use Test::More; -plan skip_all => "PostgreSQL version < 14" if $ENV{PG_VERSION_MAJOR} < 14; - # This test checks the creation of logical replication messages # used to mark the start and end of activity happening as a result # compression/decompression.