diff --git a/src/planner/partialize.c b/src/planner/partialize.c
index c42fcd3938e..4552e6aeec2 100644
--- a/src/planner/partialize.c
+++ b/src/planner/partialize.c
@@ -461,6 +461,15 @@ generate_agg_pushdown_path(PlannerInfo *root, Path *cheapest_total_path, RelOptI
 	if (subpaths == NIL)
 		return;
 
+	if (list_length(subpaths) < 2)
+	{
+		/*
+		 * Doesn't make sense to add per-chunk aggregation paths if there's
+		 * only one chunk.
+		 */
+		return;
+	}
+
 	/* Generate agg paths on top of the append children */
 	List *sorted_subpaths = NIL;
 	List *hashed_subpaths = NIL;
@@ -580,6 +589,14 @@ generate_partial_agg_pushdown_path(PlannerInfo *root, Path *cheapest_partial_pat
 	if (subpaths == NIL)
 		return;
 
+	if (list_length(subpaths) < 2)
+	{
+		/*
+		 * Doesn't make sense to add per-chunk aggregation paths if there's
+		 * only one chunk.
+		 */
+		return;
+	}
 	/* Generate agg paths on top of the append children */
 	ListCell *lc;
 	List *sorted_subpaths = NIL;
diff --git a/test/expected/agg_bookends.out b/test/expected/agg_bookends.out
index 2aaccddefd8..1328887b26b 100644
--- a/test/expected/agg_bookends.out
+++ b/test/expected/agg_bookends.out
@@ -77,116 +77,92 @@ SELECT setting, current_setting(setting) AS value from (VALUES ('timescaledb.ena
 (5 rows)
 
 :PREFIX SELECT last(temp, time_alt) FROM btest;
-                            QUERY PLAN
-------------------------------------------------------------------
- Finalize HashAggregate (actual rows=1 loops=1)
-   Batches: 1
-   ->  Partial HashAggregate (actual rows=1 loops=1)
-         Batches: 1
-         ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
-(5 rows)
+                         QUERY PLAN
+------------------------------------------------------------
+ Aggregate (actual rows=1 loops=1)
+   ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
+(2 rows)
 
 :PREFIX SELECT first(temp, time_alt) FROM btest;
-                            QUERY PLAN
-------------------------------------------------------------------
- Finalize HashAggregate (actual rows=1 loops=1)
-   Batches: 1
-   ->  Partial HashAggregate (actual rows=1 loops=1)
-         Batches: 1
-         ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
-(5 rows)
+                         QUERY PLAN
+------------------------------------------------------------
+ Aggregate (actual rows=1 loops=1)
+   ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
+(2 rows)
 
 :PREFIX SELECT gp, last(temp, time) FROM btest GROUP BY gp ORDER BY gp;
-                               QUERY PLAN
-------------------------------------------------------------------------
+                            QUERY PLAN
+------------------------------------------------------------------
  Sort (actual rows=2 loops=1)
    Sort Key: _hyper_1_1_chunk.gp
    Sort Method: quicksort
-   ->  Finalize HashAggregate (actual rows=2 loops=1)
+   ->  HashAggregate (actual rows=2 loops=1)
          Group Key: _hyper_1_1_chunk.gp
          Batches: 1
-         ->  Partial HashAggregate (actual rows=2 loops=1)
-               Group Key: _hyper_1_1_chunk.gp
-               Batches: 1
-               ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
-(10 rows)
+         ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
+(7 rows)
 
 :PREFIX SELECT gp, first(temp, time) FROM btest GROUP BY gp ORDER BY gp;
-                               QUERY PLAN
-------------------------------------------------------------------------
+                            QUERY PLAN
+------------------------------------------------------------------
  Sort (actual rows=2 loops=1)
    Sort Key: _hyper_1_1_chunk.gp
    Sort Method: quicksort
-   ->  Finalize HashAggregate (actual rows=2 loops=1)
+   ->  HashAggregate (actual rows=2 loops=1)
          Group Key: _hyper_1_1_chunk.gp
          Batches: 1
-         ->  Partial HashAggregate (actual rows=2 loops=1)
-               Group Key: _hyper_1_1_chunk.gp
-               Batches: 1
-               ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
-(10 rows)
+         ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
+(7 rows)
 
 --check whole row
 :PREFIX SELECT gp, first(btest, time) FROM btest GROUP BY gp ORDER BY gp;
-                               QUERY PLAN
-------------------------------------------------------------------------
+                            QUERY PLAN
+------------------------------------------------------------------
  Sort (actual rows=2 loops=1)
    Sort Key: _hyper_1_1_chunk.gp
    Sort Method: quicksort
-   ->  Finalize HashAggregate (actual rows=2 loops=1)
+   ->  HashAggregate (actual rows=2 loops=1)
          Group Key: _hyper_1_1_chunk.gp
          Batches: 1
-         ->  Partial HashAggregate (actual rows=2 loops=1)
-               Group Key: _hyper_1_1_chunk.gp
-               Batches: 1
-               ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
-(10 rows)
+         ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
+(7 rows)
 
 --check toasted col
 :PREFIX SELECT gp, left(last(strid, time), 10) FROM btest GROUP BY gp ORDER BY gp;
-                               QUERY PLAN
-------------------------------------------------------------------------
+                            QUERY PLAN
+------------------------------------------------------------------
  Sort (actual rows=2 loops=1)
    Sort Key: _hyper_1_1_chunk.gp
    Sort Method: quicksort
-   ->  Finalize HashAggregate (actual rows=2 loops=1)
+   ->  HashAggregate (actual rows=2 loops=1)
          Group Key: _hyper_1_1_chunk.gp
          Batches: 1
-         ->  Partial HashAggregate (actual rows=2 loops=1)
-               Group Key: _hyper_1_1_chunk.gp
-               Batches: 1
-               ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
-(10 rows)
+         ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
+(7 rows)
 
 :PREFIX SELECT gp, last(temp, strid) FROM btest GROUP BY gp ORDER BY gp;
-                               QUERY PLAN
-------------------------------------------------------------------------
+                            QUERY PLAN
+------------------------------------------------------------------
  Sort (actual rows=2 loops=1)
    Sort Key: _hyper_1_1_chunk.gp
    Sort Method: quicksort
-   ->  Finalize HashAggregate (actual rows=2 loops=1)
+   ->  HashAggregate (actual rows=2 loops=1)
          Group Key: _hyper_1_1_chunk.gp
          Batches: 1
-         ->  Partial HashAggregate (actual rows=2 loops=1)
-               Group Key: _hyper_1_1_chunk.gp
-               Batches: 1
-               ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
-(10 rows)
+         ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
+(7 rows)
 
 :PREFIX SELECT gp, last(strid, temp) FROM btest GROUP BY gp ORDER BY gp;
-                               QUERY PLAN
-------------------------------------------------------------------------
+                            QUERY PLAN
+------------------------------------------------------------------
  Sort (actual rows=2 loops=1)
    Sort Key: _hyper_1_1_chunk.gp
    Sort Method: quicksort
-   ->  Finalize HashAggregate (actual rows=2 loops=1)
+   ->  HashAggregate (actual rows=2 loops=1)
          Group Key: _hyper_1_1_chunk.gp
          Batches: 1
-         ->  Partial HashAggregate (actual rows=2 loops=1)
-               Group Key: _hyper_1_1_chunk.gp
-               Batches: 1
-               ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
-(10 rows)
+         ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
+(7 rows)
 
 BEGIN;
 --check null value as last element
@@ -849,24 +825,18 @@ INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL);
 (5 rows)
 
 :PREFIX SELECT first(time, quantity) FROM btest_numeric;
-                            QUERY PLAN
-------------------------------------------------------------------
- Finalize HashAggregate (actual rows=1 loops=1)
-   Batches: 1
-   ->  Partial HashAggregate (actual rows=1 loops=1)
-         Batches: 1
-         ->  Seq Scan on _hyper_2_8_chunk (actual rows=2 loops=1)
-(5 rows)
+                         QUERY PLAN
+------------------------------------------------------------
+ Aggregate (actual rows=1 loops=1)
+   ->  Seq Scan on _hyper_2_8_chunk (actual rows=2 loops=1)
+(2 rows)
 
 :PREFIX SELECT last(time, quantity) FROM btest_numeric;
-                            QUERY PLAN
-------------------------------------------------------------------
- Finalize HashAggregate (actual rows=1 loops=1)
-   Batches: 1
-   ->  Partial HashAggregate (actual rows=1 loops=1)
-         Batches: 1
-         ->  Seq Scan on _hyper_2_8_chunk (actual rows=2 loops=1)
-(5 rows)
+                         QUERY PLAN
+------------------------------------------------------------
+ Aggregate (actual rows=1 loops=1)
+   ->  Seq Scan on _hyper_2_8_chunk (actual rows=2 loops=1)
+(2 rows)
 
 -- NULL values followed by non-NULL values
 INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 1);
diff --git a/test/expected/parallel-13.out b/test/expected/parallel-13.out
index e960d671655..845fe60993f 100644
--- a/test/expected/parallel-13.out
+++ b/test/expected/parallel-13.out
@@ -641,11 +641,11 @@ SET max_parallel_workers_per_gather TO 2;
  Finalize Aggregate
    ->  Gather
          Workers Planned: 2
-         ->  Result
-               One-Time Filter: (length(version()) > 0)
-               ->  Parallel Custom Scan (ChunkAppend) on test
-                     Chunks excluded during startup: 0
-                     ->  Partial Aggregate
+         ->  Partial Aggregate
+               ->  Result
+                     One-Time Filter: (length(version()) > 0)
+                     ->  Parallel Custom Scan (ChunkAppend) on test
+                           Chunks excluded during startup: 0
                             ->  Result
                                   One-Time Filter: (length(version()) > 0)
                                   ->  Parallel Seq Scan on _hyper_1_2_chunk
diff --git a/test/expected/parallel-14.out b/test/expected/parallel-14.out
index e960d671655..845fe60993f 100644
--- a/test/expected/parallel-14.out
+++ b/test/expected/parallel-14.out
@@ -641,11 +641,11 @@ SET max_parallel_workers_per_gather TO 2;
  Finalize Aggregate
    ->  Gather
          Workers Planned: 2
-         ->  Result
-               One-Time Filter: (length(version()) > 0)
-               ->  Parallel Custom Scan (ChunkAppend) on test
-                     Chunks excluded during startup: 0
-                     ->  Partial Aggregate
+         ->  Partial Aggregate
+               ->  Result
+                     One-Time Filter: (length(version()) > 0)
+                     ->  Parallel Custom Scan (ChunkAppend) on test
+                           Chunks excluded during startup: 0
                             ->  Result
                                   One-Time Filter: (length(version()) > 0)
                                   ->  Parallel Seq Scan on _hyper_1_2_chunk
diff --git a/test/expected/parallel-15.out b/test/expected/parallel-15.out
index 4bc5a7a276e..177ed0cb02c 100644
--- a/test/expected/parallel-15.out
+++ b/test/expected/parallel-15.out
@@ -642,11 +642,11 @@ SET max_parallel_workers_per_gather TO 2;
  Finalize Aggregate
    ->  Gather
          Workers Planned: 2
-         ->  Result
-               One-Time Filter: (length(version()) > 0)
-               ->  Parallel Custom Scan (ChunkAppend) on test
-                     Chunks excluded during startup: 0
-                     ->  Partial Aggregate
+         ->  Partial Aggregate
+               ->  Result
+                     One-Time Filter: (length(version()) > 0)
+                     ->  Parallel Custom Scan (ChunkAppend) on test
+                           Chunks excluded during startup: 0
                             ->  Result
                                   One-Time Filter: (length(version()) > 0)
                                   ->  Parallel Seq Scan on _hyper_1_2_chunk
diff --git a/test/expected/parallel-16.out b/test/expected/parallel-16.out
index ba3bd81b657..01d899fe9f2 100644
--- a/test/expected/parallel-16.out
+++ b/test/expected/parallel-16.out
@@ -643,11 +643,11 @@ SET max_parallel_workers_per_gather TO 2;
  Finalize Aggregate
    ->  Gather
          Workers Planned: 2
-         ->  Result
-               One-Time Filter: (length(version()) > 0)
-               ->  Parallel Custom Scan (ChunkAppend) on test
-                     Chunks excluded during startup: 0
-                     ->  Partial Aggregate
+         ->  Partial Aggregate
+               ->  Result
+                     One-Time Filter: (length(version()) > 0)
+                     ->  Parallel Custom Scan (ChunkAppend) on test
+                           Chunks excluded during startup: 0
                             ->  Result
                                   One-Time Filter: (length(version()) > 0)
                                   ->  Parallel Seq Scan on _hyper_1_2_chunk
diff --git a/test/expected/partitionwise-13.out b/test/expected/partitionwise-13.out
index 8d511c54589..ea178ed5813 100644
--- a/test/expected/partitionwise-13.out
+++ b/test/expected/partitionwise-13.out
@@ -889,18 +889,15 @@ FROM hyper_timepart
 WHERE device = 1
 GROUP BY 1
 LIMIT 10;
-                                                           QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------
+                                                        QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------
  Limit
    Output: _hyper_3_8_chunk.device, (avg(_hyper_3_8_chunk.temp))
-   ->  Finalize GroupAggregate
+   ->  GroupAggregate
          Output: _hyper_3_8_chunk.device, avg(_hyper_3_8_chunk.temp)
          Group Key: _hyper_3_8_chunk.device
-         ->  Partial GroupAggregate
-               Output: _hyper_3_8_chunk.device, PARTIAL avg(_hyper_3_8_chunk.temp)
-               Group Key: _hyper_3_8_chunk.device
-               ->  Index Scan using _hyper_3_8_chunk_hyper_timepart_device_expr_idx on _timescaledb_internal._hyper_3_8_chunk
-                     Output: _hyper_3_8_chunk.device, _hyper_3_8_chunk.temp
-                     Index Cond: (_hyper_3_8_chunk.device = 1)
-(11 rows)
+         ->  Index Scan using _hyper_3_8_chunk_hyper_timepart_device_expr_idx on _timescaledb_internal._hyper_3_8_chunk
+               Output: _hyper_3_8_chunk.device, _hyper_3_8_chunk.temp
+               Index Cond: (_hyper_3_8_chunk.device = 1)
+(8 rows)
 
diff --git a/test/expected/partitionwise-14.out b/test/expected/partitionwise-14.out
index 8d511c54589..ea178ed5813 100644
--- a/test/expected/partitionwise-14.out
+++ b/test/expected/partitionwise-14.out
@@ -889,18 +889,15 @@ FROM hyper_timepart
 WHERE device = 1
 GROUP BY 1
 LIMIT 10;
-                                                           QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------
+                                                        QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------
  Limit
    Output: _hyper_3_8_chunk.device, (avg(_hyper_3_8_chunk.temp))
-   ->  Finalize GroupAggregate
+   ->  GroupAggregate
          Output: _hyper_3_8_chunk.device, avg(_hyper_3_8_chunk.temp)
          Group Key: _hyper_3_8_chunk.device
-         ->  Partial GroupAggregate
-               Output: _hyper_3_8_chunk.device, PARTIAL avg(_hyper_3_8_chunk.temp)
-               Group Key: _hyper_3_8_chunk.device
-               ->  Index Scan using _hyper_3_8_chunk_hyper_timepart_device_expr_idx on _timescaledb_internal._hyper_3_8_chunk
-                     Output: _hyper_3_8_chunk.device, _hyper_3_8_chunk.temp
-                     Index Cond: (_hyper_3_8_chunk.device = 1)
-(11 rows)
+         ->  Index Scan using _hyper_3_8_chunk_hyper_timepart_device_expr_idx on _timescaledb_internal._hyper_3_8_chunk
+               Output: _hyper_3_8_chunk.device, _hyper_3_8_chunk.temp
+               Index Cond: (_hyper_3_8_chunk.device = 1)
+(8 rows)
 
diff --git a/test/expected/partitionwise-15.out b/test/expected/partitionwise-15.out
index 8d511c54589..ea178ed5813 100644
--- a/test/expected/partitionwise-15.out
+++ b/test/expected/partitionwise-15.out
@@ -889,18 +889,15 @@ FROM hyper_timepart
 WHERE device = 1
 GROUP BY 1
 LIMIT 10;
-                                                           QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------
+                                                        QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------
  Limit
    Output: _hyper_3_8_chunk.device, (avg(_hyper_3_8_chunk.temp))
-   ->  Finalize GroupAggregate
+   ->  GroupAggregate
          Output: _hyper_3_8_chunk.device, avg(_hyper_3_8_chunk.temp)
          Group Key: _hyper_3_8_chunk.device
-         ->  Partial GroupAggregate
-               Output: _hyper_3_8_chunk.device, PARTIAL avg(_hyper_3_8_chunk.temp)
-               Group Key: _hyper_3_8_chunk.device
-               ->  Index Scan using _hyper_3_8_chunk_hyper_timepart_device_expr_idx on _timescaledb_internal._hyper_3_8_chunk
-                     Output: _hyper_3_8_chunk.device, _hyper_3_8_chunk.temp
-                     Index Cond: (_hyper_3_8_chunk.device = 1)
-(11 rows)
+         ->  Index Scan using _hyper_3_8_chunk_hyper_timepart_device_expr_idx on _timescaledb_internal._hyper_3_8_chunk
+               Output: _hyper_3_8_chunk.device, _hyper_3_8_chunk.temp
+               Index Cond: (_hyper_3_8_chunk.device = 1)
+(8 rows)
 
diff --git a/test/expected/partitionwise-16.out b/test/expected/partitionwise-16.out
index 102a11e3539..831f84d0e79 100644
--- a/test/expected/partitionwise-16.out
+++ b/test/expected/partitionwise-16.out
@@ -889,16 +889,14 @@ FROM hyper_timepart
 WHERE device = 1
 GROUP BY 1
 LIMIT 10;
-                                                           QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------
+                                                        QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------
  Limit
    Output: _hyper_3_8_chunk.device, (avg(_hyper_3_8_chunk.temp))
-   ->  Finalize GroupAggregate
+   ->  GroupAggregate
          Output: _hyper_3_8_chunk.device, avg(_hyper_3_8_chunk.temp)
-         ->  Partial GroupAggregate
-               Output: _hyper_3_8_chunk.device, PARTIAL avg(_hyper_3_8_chunk.temp)
-               ->  Index Scan using _hyper_3_8_chunk_hyper_timepart_device_expr_idx on _timescaledb_internal._hyper_3_8_chunk
-                     Output: _hyper_3_8_chunk.device, _hyper_3_8_chunk.temp
-                     Index Cond: (_hyper_3_8_chunk.device = 1)
-(9 rows)
+         ->  Index Scan using _hyper_3_8_chunk_hyper_timepart_device_expr_idx on _timescaledb_internal._hyper_3_8_chunk
+               Output: _hyper_3_8_chunk.device, _hyper_3_8_chunk.temp
+               Index Cond: (_hyper_3_8_chunk.device = 1)
+(7 rows)
 
diff --git a/tsl/test/expected/compression_qualpushdown.out b/tsl/test/expected/compression_qualpushdown.out
index 4394bc31113..7219e0acc1f 100644
--- a/tsl/test/expected/compression_qualpushdown.out
+++ b/tsl/test/expected/compression_qualpushdown.out
@@ -291,7 +291,7 @@ SELECT compress_chunk(i) FROM show_chunks('deleteme') i;
 EXPLAIN (costs off) SELECT sum(data) FROM deleteme WHERE segment::text like '%4%';
                         QUERY PLAN
 ---------------------------------------------------------
- Finalize Aggregate
+ Aggregate
    ->  Custom Scan (DecompressChunk) on _hyper_7_8_chunk
          ->  Seq Scan on compress_hyper_8_9_chunk
                Filter: ((segment)::text ~~ '%4%'::text)
@@ -300,7 +300,7 @@ EXPLAIN (costs off) SELECT sum(data) FROM deleteme WHERE segment::text like '%4%
 EXPLAIN (costs off) SELECT sum(data) FROM deleteme WHERE '4' = segment::text;
                         QUERY PLAN
 ---------------------------------------------------------
- Finalize Aggregate
+ Aggregate
    ->  Custom Scan (DecompressChunk) on _hyper_7_8_chunk
          ->  Seq Scan on compress_hyper_8_9_chunk
                Filter: ('4'::text = (segment)::text)
diff --git a/tsl/test/expected/merge_append_partially_compressed-13.out b/tsl/test/expected/merge_append_partially_compressed-13.out
index ae11ff788de..81fecb77aa1 100644
--- a/tsl/test/expected/merge_append_partially_compressed-13.out
+++ b/tsl/test/expected/merge_append_partially_compressed-13.out
@@ -4,6 +4,7 @@
 -- this test checks the validity of the produced plans for partially compressed chunks
 -- when injecting query_pathkeys on top of the append
 -- path that combines the uncompressed and compressed parts of a chunk.
+set enable_parallel_append to off; -- for less flaky plans
 set timescaledb.enable_decompression_sorted_merge = off;
 \set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)'
 CREATE TABLE ht_metrics_compressed(time timestamptz, device int, value float);
@@ -690,12 +691,14 @@ SELECT * FROM test1 ORDER BY time ASC NULLS FIRST, x3 DESC NULLS LAST, x4 ASC;
          ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
 (13 rows)
 
+set max_parallel_workers_per_gather = 0; -- parallel plan different on Windows
+set enable_hashagg to off; -- different on PG13
 :PREFIX
 SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10;
-                                           QUERY PLAN
-----------------------------------------------------------------------------------------------------------------
+                                        QUERY PLAN
+---------------------------------------------------------------------------------------------------------
  Limit (actual rows=5 loops=1)
-   ->  Finalize GroupAggregate (actual rows=5 loops=1)
+   ->  GroupAggregate (actual rows=5 loops=1)
          Group Key: test1."time", test1.x1, test1.x2
          ->  Custom Scan (ChunkAppend) on test1 (actual rows=5 loops=1)
                Order: test1."time", test1.x1, test1.x2
@@ -704,20 +707,16 @@ SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10
                ->  Sort (actual rows=4 loops=1)
                      Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
                      Sort Method: quicksort
-                     ->  Partial HashAggregate (actual rows=4 loops=1)
-                           Group Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
-                           Batches: 1
-                           ->  Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1)
-                                 ->  Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1)
+                     ->  Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1)
+                           ->  Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1)
                ->  Sort (actual rows=1 loops=1)
                      Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
                      Sort Method: quicksort
-                     ->  Partial HashAggregate (actual rows=1 loops=1)
-                           Group Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
-                           Batches: 1
-                           ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
-(22 rows)
+                     ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
+(16 rows)
 
+reset max_parallel_workers_per_gather;
+reset enable_hashagg;
 :PREFIX
 SELECT * FROM test1 ORDER BY x1, x2, x5, x4, time LIMIT 10;
                                         QUERY PLAN
diff --git a/tsl/test/expected/merge_append_partially_compressed-14.out b/tsl/test/expected/merge_append_partially_compressed-14.out
index d7ead712181..7142a997e94 100644
--- a/tsl/test/expected/merge_append_partially_compressed-14.out
+++ b/tsl/test/expected/merge_append_partially_compressed-14.out
@@ -4,6 +4,7 @@
 -- this test checks the validity of the produced plans for partially compressed chunks
 -- when injecting query_pathkeys on top of the append
 -- path that combines the uncompressed and compressed parts of a chunk.
+set enable_parallel_append to off; -- for less flaky plans
 set timescaledb.enable_decompression_sorted_merge = off;
 \set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)'
 CREATE TABLE ht_metrics_compressed(time timestamptz, device int, value float);
@@ -690,12 +691,14 @@ SELECT * FROM test1 ORDER BY time ASC NULLS FIRST, x3 DESC NULLS LAST, x4 ASC;
          ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
 (13 rows)
 
+set max_parallel_workers_per_gather = 0; -- parallel plan different on Windows
+set enable_hashagg to off; -- different on PG13
 :PREFIX
 SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10;
-                                           QUERY PLAN
-----------------------------------------------------------------------------------------------------------------
+                                        QUERY PLAN
+---------------------------------------------------------------------------------------------------------
  Limit (actual rows=5 loops=1)
-   ->  Finalize GroupAggregate (actual rows=5 loops=1)
+   ->  GroupAggregate (actual rows=5 loops=1)
          Group Key: test1."time", test1.x1, test1.x2
          ->  Custom Scan (ChunkAppend) on test1 (actual rows=5 loops=1)
                Order: test1."time", test1.x1, test1.x2
@@ -704,20 +707,16 @@ SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10
                ->  Sort (actual rows=4 loops=1)
                      Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
                      Sort Method: quicksort
-                     ->  Partial HashAggregate (actual rows=4 loops=1)
-                           Group Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
-                           Batches: 1
-                           ->  Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1)
-                                 ->  Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1)
+                     ->  Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1)
+                           ->  Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1)
                ->  Sort (actual rows=1 loops=1)
                      Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
                      Sort Method: quicksort
-                     ->  Partial HashAggregate (actual rows=1 loops=1)
-                           Group Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
-                           Batches: 1
-                           ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
-(22 rows)
+                     ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
+(16 rows)
 
+reset max_parallel_workers_per_gather;
+reset enable_hashagg;
 :PREFIX
 SELECT * FROM test1 ORDER BY x1, x2, x5, x4, time LIMIT 10;
                                         QUERY PLAN
diff --git a/tsl/test/expected/merge_append_partially_compressed-15.out b/tsl/test/expected/merge_append_partially_compressed-15.out
index 76fdb218d66..4b5b9c1452d 100644
--- a/tsl/test/expected/merge_append_partially_compressed-15.out
+++ b/tsl/test/expected/merge_append_partially_compressed-15.out
@@ -4,6 +4,7 @@
 -- this test checks the validity of the produced plans for partially compressed chunks
 -- when injecting query_pathkeys on top of the append
 -- path that combines the uncompressed and compressed parts of a chunk.
+set enable_parallel_append to off; -- for less flaky plans
 set timescaledb.enable_decompression_sorted_merge = off;
 \set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)'
 CREATE TABLE ht_metrics_compressed(time timestamptz, device int, value float);
@@ -696,12 +697,14 @@ SELECT * FROM test1 ORDER BY time ASC NULLS FIRST, x3 DESC NULLS LAST, x4 ASC;
          ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
 (13 rows)
 
+set max_parallel_workers_per_gather = 0; -- parallel plan different on Windows
+set enable_hashagg to off; -- different on PG13
 :PREFIX
 SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10;
-                                           QUERY PLAN
-----------------------------------------------------------------------------------------------------------------
+                                        QUERY PLAN
+---------------------------------------------------------------------------------------------------------
  Limit (actual rows=5 loops=1)
-   ->  Finalize GroupAggregate (actual rows=5 loops=1)
+   ->  GroupAggregate (actual rows=5 loops=1)
          Group Key: test1."time", test1.x1, test1.x2
          ->  Custom Scan (ChunkAppend) on test1 (actual rows=5 loops=1)
                Order: test1."time", test1.x1, test1.x2
@@ -710,20 +713,16 @@ SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10
                ->  Sort (actual rows=4 loops=1)
                      Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
                      Sort Method: quicksort
-                     ->  Partial HashAggregate (actual rows=4 loops=1)
-                           Group Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
-                           Batches: 1
-                           ->  Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1)
-                                 ->  Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1)
+                     ->  Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1)
+                           ->  Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1)
                ->  Sort (actual rows=1 loops=1)
                      Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
                      Sort Method: quicksort
-                     ->  Partial HashAggregate (actual rows=1 loops=1)
-                           Group Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
-                           Batches: 1
-                           ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
-(22 rows)
+                     ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
+(16 rows)
 
+reset max_parallel_workers_per_gather;
+reset enable_hashagg;
 :PREFIX
 SELECT * FROM test1 ORDER BY x1, x2, x5, x4, time LIMIT 10;
                                         QUERY PLAN
diff --git a/tsl/test/expected/merge_append_partially_compressed-16.out b/tsl/test/expected/merge_append_partially_compressed-16.out
index 76fdb218d66..4b5b9c1452d 100644
--- a/tsl/test/expected/merge_append_partially_compressed-16.out
+++ b/tsl/test/expected/merge_append_partially_compressed-16.out
@@ -4,6 +4,7 @@
 -- this test checks the validity of the produced plans for partially compressed chunks
 -- when injecting query_pathkeys on top of the append
 -- path that combines the uncompressed and compressed parts of a chunk.
+set enable_parallel_append to off; -- for less flaky plans
 set timescaledb.enable_decompression_sorted_merge = off;
 \set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)'
 CREATE TABLE ht_metrics_compressed(time timestamptz, device int, value float);
@@ -696,12 +697,14 @@ SELECT * FROM test1 ORDER BY time ASC NULLS FIRST, x3 DESC NULLS LAST, x4 ASC;
          ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
 (13 rows)
 
+set max_parallel_workers_per_gather = 0; -- parallel plan different on Windows
+set enable_hashagg to off; -- different on PG13
 :PREFIX
 SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10;
-                                           QUERY PLAN
-----------------------------------------------------------------------------------------------------------------
+                                        QUERY PLAN
+---------------------------------------------------------------------------------------------------------
  Limit (actual rows=5 loops=1)
-   ->  Finalize GroupAggregate (actual rows=5 loops=1)
+   ->  GroupAggregate (actual rows=5 loops=1)
          Group Key: test1."time", test1.x1, test1.x2
          ->  Custom Scan (ChunkAppend) on test1 (actual rows=5 loops=1)
                Order: test1."time", test1.x1, test1.x2
@@ -710,20 +713,16 @@ SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10
                ->  Sort (actual rows=4 loops=1)
                      Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
                      Sort Method: quicksort
-                     ->  Partial HashAggregate (actual rows=4 loops=1)
-                           Group Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
-                           Batches: 1
-                           ->  Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1)
-                                 ->  Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1)
+                     ->  Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1)
+                           ->  Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1)
                ->  Sort (actual rows=1 loops=1)
                      Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
                      Sort Method: quicksort
-                     ->  Partial HashAggregate (actual rows=1 loops=1)
-                           Group Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
-                           Batches: 1
-                           ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
-(22 rows)
+                     ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
+(16 rows)
 
+reset max_parallel_workers_per_gather;
+reset enable_hashagg;
 :PREFIX
 SELECT * FROM test1 ORDER BY x1, x2, x5, x4, time LIMIT 10;
                                         QUERY PLAN
diff --git a/tsl/test/sql/merge_append_partially_compressed.sql.in b/tsl/test/sql/merge_append_partially_compressed.sql.in
index 5075ac5d799..c20535a990a 100644
--- a/tsl/test/sql/merge_append_partially_compressed.sql.in
+++ b/tsl/test/sql/merge_append_partially_compressed.sql.in
@@ -6,6 +6,8 @@
 -- when injecting query_pathkeys on top of the append
 -- path that combines the uncompressed and compressed parts of a chunk.
 
+set enable_parallel_append to off; -- for less flaky plans
+
 set timescaledb.enable_decompression_sorted_merge = off;
 \set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)'
 
@@ -99,8 +101,12 @@ SELECT * FROM test1 ORDER BY time ASC NULLS LAST, x3 DESC NULLS FIRST, x4 DESC N
 :PREFIX
 SELECT * FROM test1 ORDER BY time ASC NULLS FIRST, x3 DESC NULLS LAST, x4 ASC;
 
+set max_parallel_workers_per_gather = 0; -- parallel plan different on Windows
+set enable_hashagg to off; -- different on PG13
 :PREFIX
 SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10;
+reset max_parallel_workers_per_gather;
+reset enable_hashagg;
 
 :PREFIX
 SELECT * FROM test1 ORDER BY x1, x2, x5, x4, time LIMIT 10;
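For reference, a minimal repro sketch of the planner change above. The table name and data here are hypothetical, not part of this patch or its test suite: with all rows in a single chunk, generate_agg_pushdown_path() now returns early, so the query should plan as a plain Aggregate over the chunk scan instead of a Finalize/Partial pair, matching the expected outputs updated above.

-- Hypothetical single-chunk repro; assumes a running TimescaleDB instance.
CREATE TABLE single_chunk(time timestamptz NOT NULL, temp float);
SELECT create_hypertable('single_chunk', 'time');
-- One day of data fits in one chunk with the default 7-day chunk interval.
INSERT INTO single_chunk
SELECT t, random()
FROM generate_series('2024-01-01'::timestamptz, '2024-01-02'::timestamptz,
                     interval '1 hour') AS t;
-- Expected shape after this patch: "Aggregate -> Seq Scan on _hyper_N_M_chunk",
-- with no "Partial" aggregate under a "Finalize" node for the single chunk.
EXPLAIN (costs off) SELECT avg(temp) FROM single_chunk;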