diff --git a/tsl/src/chunkwise_agg.c b/tsl/src/chunkwise_agg.c index 0bc302ea792..5d319e5f4b2 100644 --- a/tsl/src/chunkwise_agg.c +++ b/tsl/src/chunkwise_agg.c @@ -56,35 +56,75 @@ get_existing_agg_path(const RelOptInfo *relation) /* * Get all subpaths from a Append, MergeAppend, or ChunkAppend path */ -static List * -get_subpaths_from_append_path(Path *path, bool handle_gather_path) +static void +get_subpaths_from_append_path(Path *path, List **subpaths, Path **append, Path **gather) { if (IsA(path, AppendPath)) { AppendPath *append_path = castNode(AppendPath, path); - return append_path->subpaths; + *subpaths = append_path->subpaths; + *append = path; + return; } - else if (IsA(path, MergeAppendPath)) + + if (IsA(path, MergeAppendPath)) { MergeAppendPath *merge_append_path = castNode(MergeAppendPath, path); - return merge_append_path->subpaths; + *subpaths = merge_append_path->subpaths; + *append = path; + return; } - else if (ts_is_chunk_append_path(path)) + + if (ts_is_chunk_append_path(path)) { CustomPath *custom_path = castNode(CustomPath, path); - return custom_path->custom_paths; + *subpaths = custom_path->custom_paths; + *append = path; + return; } - else if (handle_gather_path && IsA(path, GatherPath)) + + if (IsA(path, GatherPath)) { - return get_subpaths_from_append_path(castNode(GatherPath, path)->subpath, false); + *gather = path; + get_subpaths_from_append_path(castNode(GatherPath, path)->subpath, + subpaths, + append, + /* gather = */ NULL); + return; } - else if (IsA(path, ProjectionPath)) + + if (IsA(path, GatherMergePath)) + { + *gather = path; + get_subpaths_from_append_path(castNode(GatherMergePath, path)->subpath, + subpaths, + append, + /* gather = */ NULL); + return; + } + + if (IsA(path, SortPath)) { - return get_subpaths_from_append_path(castNode(ProjectionPath, path)->subpath, false); + /* Can see GatherMerge -> Sort -> Partial HashAggregate in parallel plans. */ + get_subpaths_from_append_path(castNode(SortPath, path)->subpath, subpaths, append, gather); + return; + } + + if (IsA(path, AggPath)) + { + /* Can see GatherMerge -> Sort -> Partial HashAggregate in parallel plans. */ + get_subpaths_from_append_path(castNode(AggPath, path)->subpath, subpaths, append, gather); + return; + } + + if (IsA(path, ProjectionPath)) + { + ProjectionPath *projection = castNode(ProjectionPath, path); + get_subpaths_from_append_path(projection->subpath, subpaths, append, gather); + return; } /* Aggregation push-down is not supported for other path types so far */ - return NIL; } /* @@ -310,6 +350,9 @@ add_partially_aggregated_subpaths(PlannerInfo *root, PathTarget *input_target, * The generated paths contain partial aggregations (created by using AGGSPLIT_INITIAL_SERIAL). * These aggregations need to be finished by the caller by adding a node that performs the * AGGSPLIT_FINAL_DESERIAL step. + * + * The original path can be either a parallel or a non-parallel aggregation path, and the + * resulting path will be parallel accordingly.
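+ * + * Illustration only (a rough sketch, not exact EXPLAIN output): a non-parallel input looks like + * Append/MergeAppend/ChunkAppend -> per-chunk paths, + * and a parallel one like + * Gather/GatherMerge [-> Sort [-> Partial Aggregate]] -> Append -> per-chunk paths. + * In both cases, the partial aggregation is pushed down to sit directly above the per-chunk + * paths.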
*/ static void generate_agg_pushdown_path(PlannerInfo *root, Path *cheapest_total_path, RelOptInfo *input_rel, @@ -319,11 +362,18 @@ generate_agg_pushdown_path(PlannerInfo *root, Path *cheapest_total_path, RelOptI GroupPathExtraData *extra_data) { /* Get subpaths */ - List *subpaths = get_subpaths_from_append_path(cheapest_total_path, false); + List *subpaths = NIL; + Path *top_gather = NULL; + Path *top_append = NULL; + get_subpaths_from_append_path(cheapest_total_path, &subpaths, &top_append, &top_gather); /* No subpaths available or unsupported append node */ if (subpaths == NIL) + { return; + } + + Assert(top_append != NULL); if (list_length(subpaths) < 2) { @@ -350,17 +400,24 @@ generate_agg_pushdown_path(PlannerInfo *root, Path *cheapest_total_path, RelOptI * In this case, the partial aggregation needs to be pushed down below the lower * append path. */ - List *subsubpaths = get_subpaths_from_append_path(subpath, false); - - if (subsubpaths != NIL) + List *partially_compressed_paths = NIL; + Path *partially_compressed_append = NULL; + Path *partially_compressed_gather = NULL; + get_subpaths_from_append_path(subpath, + &partially_compressed_paths, + &partially_compressed_append, + &partially_compressed_gather); + Assert(partially_compressed_gather == NULL); + + if (partially_compressed_append != NULL) { - List *sorted_subsubpaths = NIL; - List *hashed_subsubpaths = NIL; + List *partially_compressed_sorted = NIL; + List *partially_compressed_hashed = NIL; ListCell *lc2; - foreach (lc2, subsubpaths) + foreach (lc2, partially_compressed_paths) { - Path *subsubpath = lfirst(lc2); + Path *partially_compressed_path = lfirst(lc2); add_partially_aggregated_subpaths(root, input_rel->reltarget, @@ -369,17 +426,17 @@ generate_agg_pushdown_path(PlannerInfo *root, Path *cheapest_total_path, RelOptI extra_data, can_sort, can_hash, - subsubpath, - &sorted_subsubpaths /* Result path */, - &hashed_subsubpaths /* Result path */); + partially_compressed_path, + &partially_compressed_sorted /* Result path */, + &partially_compressed_hashed /* Result path */); } if (can_sort) { sorted_subpaths = lappend(sorted_subpaths, copy_append_like_path(root, - subpath, - sorted_subsubpaths, + partially_compressed_append, + partially_compressed_sorted, subpath->pathtarget)); } @@ -387,8 +444,8 @@ generate_agg_pushdown_path(PlannerInfo *root, Path *cheapest_total_path, RelOptI { hashed_subpaths = lappend(hashed_subpaths, copy_append_like_path(root, - subpath, - hashed_subsubpaths, + partially_compressed_append, + partially_compressed_hashed, subpath->pathtarget)); } } @@ -408,141 +465,71 @@ generate_agg_pushdown_path(PlannerInfo *root, Path *cheapest_total_path, RelOptI } /* Create new append paths */ - if (sorted_subpaths != NIL) - { - add_path(partially_grouped_rel, - copy_append_like_path(root, - cheapest_total_path, - sorted_subpaths, - partial_grouping_target)); - } - - if (hashed_subpaths != NIL) - { - add_path(partially_grouped_rel, - copy_append_like_path(root, - cheapest_total_path, - hashed_subpaths, - partial_grouping_target)); - } -} - -/* - * Generate a partial aggregation path for chunk-wise partial aggregations. - - * This function does almost the same as generate_agg_pushdown_path(). In contrast, it processes a - * partial_path (paths that are usually used in parallel plans) of the input relation, pushes down - * the aggregation in this path and adds a gather node on top of the partial plan. Therefore, the - * push-down of the partial aggregates also works in parallel plans. 
- * - * Note: The PostgreSQL terminology can cause some confusion here. Partial paths are usually used by - * PostgreSQL to distribute work between parallel workers. This has nothing to do with the partial - * aggregation we are creating in the function. - */ -static void -generate_partial_agg_pushdown_path(PlannerInfo *root, Path *cheapest_partial_path, - RelOptInfo *input_rel, RelOptInfo *output_rel, - RelOptInfo *partially_grouped_rel, PathTarget *grouping_target, - PathTarget *partial_grouping_target, bool can_sort, - bool can_hash, double d_num_groups, - GroupPathExtraData *extra_data) -{ - /* Get subpaths */ - List *subpaths = get_subpaths_from_append_path(cheapest_partial_path, false); - - /* No subpaths available or unsupported append node */ - if (subpaths == NIL) - return; - - if (list_length(subpaths) < 2) + if (top_gather == NULL) { /* - * Doesn't make sense to add per-chunk aggregation paths if there's - * only one chunk. + * The original aggregation plan was non-parallel, so we're creating a + * non-parallel plan as well. */ - return; - } - /* Generate agg paths on top of the append children */ - ListCell *lc; - List *sorted_subpaths = NIL; - List *hashed_subpaths = NIL; - - foreach (lc, subpaths) - { - Path *subpath = lfirst(lc); - - Assert(subpath->parallel_safe); - - /* There should be no nested append paths in the partial paths to construct the upper - * relation */ - Assert(get_subpaths_from_append_path(subpath, false) == NIL); - - add_partially_aggregated_subpaths(root, - input_rel->reltarget, - partial_grouping_target, - d_num_groups, - extra_data, - can_sort, - can_hash, - subpath, - &sorted_subpaths /* Result paths */, - &hashed_subpaths /* Result paths */); - } - - /* Create new append paths */ - if (sorted_subpaths != NIL) - { - add_partial_path(partially_grouped_rel, - copy_append_like_path(root, - cheapest_partial_path, - sorted_subpaths, - partial_grouping_target)); - } + if (sorted_subpaths != NIL) + { + add_path(partially_grouped_rel, + copy_append_like_path(root, + top_append, + sorted_subpaths, + partial_grouping_target)); + } - if (hashed_subpaths != NIL) - { - add_partial_path(partially_grouped_rel, - copy_append_like_path(root, - cheapest_partial_path, - hashed_subpaths, - partial_grouping_target)); + if (hashed_subpaths != NIL) + { + add_path(partially_grouped_rel, + copy_append_like_path(root, + top_append, + hashed_subpaths, + partial_grouping_target)); + } } - - /* Finish the partial paths (just added by add_partial_path to partially_grouped_rel in this - * function) by adding a gather node and add this path to the partially_grouped_rel using - * add_path). */ - foreach (lc, partially_grouped_rel->partial_pathlist) + else { - Path *append_path = lfirst(lc); - double total_groups = append_path->rows * append_path->parallel_workers; - - Path *gather_path = (Path *) create_gather_path(root, - partially_grouped_rel, - append_path, - partially_grouped_rel->reltarget, - NULL, - &total_groups); - add_path(partially_grouped_rel, gather_path); - } -} + /* + * The cheapest aggregation plan was parallel, so we're creating a + * parallel plan as well. + */ + if (sorted_subpaths != NIL) + { + add_partial_path(partially_grouped_rel, + copy_append_like_path(root, + top_append, + sorted_subpaths, + partial_grouping_target)); + } -/* - * Get the best total path for aggregation. 
Prefer chunk append paths if we have one, otherwise - * return the cheapest_total_path; - */ -static Path * -get_best_total_path(RelOptInfo *output_rel) -{ - ListCell *lc; - foreach (lc, output_rel->pathlist) - { - Path *path = lfirst(lc); + if (hashed_subpaths != NIL) + { + add_partial_path(partially_grouped_rel, + copy_append_like_path(root, + top_append, + hashed_subpaths, + partial_grouping_target)); + } - if (ts_is_chunk_append_path(path)) - return path; + /* Finish the partial paths (just added by add_partial_path to partially_grouped_rel in this + * function) by adding a Gather node on top, and add the resulting path to + * partially_grouped_rel using add_path(). */ + foreach (lc, partially_grouped_rel->partial_pathlist) + { + Path *append_path = lfirst(lc); + double total_groups = append_path->rows * append_path->parallel_workers; + + Path *gather_path = (Path *) create_gather_path(root, + partially_grouped_rel, + append_path, + partially_grouped_rel->reltarget, + NULL, + &total_groups); + add_path(partially_grouped_rel, gather_path); + } } - - return output_rel->cheapest_total_path; } /* @@ -563,7 +550,10 @@ is_path_sorted_or_plain_agg_path(Path *path) static bool contains_path_plain_or_sorted_agg(Path *path) { - List *subpaths = get_subpaths_from_append_path(path, true); + List *subpaths = NIL; + Path *append = NULL; + Path *gather = NULL; + get_subpaths_from_append_path(path, &subpaths, &append, &gather); Ensure(subpaths != NIL, "Unable to determine aggregation type"); @@ -642,7 +632,7 @@ tsl_pushdown_partial_agg(PlannerInfo *root, Hypertable *ht, RelOptInfo *input_re return; /* Is sorting possible ? */ - bool can_sort = grouping_is_sortable(parse->groupClause) && ts_guc_enable_chunkwise_aggregation; + bool can_sort = grouping_is_sortable(parse->groupClause); /* Is hashing possible ? 
*/ bool can_hash = grouping_is_hashable(parse->groupClause) && @@ -654,7 +644,9 @@ tsl_pushdown_partial_agg(PlannerInfo *root, Hypertable *ht, RelOptInfo *input_re /* Determine the number of groups from the already planned aggregation */ AggPath *existing_agg_path = get_existing_agg_path(output_rel); if (existing_agg_path == NULL) + { return; + } /* Skip partial aggregations already created by _timescaledb_functions.partialize_agg */ if (existing_agg_path->aggsplit == AGGSPLIT_INITIAL_SERIAL) @@ -671,6 +663,7 @@ tsl_pushdown_partial_agg(PlannerInfo *root, Hypertable *ht, RelOptInfo *input_re RelOptInfo *partially_grouped_rel = fetch_upper_rel(root, UPPERREL_PARTIAL_GROUP_AGG, input_rel->relids); partially_grouped_rel->consider_parallel = input_rel->consider_parallel; + partially_grouped_rel->consider_startup = input_rel->consider_startup; partially_grouped_rel->reloptkind = input_rel->reloptkind; partially_grouped_rel->serverid = input_rel->serverid; partially_grouped_rel->userid = input_rel->userid; @@ -698,36 +691,36 @@ tsl_pushdown_partial_agg(PlannerInfo *root, Hypertable *ht, RelOptInfo *input_re extra_data->partial_costs_set = true; } - /* Generate the aggregation pushdown path */ - Path *cheapest_total_path = get_best_total_path(input_rel); - Assert(cheapest_total_path != NULL); - generate_agg_pushdown_path(root, - cheapest_total_path, - input_rel, - output_rel, - partially_grouped_rel, - grouping_target, - partial_grouping_target, - can_sort, - can_hash, - d_num_groups, - extra_data); - - /* The same as above but for partial paths */ - if (input_rel->partial_pathlist != NIL && input_rel->consider_parallel) + /* + * For queries with LIMIT, the aggregated relation can have a path with low + * total cost, and a path with low startup cost. We must partialize both, so + * loop through the entire pathlist. + */ + ListCell *lc; + foreach (lc, output_rel->pathlist) { - Path *cheapest_partial_path = linitial(input_rel->partial_pathlist); - generate_partial_agg_pushdown_path(root, - cheapest_partial_path, - input_rel, - output_rel, - partially_grouped_rel, - grouping_target, - partial_grouping_target, - can_sort, - can_hash, - d_num_groups, - extra_data); + Node *path = lfirst(lc); + if (!IsA(path, AggPath)) + { + /* + * Shouldn't happen, but here we work with arbitrary paths we don't + * control, so it's not an assertion. + */ + continue; + } + + /* Generate the aggregation pushdown path */ + generate_agg_pushdown_path(root, + (Path *) path, + input_rel, + output_rel, + partially_grouped_rel, + grouping_target, + partial_grouping_target, + can_sort, + can_hash, + d_num_groups, + extra_data); } /* Replan aggregation if we were able to generate partially grouped rel paths */ @@ -741,7 +734,6 @@ tsl_pushdown_partial_agg(PlannerInfo *root, Hypertable *ht, RelOptInfo *input_re /* Finalize the created partially aggregated paths by adding a 'Finalize Aggregate' node on top * of them. 
*/ AggClauseCosts *agg_final_costs = &extra_data->agg_final_costs; - ListCell *lc; foreach (lc, partially_grouped_rel->pathlist) { Path *append_path = lfirst(lc); diff --git a/tsl/src/nodes/vector_agg/plan.c b/tsl/src/nodes/vector_agg/plan.c index c8c204b5b8d..210b3f322a2 100644 --- a/tsl/src/nodes/vector_agg/plan.c +++ b/tsl/src/nodes/vector_agg/plan.c @@ -409,6 +409,10 @@ has_vector_agg_node(Plan *plan, bool *has_normal_agg) { append_plans = castNode(Append, plan)->appendplans; } + else if (IsA(plan, MergeAppend)) + { + append_plans = castNode(MergeAppend, plan)->mergeplans; + } else if (IsA(plan, CustomScan)) { custom = castNode(CustomScan, plan); @@ -471,6 +475,10 @@ try_insert_vector_agg_node(Plan *plan) { append_plans = castNode(Append, plan)->appendplans; } + else if (IsA(plan, MergeAppend)) + { + append_plans = castNode(MergeAppend, plan)->mergeplans; + } else if (IsA(plan, CustomScan)) { CustomScan *custom = castNode(CustomScan, plan); diff --git a/tsl/test/expected/cagg_union_view-14.out b/tsl/test/expected/cagg_union_view-14.out index d352ee34b7b..d53b0825a20 100644 --- a/tsl/test/expected/cagg_union_view-14.out +++ b/tsl/test/expected/cagg_union_view-14.out @@ -355,31 +355,29 @@ SELECT _timescaledb_functions.cagg_watermark(:boundary_view_id); -- first UNION child should have no rows because no materialization has happened yet and 2nd child should have 4 rows :PREFIX SELECT * FROM boundary_view; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------- Finalize GroupAggregate (actual rows=4 loops=1) Group Key: (time_bucket(10, _hyper_5_5_chunk."time")) - -> Sort (actual rows=4 loops=1) + -> Merge Append (actual rows=4 loops=1) Sort Key: (time_bucket(10, _hyper_5_5_chunk."time")) - Sort Method: quicksort - -> Append (actual rows=4 loops=1) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: time_bucket(10, _hyper_5_5_chunk."time") - -> Index Scan Backward using _hyper_5_5_chunk_boundary_test_time_idx on _hyper_5_5_chunk (actual rows=1 loops=1) - Index Cond: ("time" >= '-2147483648'::integer) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: time_bucket(10, _hyper_5_6_chunk."time") - -> Index Scan Backward using _hyper_5_6_chunk_boundary_test_time_idx on _hyper_5_6_chunk (actual rows=1 loops=1) - Index Cond: ("time" >= '-2147483648'::integer) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: time_bucket(10, _hyper_5_7_chunk."time") - -> Index Scan Backward using _hyper_5_7_chunk_boundary_test_time_idx on _hyper_5_7_chunk (actual rows=1 loops=1) - Index Cond: ("time" >= '-2147483648'::integer) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: time_bucket(10, _hyper_5_8_chunk."time") - -> Index Scan Backward using _hyper_5_8_chunk_boundary_test_time_idx on _hyper_5_8_chunk (actual rows=1 loops=1) - Index Cond: ("time" >= '-2147483648'::integer) -(22 rows) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(10, _hyper_5_5_chunk."time") + -> Index Scan Backward using _hyper_5_5_chunk_boundary_test_time_idx on _hyper_5_5_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= '-2147483648'::integer) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(10, _hyper_5_6_chunk."time") + -> Index Scan Backward using _hyper_5_6_chunk_boundary_test_time_idx on 
_hyper_5_6_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= '-2147483648'::integer) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(10, _hyper_5_7_chunk."time") + -> Index Scan Backward using _hyper_5_7_chunk_boundary_test_time_idx on _hyper_5_7_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= '-2147483648'::integer) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(10, _hyper_5_8_chunk."time") + -> Index Scan Backward using _hyper_5_8_chunk_boundary_test_time_idx on _hyper_5_8_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= '-2147483648'::integer) +(20 rows) -- result should have 4 rows SELECT * FROM boundary_view ORDER BY time_bucket; @@ -403,26 +401,24 @@ SELECT _timescaledb_functions.cagg_watermark(:boundary_view_id); -- both sides of the UNION should return 2 rows :PREFIX SELECT * FROM boundary_view; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------- Append (actual rows=4 loops=1) -> Index Scan using _hyper_6_9_chunk__materialized_hypertable_6_time_bucket_idx on _hyper_6_9_chunk (actual rows=2 loops=1) Index Cond: (time_bucket < 30) -> Finalize GroupAggregate (actual rows=2 loops=1) Group Key: (time_bucket(10, _hyper_5_7_chunk."time")) - -> Sort (actual rows=2 loops=1) + -> Merge Append (actual rows=2 loops=1) Sort Key: (time_bucket(10, _hyper_5_7_chunk."time")) - Sort Method: quicksort - -> Append (actual rows=2 loops=1) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: time_bucket(10, _hyper_5_7_chunk."time") - -> Index Scan Backward using _hyper_5_7_chunk_boundary_test_time_idx on _hyper_5_7_chunk (actual rows=1 loops=1) - Index Cond: ("time" >= 30) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: time_bucket(10, _hyper_5_8_chunk."time") - -> Index Scan Backward using _hyper_5_8_chunk_boundary_test_time_idx on _hyper_5_8_chunk (actual rows=1 loops=1) - Index Cond: ("time" >= 30) -(17 rows) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(10, _hyper_5_7_chunk."time") + -> Index Scan Backward using _hyper_5_7_chunk_boundary_test_time_idx on _hyper_5_7_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= 30) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(10, _hyper_5_8_chunk."time") + -> Index Scan Backward using _hyper_5_8_chunk_boundary_test_time_idx on _hyper_5_8_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= 30) +(15 rows) -- result should have 4 rows SELECT * FROM boundary_view ORDER BY time_bucket; @@ -592,8 +588,8 @@ ORDER by 1; -- plan output :PREFIX SELECT * FROM mat_m1 ORDER BY 1; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------ + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------- Sort (actual rows=3 loops=1) Sort Key: _hyper_9_15_chunk.time_bucket Sort Method: quicksort @@ -604,27 +600,25 @@ ORDER by 1; Group Key: (time_bucket(5, _hyper_7_11_chunk.a)) Filter: ((sum(_hyper_7_11_chunk.c) > 50) AND ((avg(_hyper_7_11_chunk.b))::integer > 12)) Rows Removed by Filter: 1 - -> Sort (actual rows=3 loops=1) + -> Merge Append (actual rows=3 loops=1) Sort Key: 
(time_bucket(5, _hyper_7_11_chunk.a)) - Sort Method: quicksort - -> Append (actual rows=3 loops=1) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: time_bucket(5, _hyper_7_11_chunk.a) - -> Index Scan Backward using _hyper_7_11_chunk_ht_intdata_a_idx on _hyper_7_11_chunk (actual rows=2 loops=1) - Index Cond: (a >= 25) - Filter: ((b < 16) AND (c > 20)) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: time_bucket(5, _hyper_7_13_chunk.a) - -> Index Scan Backward using _hyper_7_13_chunk_ht_intdata_a_idx on _hyper_7_13_chunk (actual rows=3 loops=1) - Index Cond: (a >= 25) - Filter: ((b < 16) AND (c > 20)) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: time_bucket(5, _hyper_7_14_chunk.a) - -> Index Scan Backward using _hyper_7_14_chunk_ht_intdata_a_idx on _hyper_7_14_chunk (actual rows=1 loops=1) - Index Cond: (a >= 25) - Filter: ((b < 16) AND (c > 20)) - Rows Removed by Filter: 2 -(30 rows) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(5, _hyper_7_11_chunk.a) + -> Index Scan Backward using _hyper_7_11_chunk_ht_intdata_a_idx on _hyper_7_11_chunk (actual rows=2 loops=1) + Index Cond: (a >= 25) + Filter: ((b < 16) AND (c > 20)) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(5, _hyper_7_13_chunk.a) + -> Index Scan Backward using _hyper_7_13_chunk_ht_intdata_a_idx on _hyper_7_13_chunk (actual rows=3 loops=1) + Index Cond: (a >= 25) + Filter: ((b < 16) AND (c > 20)) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(5, _hyper_7_14_chunk.a) + -> Index Scan Backward using _hyper_7_14_chunk_ht_intdata_a_idx on _hyper_7_14_chunk (actual rows=1 loops=1) + Index Cond: (a >= 25) + Filter: ((b < 16) AND (c > 20)) + Rows Removed by Filter: 2 +(28 rows) -- Test caggs with different time types CREATE TABLE smallint_table (time smallint, value int); diff --git a/tsl/test/expected/cagg_union_view-15.out b/tsl/test/expected/cagg_union_view-15.out index d352ee34b7b..d53b0825a20 100644 --- a/tsl/test/expected/cagg_union_view-15.out +++ b/tsl/test/expected/cagg_union_view-15.out @@ -355,31 +355,29 @@ SELECT _timescaledb_functions.cagg_watermark(:boundary_view_id); -- first UNION child should have no rows because no materialization has happened yet and 2nd child should have 4 rows :PREFIX SELECT * FROM boundary_view; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------- Finalize GroupAggregate (actual rows=4 loops=1) Group Key: (time_bucket(10, _hyper_5_5_chunk."time")) - -> Sort (actual rows=4 loops=1) + -> Merge Append (actual rows=4 loops=1) Sort Key: (time_bucket(10, _hyper_5_5_chunk."time")) - Sort Method: quicksort - -> Append (actual rows=4 loops=1) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: time_bucket(10, _hyper_5_5_chunk."time") - -> Index Scan Backward using _hyper_5_5_chunk_boundary_test_time_idx on _hyper_5_5_chunk (actual rows=1 loops=1) - Index Cond: ("time" >= '-2147483648'::integer) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: time_bucket(10, _hyper_5_6_chunk."time") - -> Index Scan Backward using _hyper_5_6_chunk_boundary_test_time_idx on _hyper_5_6_chunk (actual rows=1 loops=1) - Index Cond: ("time" >= '-2147483648'::integer) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: 
time_bucket(10, _hyper_5_7_chunk."time") - -> Index Scan Backward using _hyper_5_7_chunk_boundary_test_time_idx on _hyper_5_7_chunk (actual rows=1 loops=1) - Index Cond: ("time" >= '-2147483648'::integer) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: time_bucket(10, _hyper_5_8_chunk."time") - -> Index Scan Backward using _hyper_5_8_chunk_boundary_test_time_idx on _hyper_5_8_chunk (actual rows=1 loops=1) - Index Cond: ("time" >= '-2147483648'::integer) -(22 rows) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(10, _hyper_5_5_chunk."time") + -> Index Scan Backward using _hyper_5_5_chunk_boundary_test_time_idx on _hyper_5_5_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= '-2147483648'::integer) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(10, _hyper_5_6_chunk."time") + -> Index Scan Backward using _hyper_5_6_chunk_boundary_test_time_idx on _hyper_5_6_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= '-2147483648'::integer) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(10, _hyper_5_7_chunk."time") + -> Index Scan Backward using _hyper_5_7_chunk_boundary_test_time_idx on _hyper_5_7_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= '-2147483648'::integer) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(10, _hyper_5_8_chunk."time") + -> Index Scan Backward using _hyper_5_8_chunk_boundary_test_time_idx on _hyper_5_8_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= '-2147483648'::integer) +(20 rows) -- result should have 4 rows SELECT * FROM boundary_view ORDER BY time_bucket; @@ -403,26 +401,24 @@ SELECT _timescaledb_functions.cagg_watermark(:boundary_view_id); -- both sides of the UNION should return 2 rows :PREFIX SELECT * FROM boundary_view; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------- Append (actual rows=4 loops=1) -> Index Scan using _hyper_6_9_chunk__materialized_hypertable_6_time_bucket_idx on _hyper_6_9_chunk (actual rows=2 loops=1) Index Cond: (time_bucket < 30) -> Finalize GroupAggregate (actual rows=2 loops=1) Group Key: (time_bucket(10, _hyper_5_7_chunk."time")) - -> Sort (actual rows=2 loops=1) + -> Merge Append (actual rows=2 loops=1) Sort Key: (time_bucket(10, _hyper_5_7_chunk."time")) - Sort Method: quicksort - -> Append (actual rows=2 loops=1) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: time_bucket(10, _hyper_5_7_chunk."time") - -> Index Scan Backward using _hyper_5_7_chunk_boundary_test_time_idx on _hyper_5_7_chunk (actual rows=1 loops=1) - Index Cond: ("time" >= 30) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: time_bucket(10, _hyper_5_8_chunk."time") - -> Index Scan Backward using _hyper_5_8_chunk_boundary_test_time_idx on _hyper_5_8_chunk (actual rows=1 loops=1) - Index Cond: ("time" >= 30) -(17 rows) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(10, _hyper_5_7_chunk."time") + -> Index Scan Backward using _hyper_5_7_chunk_boundary_test_time_idx on _hyper_5_7_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= 30) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(10, _hyper_5_8_chunk."time") + -> Index Scan Backward using _hyper_5_8_chunk_boundary_test_time_idx 
on _hyper_5_8_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= 30) +(15 rows) -- result should have 4 rows SELECT * FROM boundary_view ORDER BY time_bucket; @@ -592,8 +588,8 @@ ORDER by 1; -- plan output :PREFIX SELECT * FROM mat_m1 ORDER BY 1; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------ + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------- Sort (actual rows=3 loops=1) Sort Key: _hyper_9_15_chunk.time_bucket Sort Method: quicksort @@ -604,27 +600,25 @@ ORDER by 1; Group Key: (time_bucket(5, _hyper_7_11_chunk.a)) Filter: ((sum(_hyper_7_11_chunk.c) > 50) AND ((avg(_hyper_7_11_chunk.b))::integer > 12)) Rows Removed by Filter: 1 - -> Sort (actual rows=3 loops=1) + -> Merge Append (actual rows=3 loops=1) Sort Key: (time_bucket(5, _hyper_7_11_chunk.a)) - Sort Method: quicksort - -> Append (actual rows=3 loops=1) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: time_bucket(5, _hyper_7_11_chunk.a) - -> Index Scan Backward using _hyper_7_11_chunk_ht_intdata_a_idx on _hyper_7_11_chunk (actual rows=2 loops=1) - Index Cond: (a >= 25) - Filter: ((b < 16) AND (c > 20)) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: time_bucket(5, _hyper_7_13_chunk.a) - -> Index Scan Backward using _hyper_7_13_chunk_ht_intdata_a_idx on _hyper_7_13_chunk (actual rows=3 loops=1) - Index Cond: (a >= 25) - Filter: ((b < 16) AND (c > 20)) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: time_bucket(5, _hyper_7_14_chunk.a) - -> Index Scan Backward using _hyper_7_14_chunk_ht_intdata_a_idx on _hyper_7_14_chunk (actual rows=1 loops=1) - Index Cond: (a >= 25) - Filter: ((b < 16) AND (c > 20)) - Rows Removed by Filter: 2 -(30 rows) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(5, _hyper_7_11_chunk.a) + -> Index Scan Backward using _hyper_7_11_chunk_ht_intdata_a_idx on _hyper_7_11_chunk (actual rows=2 loops=1) + Index Cond: (a >= 25) + Filter: ((b < 16) AND (c > 20)) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(5, _hyper_7_13_chunk.a) + -> Index Scan Backward using _hyper_7_13_chunk_ht_intdata_a_idx on _hyper_7_13_chunk (actual rows=3 loops=1) + Index Cond: (a >= 25) + Filter: ((b < 16) AND (c > 20)) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(5, _hyper_7_14_chunk.a) + -> Index Scan Backward using _hyper_7_14_chunk_ht_intdata_a_idx on _hyper_7_14_chunk (actual rows=1 loops=1) + Index Cond: (a >= 25) + Filter: ((b < 16) AND (c > 20)) + Rows Removed by Filter: 2 +(28 rows) -- Test caggs with different time types CREATE TABLE smallint_table (time smallint, value int); diff --git a/tsl/test/expected/cagg_union_view-16.out b/tsl/test/expected/cagg_union_view-16.out index 436a465c123..82256a15725 100644 --- a/tsl/test/expected/cagg_union_view-16.out +++ b/tsl/test/expected/cagg_union_view-16.out @@ -355,31 +355,29 @@ SELECT _timescaledb_functions.cagg_watermark(:boundary_view_id); -- first UNION child should have no rows because no materialization has happened yet and 2nd child should have 4 rows :PREFIX SELECT * FROM boundary_view; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN 
+--------------------------------------------------------------------------------------------------------------------------------- Finalize GroupAggregate (actual rows=4 loops=1) Group Key: (time_bucket(10, _hyper_5_5_chunk."time")) - -> Sort (actual rows=4 loops=1) + -> Merge Append (actual rows=4 loops=1) Sort Key: (time_bucket(10, _hyper_5_5_chunk."time")) - Sort Method: quicksort - -> Append (actual rows=4 loops=1) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: time_bucket(10, _hyper_5_5_chunk."time") - -> Index Scan Backward using _hyper_5_5_chunk_boundary_test_time_idx on _hyper_5_5_chunk (actual rows=1 loops=1) - Index Cond: ("time" >= '-2147483648'::integer) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: time_bucket(10, _hyper_5_6_chunk."time") - -> Index Scan Backward using _hyper_5_6_chunk_boundary_test_time_idx on _hyper_5_6_chunk (actual rows=1 loops=1) - Index Cond: ("time" >= '-2147483648'::integer) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: time_bucket(10, _hyper_5_7_chunk."time") - -> Index Scan Backward using _hyper_5_7_chunk_boundary_test_time_idx on _hyper_5_7_chunk (actual rows=1 loops=1) - Index Cond: ("time" >= '-2147483648'::integer) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: time_bucket(10, _hyper_5_8_chunk."time") - -> Index Scan Backward using _hyper_5_8_chunk_boundary_test_time_idx on _hyper_5_8_chunk (actual rows=1 loops=1) - Index Cond: ("time" >= '-2147483648'::integer) -(22 rows) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(10, _hyper_5_5_chunk."time") + -> Index Scan Backward using _hyper_5_5_chunk_boundary_test_time_idx on _hyper_5_5_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= '-2147483648'::integer) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(10, _hyper_5_6_chunk."time") + -> Index Scan Backward using _hyper_5_6_chunk_boundary_test_time_idx on _hyper_5_6_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= '-2147483648'::integer) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(10, _hyper_5_7_chunk."time") + -> Index Scan Backward using _hyper_5_7_chunk_boundary_test_time_idx on _hyper_5_7_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= '-2147483648'::integer) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(10, _hyper_5_8_chunk."time") + -> Index Scan Backward using _hyper_5_8_chunk_boundary_test_time_idx on _hyper_5_8_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= '-2147483648'::integer) +(20 rows) -- result should have 4 rows SELECT * FROM boundary_view ORDER BY time_bucket; @@ -403,26 +401,24 @@ SELECT _timescaledb_functions.cagg_watermark(:boundary_view_id); -- both sides of the UNION should return 2 rows :PREFIX SELECT * FROM boundary_view; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------- Append (actual rows=4 loops=1) -> Index Scan using _hyper_6_9_chunk__materialized_hypertable_6_time_bucket_idx on _hyper_6_9_chunk (actual rows=2 loops=1) Index Cond: (time_bucket < 30) -> Finalize GroupAggregate (actual rows=2 loops=1) Group Key: (time_bucket(10, _hyper_5_7_chunk."time")) - -> Sort (actual rows=2 loops=1) + -> Merge Append (actual rows=2 loops=1) Sort Key: 
(time_bucket(10, _hyper_5_7_chunk."time")) - Sort Method: quicksort - -> Append (actual rows=2 loops=1) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: time_bucket(10, _hyper_5_7_chunk."time") - -> Index Scan Backward using _hyper_5_7_chunk_boundary_test_time_idx on _hyper_5_7_chunk (actual rows=1 loops=1) - Index Cond: ("time" >= 30) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: time_bucket(10, _hyper_5_8_chunk."time") - -> Index Scan Backward using _hyper_5_8_chunk_boundary_test_time_idx on _hyper_5_8_chunk (actual rows=1 loops=1) - Index Cond: ("time" >= 30) -(17 rows) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(10, _hyper_5_7_chunk."time") + -> Index Scan Backward using _hyper_5_7_chunk_boundary_test_time_idx on _hyper_5_7_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= 30) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(10, _hyper_5_8_chunk."time") + -> Index Scan Backward using _hyper_5_8_chunk_boundary_test_time_idx on _hyper_5_8_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= 30) +(15 rows) -- result should have 4 rows SELECT * FROM boundary_view ORDER BY time_bucket; diff --git a/tsl/test/expected/cagg_union_view-17.out b/tsl/test/expected/cagg_union_view-17.out index 436a465c123..82256a15725 100644 --- a/tsl/test/expected/cagg_union_view-17.out +++ b/tsl/test/expected/cagg_union_view-17.out @@ -355,31 +355,29 @@ SELECT _timescaledb_functions.cagg_watermark(:boundary_view_id); -- first UNION child should have no rows because no materialization has happened yet and 2nd child should have 4 rows :PREFIX SELECT * FROM boundary_view; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------- Finalize GroupAggregate (actual rows=4 loops=1) Group Key: (time_bucket(10, _hyper_5_5_chunk."time")) - -> Sort (actual rows=4 loops=1) + -> Merge Append (actual rows=4 loops=1) Sort Key: (time_bucket(10, _hyper_5_5_chunk."time")) - Sort Method: quicksort - -> Append (actual rows=4 loops=1) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: time_bucket(10, _hyper_5_5_chunk."time") - -> Index Scan Backward using _hyper_5_5_chunk_boundary_test_time_idx on _hyper_5_5_chunk (actual rows=1 loops=1) - Index Cond: ("time" >= '-2147483648'::integer) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: time_bucket(10, _hyper_5_6_chunk."time") - -> Index Scan Backward using _hyper_5_6_chunk_boundary_test_time_idx on _hyper_5_6_chunk (actual rows=1 loops=1) - Index Cond: ("time" >= '-2147483648'::integer) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: time_bucket(10, _hyper_5_7_chunk."time") - -> Index Scan Backward using _hyper_5_7_chunk_boundary_test_time_idx on _hyper_5_7_chunk (actual rows=1 loops=1) - Index Cond: ("time" >= '-2147483648'::integer) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: time_bucket(10, _hyper_5_8_chunk."time") - -> Index Scan Backward using _hyper_5_8_chunk_boundary_test_time_idx on _hyper_5_8_chunk (actual rows=1 loops=1) - Index Cond: ("time" >= '-2147483648'::integer) -(22 rows) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(10, _hyper_5_5_chunk."time") + -> Index Scan Backward using _hyper_5_5_chunk_boundary_test_time_idx on _hyper_5_5_chunk 
(actual rows=1 loops=1) + Index Cond: ("time" >= '-2147483648'::integer) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(10, _hyper_5_6_chunk."time") + -> Index Scan Backward using _hyper_5_6_chunk_boundary_test_time_idx on _hyper_5_6_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= '-2147483648'::integer) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(10, _hyper_5_7_chunk."time") + -> Index Scan Backward using _hyper_5_7_chunk_boundary_test_time_idx on _hyper_5_7_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= '-2147483648'::integer) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(10, _hyper_5_8_chunk."time") + -> Index Scan Backward using _hyper_5_8_chunk_boundary_test_time_idx on _hyper_5_8_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= '-2147483648'::integer) +(20 rows) -- result should have 4 rows SELECT * FROM boundary_view ORDER BY time_bucket; @@ -403,26 +401,24 @@ SELECT _timescaledb_functions.cagg_watermark(:boundary_view_id); -- both sides of the UNION should return 2 rows :PREFIX SELECT * FROM boundary_view; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------- Append (actual rows=4 loops=1) -> Index Scan using _hyper_6_9_chunk__materialized_hypertable_6_time_bucket_idx on _hyper_6_9_chunk (actual rows=2 loops=1) Index Cond: (time_bucket < 30) -> Finalize GroupAggregate (actual rows=2 loops=1) Group Key: (time_bucket(10, _hyper_5_7_chunk."time")) - -> Sort (actual rows=2 loops=1) + -> Merge Append (actual rows=2 loops=1) Sort Key: (time_bucket(10, _hyper_5_7_chunk."time")) - Sort Method: quicksort - -> Append (actual rows=2 loops=1) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: time_bucket(10, _hyper_5_7_chunk."time") - -> Index Scan Backward using _hyper_5_7_chunk_boundary_test_time_idx on _hyper_5_7_chunk (actual rows=1 loops=1) - Index Cond: ("time" >= 30) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: time_bucket(10, _hyper_5_8_chunk."time") - -> Index Scan Backward using _hyper_5_8_chunk_boundary_test_time_idx on _hyper_5_8_chunk (actual rows=1 loops=1) - Index Cond: ("time" >= 30) -(17 rows) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(10, _hyper_5_7_chunk."time") + -> Index Scan Backward using _hyper_5_7_chunk_boundary_test_time_idx on _hyper_5_7_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= 30) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: time_bucket(10, _hyper_5_8_chunk."time") + -> Index Scan Backward using _hyper_5_8_chunk_boundary_test_time_idx on _hyper_5_8_chunk (actual rows=1 loops=1) + Index Cond: ("time" >= 30) +(15 rows) -- result should have 4 rows SELECT * FROM boundary_view ORDER BY time_bucket; diff --git a/tsl/test/expected/compression_ddl.out b/tsl/test/expected/compression_ddl.out index ecbbdc8828b..edb7008c317 100644 --- a/tsl/test/expected/compression_ddl.out +++ b/tsl/test/expected/compression_ddl.out @@ -1444,20 +1444,19 @@ EXPLAIN (costs off) SELECT device_id, count(*) FROM compression_insert GROUP BY device_id ORDER BY device_id; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------ + QUERY PLAN 
+----------------------------------------------------------------------------------------------------------------------------------------- Finalize GroupAggregate Group Key: _hyper_31_110_chunk.device_id - -> Sort + -> Merge Append Sort Key: _hyper_31_110_chunk.device_id - -> Append - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_31_110_chunk - -> Index Scan using compress_hyper_32_111_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_111_chunk - -> Partial GroupAggregate - Group Key: _hyper_31_110_chunk.device_id - -> Index Only Scan using _hyper_31_110_chunk_compression_insert_device_id_time_idx on _hyper_31_110_chunk -(11 rows) + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_31_110_chunk + -> Index Scan using compress_hyper_32_111_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_111_chunk + -> Partial GroupAggregate + Group Key: _hyper_31_110_chunk.device_id + -> Index Only Scan using _hyper_31_110_chunk_compression_insert_device_id_time_idx on _hyper_31_110_chunk +(10 rows) SELECT device_id, count(*) FROM compression_insert @@ -1528,23 +1527,22 @@ EXPLAIN (costs off) SELECT device_id, count(*) FROM compression_insert GROUP BY device_id ORDER BY device_id; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------ + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------- Finalize GroupAggregate Group Key: _hyper_31_110_chunk.device_id - -> Sort + -> Merge Append Sort Key: _hyper_31_110_chunk.device_id - -> Append - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_31_110_chunk - -> Index Scan using compress_hyper_32_111_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_111_chunk - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_31_112_chunk - -> Index Scan using compress_hyper_32_113_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_113_chunk - -> Partial GroupAggregate - Group Key: _hyper_31_112_chunk.device_id - -> Index Only Scan using _hyper_31_112_chunk_compression_insert_device_id_time_idx on _hyper_31_112_chunk -(14 rows) + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_31_110_chunk + -> Index Scan using compress_hyper_32_111_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_111_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_31_112_chunk + -> Index Scan using compress_hyper_32_113_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_113_chunk + -> Partial GroupAggregate + Group Key: _hyper_31_112_chunk.device_id + -> Index Only Scan using _hyper_31_112_chunk_compression_insert_device_id_time_idx on _hyper_31_112_chunk +(13 rows) SELECT device_id, count(*) FROM compression_insert @@ -1615,26 +1613,25 @@ EXPLAIN (costs off) SELECT device_id, count(*) FROM compression_insert GROUP BY device_id ORDER BY device_id; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------ + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------- Finalize GroupAggregate Group Key: _hyper_31_110_chunk.device_id - -> Sort + -> Merge Append Sort Key: _hyper_31_110_chunk.device_id - -> Append - -> 
Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_31_110_chunk - -> Index Scan using compress_hyper_32_111_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_111_chunk - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_31_112_chunk - -> Index Scan using compress_hyper_32_113_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_113_chunk - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_31_114_chunk - -> Index Scan using compress_hyper_32_115_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_115_chunk - -> Partial GroupAggregate - Group Key: _hyper_31_114_chunk.device_id - -> Index Only Scan using _hyper_31_114_chunk_compression_insert_device_id_time_idx on _hyper_31_114_chunk -(17 rows) + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_31_110_chunk + -> Index Scan using compress_hyper_32_111_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_111_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_31_112_chunk + -> Index Scan using compress_hyper_32_113_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_113_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_31_114_chunk + -> Index Scan using compress_hyper_32_115_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_115_chunk + -> Partial GroupAggregate + Group Key: _hyper_31_114_chunk.device_id + -> Index Only Scan using _hyper_31_114_chunk_compression_insert_device_id_time_idx on _hyper_31_114_chunk +(16 rows) SELECT device_id, count(*) FROM compression_insert @@ -1705,29 +1702,28 @@ EXPLAIN (costs off) SELECT device_id, count(*) FROM compression_insert GROUP BY device_id ORDER BY device_id; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------ + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------- Finalize GroupAggregate Group Key: _hyper_31_110_chunk.device_id - -> Sort + -> Merge Append Sort Key: _hyper_31_110_chunk.device_id - -> Append - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_31_110_chunk - -> Index Scan using compress_hyper_32_111_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_111_chunk - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_31_112_chunk - -> Index Scan using compress_hyper_32_113_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_113_chunk - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_31_114_chunk - -> Index Scan using compress_hyper_32_115_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_115_chunk - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_31_116_chunk - -> Index Scan using compress_hyper_32_117_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_117_chunk - -> Partial GroupAggregate - Group Key: _hyper_31_116_chunk.device_id - -> Index Only Scan using _hyper_31_116_chunk_compression_insert_device_id_time_idx on _hyper_31_116_chunk -(20 rows) + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_31_110_chunk + -> Index Scan using compress_hyper_32_111_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_111_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_31_112_chunk + -> Index Scan 
using compress_hyper_32_113_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_113_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_31_114_chunk + -> Index Scan using compress_hyper_32_115_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_115_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_31_116_chunk + -> Index Scan using compress_hyper_32_117_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_117_chunk + -> Partial GroupAggregate + Group Key: _hyper_31_116_chunk.device_id + -> Index Only Scan using _hyper_31_116_chunk_compression_insert_device_id_time_idx on _hyper_31_116_chunk +(19 rows) SELECT device_id, count(*) FROM compression_insert @@ -1798,32 +1794,31 @@ EXPLAIN (costs off) SELECT device_id, count(*) FROM compression_insert GROUP BY device_id ORDER BY device_id; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------ + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------- Finalize GroupAggregate Group Key: _hyper_31_110_chunk.device_id - -> Sort + -> Merge Append Sort Key: _hyper_31_110_chunk.device_id - -> Append - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_31_110_chunk - -> Index Scan using compress_hyper_32_111_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_111_chunk - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_31_112_chunk - -> Index Scan using compress_hyper_32_113_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_113_chunk - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_31_114_chunk - -> Index Scan using compress_hyper_32_115_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_115_chunk - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_31_116_chunk - -> Index Scan using compress_hyper_32_117_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_117_chunk - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_31_118_chunk - -> Index Scan using compress_hyper_32_119_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_119_chunk - -> Partial GroupAggregate - Group Key: _hyper_31_118_chunk.device_id - -> Index Only Scan using _hyper_31_118_chunk_compression_insert_device_id_time_idx on _hyper_31_118_chunk -(23 rows) + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_31_110_chunk + -> Index Scan using compress_hyper_32_111_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_111_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_31_112_chunk + -> Index Scan using compress_hyper_32_113_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_113_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_31_114_chunk + -> Index Scan using compress_hyper_32_115_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_115_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_31_116_chunk + -> Index Scan using compress_hyper_32_117_chunk_device_id__ts_meta_min_1__ts_me_idx on compress_hyper_32_117_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_31_118_chunk + -> Index Scan using compress_hyper_32_119_chunk_device_id__ts_meta_min_1__ts_me_idx on 
compress_hyper_32_119_chunk + -> Partial GroupAggregate + Group Key: _hyper_31_118_chunk.device_id + -> Index Only Scan using _hyper_31_118_chunk_compression_insert_device_id_time_idx on _hyper_31_118_chunk +(22 rows) SELECT device_id, count(*) FROM compression_insert diff --git a/tsl/test/expected/continuous_aggs-14.out b/tsl/test/expected/continuous_aggs-14.out index 478825ae14f..8e2289908d9 100644 --- a/tsl/test/expected/continuous_aggs-14.out +++ b/tsl/test/expected/continuous_aggs-14.out @@ -2035,8 +2035,8 @@ SELECT * FROM mat_m1; -- Merge Append EXPLAIN (COSTS OFF) SELECT * FROM mat_m1; - QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------ Merge Append Sort Key: _hyper_59_123_chunk.sum DESC -> Merge Append @@ -2049,18 +2049,17 @@ EXPLAIN (COSTS OFF) SELECT * FROM mat_m1; Sort Key: (sum(_hyper_52_111_chunk.temperature)) DESC -> Finalize GroupAggregate Group Key: (time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec)) - -> Sort + -> Merge Append Sort Key: (time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec)) - -> Append - -> Partial GroupAggregate - Group Key: time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec) - -> Index Scan Backward using _hyper_52_111_chunk_conditions_timec_idx on _hyper_52_111_chunk - Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) - -> Partial GroupAggregate - Group Key: time_bucket('@ 7 days'::interval, _hyper_52_125_chunk.timec) - -> Index Scan Backward using _hyper_52_125_chunk_conditions_timec_idx on _hyper_52_125_chunk - Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) -(23 rows) + -> Partial GroupAggregate + Group Key: time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec) + -> Index Scan Backward using _hyper_52_111_chunk_conditions_timec_idx on _hyper_52_111_chunk + Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) + -> Partial GroupAggregate + Group Key: time_bucket('@ 7 days'::interval, _hyper_52_125_chunk.timec) + -> Index Scan Backward using _hyper_52_125_chunk_conditions_timec_idx on _hyper_52_125_chunk + Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) +(22 rows) -- Ordering by another column SELECT * FROM mat_m1 ORDER BY count; @@ -2073,8 +2072,8 @@ SELECT * FROM mat_m1 ORDER BY count; (4 rows) EXPLAIN (COSTS OFF) SELECT * FROM mat_m1 ORDER BY count; - QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------ Sort Sort Key: _hyper_59_123_chunk.count -> Merge Append @@ -2089,18 +2088,17 @@ EXPLAIN (COSTS OFF) SELECT * FROM mat_m1 ORDER BY count; Sort Key: (sum(_hyper_52_111_chunk.temperature)) DESC -> Finalize GroupAggregate Group Key: (time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec)) - -> Sort + -> Merge Append Sort Key: (time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec)) - -> Append - -> Partial GroupAggregate - Group Key: time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec) - -> Index Scan Backward using _hyper_52_111_chunk_conditions_timec_idx on _hyper_52_111_chunk - Index Cond: (timec >= 'Sun Nov 04 
16:00:00 2018 PST'::timestamp with time zone) - -> Partial GroupAggregate - Group Key: time_bucket('@ 7 days'::interval, _hyper_52_125_chunk.timec) - -> Index Scan Backward using _hyper_52_125_chunk_conditions_timec_idx on _hyper_52_125_chunk - Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) -(25 rows) + -> Partial GroupAggregate + Group Key: time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec) + -> Index Scan Backward using _hyper_52_111_chunk_conditions_timec_idx on _hyper_52_111_chunk + Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) + -> Partial GroupAggregate + Group Key: time_bucket('@ 7 days'::interval, _hyper_52_125_chunk.timec) + -> Index Scan Backward using _hyper_52_125_chunk_conditions_timec_idx on _hyper_52_125_chunk + Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) +(24 rows) -- Change the type of cagg ALTER MATERIALIZED VIEW mat_m1 SET (timescaledb.materialized_only=true); diff --git a/tsl/test/expected/continuous_aggs-15.out b/tsl/test/expected/continuous_aggs-15.out index 478825ae14f..8e2289908d9 100644 --- a/tsl/test/expected/continuous_aggs-15.out +++ b/tsl/test/expected/continuous_aggs-15.out @@ -2035,8 +2035,8 @@ SELECT * FROM mat_m1; -- Merge Append EXPLAIN (COSTS OFF) SELECT * FROM mat_m1; - QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------ Merge Append Sort Key: _hyper_59_123_chunk.sum DESC -> Merge Append @@ -2049,18 +2049,17 @@ EXPLAIN (COSTS OFF) SELECT * FROM mat_m1; Sort Key: (sum(_hyper_52_111_chunk.temperature)) DESC -> Finalize GroupAggregate Group Key: (time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec)) - -> Sort + -> Merge Append Sort Key: (time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec)) - -> Append - -> Partial GroupAggregate - Group Key: time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec) - -> Index Scan Backward using _hyper_52_111_chunk_conditions_timec_idx on _hyper_52_111_chunk - Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) - -> Partial GroupAggregate - Group Key: time_bucket('@ 7 days'::interval, _hyper_52_125_chunk.timec) - -> Index Scan Backward using _hyper_52_125_chunk_conditions_timec_idx on _hyper_52_125_chunk - Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) -(23 rows) + -> Partial GroupAggregate + Group Key: time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec) + -> Index Scan Backward using _hyper_52_111_chunk_conditions_timec_idx on _hyper_52_111_chunk + Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) + -> Partial GroupAggregate + Group Key: time_bucket('@ 7 days'::interval, _hyper_52_125_chunk.timec) + -> Index Scan Backward using _hyper_52_125_chunk_conditions_timec_idx on _hyper_52_125_chunk + Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) +(22 rows) -- Ordering by another column SELECT * FROM mat_m1 ORDER BY count; @@ -2073,8 +2072,8 @@ SELECT * FROM mat_m1 ORDER BY count; (4 rows) EXPLAIN (COSTS OFF) SELECT * FROM mat_m1 ORDER BY count; - QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN 
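-- [Editor's note, illustrative only] The mat_m1 plans in these files come
-- from a real-time continuous aggregate (materialized_only=false): rows newer
-- than the materialization watermark are aggregated at query time, which is
-- where the pushed-down Partial GroupAggregate -> Merge Append shape appears.
-- A hedged sketch of such a view over this suite's conditions table; the view
-- name is invented:
create materialized view weekly_conditions
with (timescaledb.continuous) as
select time_bucket('7 days', timec) as bucket,
       sum(temperature) as sum,
       count(*) as count
from conditions
group by 1
with no data;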
+------------------------------------------------------------------------------------------------------------------------------------ Sort Sort Key: _hyper_59_123_chunk.count -> Merge Append @@ -2089,18 +2088,17 @@ EXPLAIN (COSTS OFF) SELECT * FROM mat_m1 ORDER BY count; Sort Key: (sum(_hyper_52_111_chunk.temperature)) DESC -> Finalize GroupAggregate Group Key: (time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec)) - -> Sort + -> Merge Append Sort Key: (time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec)) - -> Append - -> Partial GroupAggregate - Group Key: time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec) - -> Index Scan Backward using _hyper_52_111_chunk_conditions_timec_idx on _hyper_52_111_chunk - Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) - -> Partial GroupAggregate - Group Key: time_bucket('@ 7 days'::interval, _hyper_52_125_chunk.timec) - -> Index Scan Backward using _hyper_52_125_chunk_conditions_timec_idx on _hyper_52_125_chunk - Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) -(25 rows) + -> Partial GroupAggregate + Group Key: time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec) + -> Index Scan Backward using _hyper_52_111_chunk_conditions_timec_idx on _hyper_52_111_chunk + Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) + -> Partial GroupAggregate + Group Key: time_bucket('@ 7 days'::interval, _hyper_52_125_chunk.timec) + -> Index Scan Backward using _hyper_52_125_chunk_conditions_timec_idx on _hyper_52_125_chunk + Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) +(24 rows) -- Change the type of cagg ALTER MATERIALIZED VIEW mat_m1 SET (timescaledb.materialized_only=true); diff --git a/tsl/test/expected/continuous_aggs-16.out b/tsl/test/expected/continuous_aggs-16.out index 7a52c68c544..38804430ea2 100644 --- a/tsl/test/expected/continuous_aggs-16.out +++ b/tsl/test/expected/continuous_aggs-16.out @@ -2035,8 +2035,8 @@ SELECT * FROM mat_m1; -- Merge Append EXPLAIN (COSTS OFF) SELECT * FROM mat_m1; - QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------ Merge Append Sort Key: _hyper_59_123_chunk.sum DESC -> Merge Append @@ -2049,18 +2049,17 @@ EXPLAIN (COSTS OFF) SELECT * FROM mat_m1; Sort Key: (sum(_hyper_52_111_chunk.temperature)) DESC -> Finalize GroupAggregate Group Key: (time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec)) - -> Sort + -> Merge Append Sort Key: (time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec)) - -> Append - -> Partial GroupAggregate - Group Key: time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec) - -> Index Scan Backward using _hyper_52_111_chunk_conditions_timec_idx on _hyper_52_111_chunk - Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) - -> Partial GroupAggregate - Group Key: time_bucket('@ 7 days'::interval, _hyper_52_125_chunk.timec) - -> Index Scan Backward using _hyper_52_125_chunk_conditions_timec_idx on _hyper_52_125_chunk - Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) -(23 rows) + -> Partial GroupAggregate + Group Key: time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec) + -> Index Scan Backward using _hyper_52_111_chunk_conditions_timec_idx on 
_hyper_52_111_chunk + Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) + -> Partial GroupAggregate + Group Key: time_bucket('@ 7 days'::interval, _hyper_52_125_chunk.timec) + -> Index Scan Backward using _hyper_52_125_chunk_conditions_timec_idx on _hyper_52_125_chunk + Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) +(22 rows) -- Ordering by another column SELECT * FROM mat_m1 ORDER BY count; @@ -2073,8 +2072,8 @@ SELECT * FROM mat_m1 ORDER BY count; (4 rows) EXPLAIN (COSTS OFF) SELECT * FROM mat_m1 ORDER BY count; - QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------ Sort Sort Key: _hyper_59_123_chunk.count -> Merge Append @@ -2089,18 +2088,17 @@ EXPLAIN (COSTS OFF) SELECT * FROM mat_m1 ORDER BY count; Sort Key: (sum(_hyper_52_111_chunk.temperature)) DESC -> Finalize GroupAggregate Group Key: (time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec)) - -> Sort + -> Merge Append Sort Key: (time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec)) - -> Append - -> Partial GroupAggregate - Group Key: time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec) - -> Index Scan Backward using _hyper_52_111_chunk_conditions_timec_idx on _hyper_52_111_chunk - Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) - -> Partial GroupAggregate - Group Key: time_bucket('@ 7 days'::interval, _hyper_52_125_chunk.timec) - -> Index Scan Backward using _hyper_52_125_chunk_conditions_timec_idx on _hyper_52_125_chunk - Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) -(25 rows) + -> Partial GroupAggregate + Group Key: time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec) + -> Index Scan Backward using _hyper_52_111_chunk_conditions_timec_idx on _hyper_52_111_chunk + Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) + -> Partial GroupAggregate + Group Key: time_bucket('@ 7 days'::interval, _hyper_52_125_chunk.timec) + -> Index Scan Backward using _hyper_52_125_chunk_conditions_timec_idx on _hyper_52_125_chunk + Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) +(24 rows) -- Change the type of cagg ALTER MATERIALIZED VIEW mat_m1 SET (timescaledb.materialized_only=true); diff --git a/tsl/test/expected/continuous_aggs-17.out b/tsl/test/expected/continuous_aggs-17.out index 7a52c68c544..38804430ea2 100644 --- a/tsl/test/expected/continuous_aggs-17.out +++ b/tsl/test/expected/continuous_aggs-17.out @@ -2035,8 +2035,8 @@ SELECT * FROM mat_m1; -- Merge Append EXPLAIN (COSTS OFF) SELECT * FROM mat_m1; - QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------ Merge Append Sort Key: _hyper_59_123_chunk.sum DESC -> Merge Append @@ -2049,18 +2049,17 @@ EXPLAIN (COSTS OFF) SELECT * FROM mat_m1; Sort Key: (sum(_hyper_52_111_chunk.temperature)) DESC -> Finalize GroupAggregate Group Key: (time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec)) - -> Sort + -> Merge Append Sort Key: (time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec)) - -> Append - -> Partial 
GroupAggregate - Group Key: time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec) - -> Index Scan Backward using _hyper_52_111_chunk_conditions_timec_idx on _hyper_52_111_chunk - Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) - -> Partial GroupAggregate - Group Key: time_bucket('@ 7 days'::interval, _hyper_52_125_chunk.timec) - -> Index Scan Backward using _hyper_52_125_chunk_conditions_timec_idx on _hyper_52_125_chunk - Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) -(23 rows) + -> Partial GroupAggregate + Group Key: time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec) + -> Index Scan Backward using _hyper_52_111_chunk_conditions_timec_idx on _hyper_52_111_chunk + Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) + -> Partial GroupAggregate + Group Key: time_bucket('@ 7 days'::interval, _hyper_52_125_chunk.timec) + -> Index Scan Backward using _hyper_52_125_chunk_conditions_timec_idx on _hyper_52_125_chunk + Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) +(22 rows) -- Ordering by another column SELECT * FROM mat_m1 ORDER BY count; @@ -2073,8 +2072,8 @@ SELECT * FROM mat_m1 ORDER BY count; (4 rows) EXPLAIN (COSTS OFF) SELECT * FROM mat_m1 ORDER BY count; - QUERY PLAN -------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------ Sort Sort Key: _hyper_59_123_chunk.count -> Merge Append @@ -2089,18 +2088,17 @@ EXPLAIN (COSTS OFF) SELECT * FROM mat_m1 ORDER BY count; Sort Key: (sum(_hyper_52_111_chunk.temperature)) DESC -> Finalize GroupAggregate Group Key: (time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec)) - -> Sort + -> Merge Append Sort Key: (time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec)) - -> Append - -> Partial GroupAggregate - Group Key: time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec) - -> Index Scan Backward using _hyper_52_111_chunk_conditions_timec_idx on _hyper_52_111_chunk - Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) - -> Partial GroupAggregate - Group Key: time_bucket('@ 7 days'::interval, _hyper_52_125_chunk.timec) - -> Index Scan Backward using _hyper_52_125_chunk_conditions_timec_idx on _hyper_52_125_chunk - Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) -(25 rows) + -> Partial GroupAggregate + Group Key: time_bucket('@ 7 days'::interval, _hyper_52_111_chunk.timec) + -> Index Scan Backward using _hyper_52_111_chunk_conditions_timec_idx on _hyper_52_111_chunk + Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) + -> Partial GroupAggregate + Group Key: time_bucket('@ 7 days'::interval, _hyper_52_125_chunk.timec) + -> Index Scan Backward using _hyper_52_125_chunk_conditions_timec_idx on _hyper_52_125_chunk + Index Cond: (timec >= 'Sun Nov 04 16:00:00 2018 PST'::timestamp with time zone) +(24 rows) -- Change the type of cagg ALTER MATERIALIZED VIEW mat_m1 SET (timescaledb.materialized_only=true); diff --git a/tsl/test/expected/hypercore_index_btree.out b/tsl/test/expected/hypercore_index_btree.out index 805b4d219a0..87c64c873f6 100644 --- a/tsl/test/expected/hypercore_index_btree.out +++ b/tsl/test/expected/hypercore_index_btree.out @@ -111,6 +111,7 @@ select format('%I.%I', chunk_schema, 
chunk_name)::regclass as chunk2 -- Avoid parallel (index) scans to make test stable set max_parallel_workers_per_gather to 0; set enable_hashagg to off; +set timescaledb.enable_chunkwise_aggregation to off; -- Drop the device_id index and redefine it later with extra columns. drop index hypertable_device_id_idx; create view chunk_indexes as @@ -126,38 +127,25 @@ select explain_anonymize(format($$ select location_id, count(*) into orig from %s where location_id in (3,4,5) group by location_id $$, :'hypertable')); - explain_anonymize ---------------------------------------------------------------------------------------------------------------- - Finalize GroupAggregate + explain_anonymize +--------------------------------------------------------------------------------------------------- + GroupAggregate Group Key: _hyper_I_N_chunk.location_id - -> Sort + -> Merge Append Sort Key: _hyper_I_N_chunk.location_id - -> Append - -> Partial GroupAggregate - Group Key: _hyper_I_N_chunk.location_id - -> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk - Index Cond: (location_id = ANY ('{3,4,5}'::integer[])) - -> Partial GroupAggregate - Group Key: _hyper_I_N_chunk.location_id - -> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk - Index Cond: (location_id = ANY ('{3,4,5}'::integer[])) - -> Partial GroupAggregate - Group Key: _hyper_I_N_chunk.location_id - -> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk - Index Cond: (location_id = ANY ('{3,4,5}'::integer[])) - -> Partial GroupAggregate - Group Key: _hyper_I_N_chunk.location_id - -> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk - Index Cond: (location_id = ANY ('{3,4,5}'::integer[])) - -> Partial GroupAggregate - Group Key: _hyper_I_N_chunk.location_id - -> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk - Index Cond: (location_id = ANY ('{3,4,5}'::integer[])) - -> Partial GroupAggregate - Group Key: _hyper_I_N_chunk.location_id - -> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk - Index Cond: (location_id = ANY ('{3,4,5}'::integer[])) -(29 rows) + -> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk + Index Cond: (location_id = ANY ('{3,4,5}'::integer[])) + -> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk + Index Cond: (location_id = ANY ('{3,4,5}'::integer[])) + -> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk + Index Cond: (location_id = ANY ('{3,4,5}'::integer[])) + -> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk + Index Cond: (location_id = ANY ('{3,4,5}'::integer[])) + -> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk + Index Cond: (location_id = ANY ('{3,4,5}'::integer[])) + -> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk + Index Cond: (location_id = ANY ('{3,4,5}'::integer[])) +(16 rows) select location_id, count(*) into orig from :hypertable where location_id in (3,4,5) group by location_id; @@ -194,38 +182,25 @@ select explain_anonymize(format($$ select owner_id, count(*) into owner_comp from %s where owner_id in (3,4,5) group by owner_id $$, :'hypertable')); - explain_anonymize 
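-- [Editor's note] timescaledb.enable_chunkwise_aggregation is the GUC toggled
-- above: it controls whether partial aggregation is pushed below the append
-- node at all. Disabling it per session keeps plan output stable in tests
-- that exercise other features:
set timescaledb.enable_chunkwise_aggregation to off;
-- ... statements whose plans should not contain per-chunk partials ...
reset timescaledb.enable_chunkwise_aggregation;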
---------------------------------------------------------------------------------------------------------- - Finalize GroupAggregate + explain_anonymize +--------------------------------------------------------------------------------------------- + GroupAggregate Group Key: _hyper_I_N_chunk.owner_id - -> Sort + -> Merge Append Sort Key: _hyper_I_N_chunk.owner_id - -> Append - -> Partial GroupAggregate - Group Key: _hyper_I_N_chunk.owner_id - -> Index Only Scan using _hyper_I_N_chunk_hypertable_owner_idx on _hyper_I_N_chunk - Index Cond: (owner_id = ANY ('{3,4,5}'::bigint[])) - -> Partial GroupAggregate - Group Key: _hyper_I_N_chunk.owner_id - -> Index Scan using _hyper_I_N_chunk_hypertable_owner_idx on _hyper_I_N_chunk - Index Cond: (owner_id = ANY ('{3,4,5}'::bigint[])) - -> Partial GroupAggregate - Group Key: _hyper_I_N_chunk.owner_id - -> Index Only Scan using _hyper_I_N_chunk_hypertable_owner_idx on _hyper_I_N_chunk - Index Cond: (owner_id = ANY ('{3,4,5}'::bigint[])) - -> Partial GroupAggregate - Group Key: _hyper_I_N_chunk.owner_id - -> Index Only Scan using _hyper_I_N_chunk_hypertable_owner_idx on _hyper_I_N_chunk - Index Cond: (owner_id = ANY ('{3,4,5}'::bigint[])) - -> Partial GroupAggregate - Group Key: _hyper_I_N_chunk.owner_id - -> Index Only Scan using _hyper_I_N_chunk_hypertable_owner_idx on _hyper_I_N_chunk - Index Cond: (owner_id = ANY ('{3,4,5}'::bigint[])) - -> Partial GroupAggregate - Group Key: _hyper_I_N_chunk.owner_id - -> Index Only Scan using _hyper_I_N_chunk_hypertable_owner_idx on _hyper_I_N_chunk - Index Cond: (owner_id = ANY ('{3,4,5}'::bigint[])) -(29 rows) + -> Index Only Scan using _hyper_I_N_chunk_hypertable_owner_idx on _hyper_I_N_chunk + Index Cond: (owner_id = ANY ('{3,4,5}'::bigint[])) + -> Index Scan using _hyper_I_N_chunk_hypertable_owner_idx on _hyper_I_N_chunk + Index Cond: (owner_id = ANY ('{3,4,5}'::bigint[])) + -> Index Only Scan using _hyper_I_N_chunk_hypertable_owner_idx on _hyper_I_N_chunk + Index Cond: (owner_id = ANY ('{3,4,5}'::bigint[])) + -> Index Only Scan using _hyper_I_N_chunk_hypertable_owner_idx on _hyper_I_N_chunk + Index Cond: (owner_id = ANY ('{3,4,5}'::bigint[])) + -> Index Only Scan using _hyper_I_N_chunk_hypertable_owner_idx on _hyper_I_N_chunk + Index Cond: (owner_id = ANY ('{3,4,5}'::bigint[])) + -> Index Only Scan using _hyper_I_N_chunk_hypertable_owner_idx on _hyper_I_N_chunk + Index Cond: (owner_id = ANY ('{3,4,5}'::bigint[])) +(16 rows) select owner_id, count(*) into owner_comp from :hypertable where owner_id in (3,4,5) group by owner_id; @@ -266,38 +241,25 @@ select explain_anonymize(format($$ select location_id, count(*) into comp from %s where location_id in (3,4,5) group by location_id $$, :'hypertable')); - explain_anonymize ------------------------------------------------------------------------------------------------------------------------- - Finalize GroupAggregate + explain_anonymize +------------------------------------------------------------------------------------------------------------ + GroupAggregate Group Key: _hyper_I_N_chunk.location_id - -> Sort + -> Merge Append Sort Key: _hyper_I_N_chunk.location_id - -> Append - -> Partial GroupAggregate - Group Key: _hyper_I_N_chunk.location_id - -> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk - Index Cond: (location_id = ANY ('{3,4,5}'::integer[])) - -> Partial GroupAggregate - Group Key: _hyper_I_N_chunk.location_id - -> Index Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk - 
Index Cond: (location_id = ANY ('{3,4,5}'::integer[])) - -> Partial GroupAggregate - Group Key: _hyper_I_N_chunk.location_id - -> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk - Index Cond: (location_id = ANY ('{3,4,5}'::integer[])) - -> Partial GroupAggregate - Group Key: _hyper_I_N_chunk.location_id - -> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk - Index Cond: (location_id = ANY ('{3,4,5}'::integer[])) - -> Partial GroupAggregate - Group Key: _hyper_I_N_chunk.location_id - -> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk - Index Cond: (location_id = ANY ('{3,4,5}'::integer[])) - -> Partial GroupAggregate - Group Key: _hyper_I_N_chunk.location_id - -> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk - Index Cond: (location_id = ANY ('{3,4,5}'::integer[])) -(29 rows) + -> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk + Index Cond: (location_id = ANY ('{3,4,5}'::integer[])) + -> Index Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk + Index Cond: (location_id = ANY ('{3,4,5}'::integer[])) + -> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk + Index Cond: (location_id = ANY ('{3,4,5}'::integer[])) + -> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk + Index Cond: (location_id = ANY ('{3,4,5}'::integer[])) + -> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk + Index Cond: (location_id = ANY ('{3,4,5}'::integer[])) + -> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk + Index Cond: (location_id = ANY ('{3,4,5}'::integer[])) +(16 rows) -- result should be the same select location_id, count(*) into comp from :hypertable where location_id in (3,4,5) group by location_id; @@ -414,44 +376,28 @@ select explain_analyze_anonymize(format($$ select device_id, avg(temp) from %s where device_id between 10 and 20 group by device_id $$, :'hypertable')); - explain_analyze_anonymize --------------------------------------------------------------------------------------------------------------------------------- - Finalize GroupAggregate (actual rows=N loops=N) - -> Sort (actual rows=N loops=N) + explain_analyze_anonymize +-------------------------------------------------------------------------------------------------------------------- + GroupAggregate (actual rows=N loops=N) + -> Merge Append (actual rows=N loops=N) Sort Key: _hyper_I_N_chunk.device_id - Sort Method: quicksort - -> Append (actual rows=N loops=N) - -> Partial GroupAggregate (actual rows=N loops=N) - -> Sort (actual rows=N loops=N) - Sort Key: _hyper_I_N_chunk.device_id - Sort Method: quicksort - -> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N) - Vectorized Filter: ((device_id >= 10) AND (device_id <= 20)) - Rows Removed by Filter: 133 - -> Partial GroupAggregate (actual rows=N loops=N) - -> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) - Index Cond: ((device_id >= 10) AND (device_id <= 20)) - -> Partial GroupAggregate (actual rows=N loops=N) - -> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) - Index Cond: ((device_id >= 10) AND (device_id <= 20)) - 
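-- [Editor's sketch] With chunkwise aggregation off and hashing disabled, the
-- sorted GROUP BY above is satisfied by a Merge Append over the per-chunk
-- btree indexes with a single GroupAggregate on top; no Sort node and no
-- partial aggregates remain. The test drives this through explain_anonymize
-- and the :hypertable psql variable; a hypothetical direct equivalent:
set max_parallel_workers_per_gather to 0;
set enable_hashagg to off;
set timescaledb.enable_chunkwise_aggregation to off;
explain (costs off)
select location_id, count(*)
from readings_ht  -- hypothetical stand-in for :hypertable
where location_id in (3, 4, 5)
group by location_id;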
-> Partial GroupAggregate (actual rows=N loops=N) - -> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) - Index Cond: ((device_id >= 10) AND (device_id <= 20)) - -> Partial GroupAggregate (actual rows=N loops=N) - -> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) - Index Cond: ((device_id >= 10) AND (device_id <= 20)) - -> Partial GroupAggregate (actual rows=N loops=N) - -> Sort (actual rows=N loops=N) - Sort Key: _hyper_I_N_chunk.device_id - Sort Method: quicksort - -> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N) - Vectorized Filter: ((device_id >= 10) AND (device_id <= 20)) - Rows Removed by Filter: 234 + -> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) + Index Cond: ((device_id >= 10) AND (device_id <= 20)) + -> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) + Index Cond: ((device_id >= 10) AND (device_id <= 20)) + -> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) + Index Cond: ((device_id >= 10) AND (device_id <= 20)) + -> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) + Index Cond: ((device_id >= 10) AND (device_id <= 20)) + -> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) + Index Cond: ((device_id >= 10) AND (device_id <= 20)) + -> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) + Index Cond: ((device_id >= 10) AND (device_id <= 20)) Array Cache Hits: N Array Cache Misses: N Array Cache Evictions: N - Array Decompressions: 578 -(35 rows) + Array Decompressions: 560 +(19 rows) select explain_analyze_anonymize(format($$ select device_id, avg(temp) from %s where device_id between 10 and 20 @@ -656,83 +602,62 @@ select explain_analyze_anonymize(format($$ select location_id, avg(humidity) from %s where location_id between 5 and 10 group by location_id order by location_id $$, :'hypertable')); - explain_analyze_anonymize ----------------------------------------------------------------------------------------------------------------------------------- - Finalize GroupAggregate (actual rows=N loops=N) - -> Sort (actual rows=N loops=N) + explain_analyze_anonymize +-------------------------------------------------------------------------------------------------------------------------------------------- + GroupAggregate (actual rows=N loops=N) + -> Merge Append (actual rows=N loops=N) Sort Key: _hyper_I_N_chunk.location_id - Sort Method: quicksort - -> Append (actual rows=N loops=N) - -> Partial GroupAggregate (actual rows=N loops=N) - -> Sort (actual rows=N loops=N) - Sort Key: _hyper_I_N_chunk.location_id - Sort Method: quicksort - -> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N) - Scankey: ((location_id >= 5) AND (location_id <= 10)) - -> Partial GroupAggregate (actual rows=N loops=N) - -> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) - Index Cond: ((location_id >= 5) AND (location_id <= 10)) - -> Partial GroupAggregate (actual rows=N loops=N) - -> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) - Index Cond: ((location_id >= 5) AND (location_id <= 10)) - -> Partial GroupAggregate (actual 
rows=N loops=N) - -> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) - Index Cond: ((location_id >= 5) AND (location_id <= 10)) - -> Partial GroupAggregate (actual rows=N loops=N) - -> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) - Index Cond: ((location_id >= 5) AND (location_id <= 10)) - -> Partial GroupAggregate (actual rows=N loops=N) - -> Sort (actual rows=N loops=N) - Sort Key: _hyper_I_N_chunk.location_id - Sort Method: quicksort - -> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N) - Scankey: ((location_id >= 5) AND (location_id <= 10)) + -> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_include_humidity_idx on _hyper_I_N_chunk (actual rows=N loops=N) + Index Cond: ((location_id >= 5) AND (location_id <= 10)) + Heap Fetches: N + -> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) + Index Cond: ((location_id >= 5) AND (location_id <= 10)) + -> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) + Index Cond: ((location_id >= 5) AND (location_id <= 10)) + -> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) + Index Cond: ((location_id >= 5) AND (location_id <= 10)) + -> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) + Index Cond: ((location_id >= 5) AND (location_id <= 10)) + -> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) + Index Cond: ((location_id >= 5) AND (location_id <= 10)) Array Cache Hits: N Array Cache Misses: N Array Cache Evictions: N - Array Decompressions: 180 -(33 rows) + Array Decompressions: 150 +(20 rows) select explain_analyze_anonymize(format($$ select device_id, avg(humidity) from %s where device_id between 5 and 10 group by device_id order by device_id $$, :'hypertable')); - explain_analyze_anonymize -------------------------------------------------------------------------------------------------------------------------------------- - Finalize GroupAggregate (actual rows=N loops=N) - -> Sort (actual rows=N loops=N) + explain_analyze_anonymize +------------------------------------------------------------------------------------------------------------------------- + GroupAggregate (actual rows=N loops=N) + -> Merge Append (actual rows=N loops=N) Sort Key: _hyper_I_N_chunk.device_id - Sort Method: quicksort - -> Append (actual rows=N loops=N) - -> Partial GroupAggregate (actual rows=N loops=N) - -> Index Only Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) - Index Cond: ((device_id >= 5) AND (device_id <= 10)) - Heap Fetches: N - -> Partial GroupAggregate (actual rows=N loops=N) - -> Index Only Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) - Index Cond: ((device_id >= 5) AND (device_id <= 10)) - Heap Fetches: N - -> Partial GroupAggregate (actual rows=N loops=N) - -> Index Only Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) - Index Cond: ((device_id >= 5) AND (device_id <= 10)) - Heap Fetches: N - -> Partial GroupAggregate (actual rows=N loops=N) - -> Index Only Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) - Index Cond: ((device_id >= 5) 
AND (device_id <= 10)) - Heap Fetches: N - -> Partial GroupAggregate (actual rows=N loops=N) - -> Index Only Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) - Index Cond: ((device_id >= 5) AND (device_id <= 10)) - Heap Fetches: N - -> Partial GroupAggregate (actual rows=N loops=N) - -> Index Only Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) - Index Cond: ((device_id >= 5) AND (device_id <= 10)) - Heap Fetches: N + -> Index Only Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) + Index Cond: ((device_id >= 5) AND (device_id <= 10)) + Heap Fetches: N + -> Index Only Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) + Index Cond: ((device_id >= 5) AND (device_id <= 10)) + Heap Fetches: N + -> Index Only Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) + Index Cond: ((device_id >= 5) AND (device_id <= 10)) + Heap Fetches: N + -> Index Only Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) + Index Cond: ((device_id >= 5) AND (device_id <= 10)) + Heap Fetches: N + -> Index Only Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) + Index Cond: ((device_id >= 5) AND (device_id <= 10)) + Heap Fetches: N + -> Index Only Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N) + Index Cond: ((device_id >= 5) AND (device_id <= 10)) + Heap Fetches: N Array Cache Hits: N Array Cache Misses: N Array Cache Evictions: N Array Decompressions: 0 -(33 rows) +(25 rows) select explain_analyze_anonymize(format($$ select location_id, avg(humidity) from %s where location_id between 5 and 10 diff --git a/tsl/test/expected/hypercore_scans.out b/tsl/test/expected/hypercore_scans.out index 7c4a9792c23..dd4f37f39d0 100644 --- a/tsl/test/expected/hypercore_scans.out +++ b/tsl/test/expected/hypercore_scans.out @@ -1,6 +1,8 @@ -- This file and its contents are licensed under the Timescale License. -- Please see the included NOTICE for copyright information and -- LICENSE-TIMESCALE for a copy of the license. 
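-- [Editor's note] hypercore_scans gains the same stabilizing GUC as the btree
-- test above. With chunkwise aggregation off and hash aggregation allowed, an
-- unordered GROUP BY can simply hash over a plain Append, as the readings
-- plans below show:
set timescaledb.enable_chunkwise_aggregation to off;
explain (costs off)
select device, avg(humidity) from readings
where device between 5 and 10
group by device;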
+-- Avoid chunkwise aggregation to make the test stable +set timescaledb.enable_chunkwise_aggregation to off; create table readings(time timestamptz, location text, device int, @@ -222,43 +224,29 @@ select device, humidity from readings where device between 5 and 10; explain (analyze, costs off, timing off, summary off, decompress_cache_stats) select device, avg(humidity) from readings where device between 5 and 10 group by device; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------ - Finalize GroupAggregate (actual rows=6 loops=1) + QUERY PLAN +----------------------------------------------------------------------------------------------------------------- + HashAggregate (actual rows=6 loops=1) Group Key: _hyper_1_1_chunk.device - -> Sort (actual rows=36 loops=1) - Sort Key: _hyper_1_1_chunk.device - Sort Method: quicksort - -> Append (actual rows=36 loops=1) - -> Partial GroupAggregate (actual rows=6 loops=1) - Group Key: _hyper_1_1_chunk.device - -> Index Scan using _hyper_1_1_chunk_readings_device_idx on _hyper_1_1_chunk (actual rows=35 loops=1) - Index Cond: ((device >= 5) AND (device <= 10)) - -> Partial GroupAggregate (actual rows=6 loops=1) - Group Key: _hyper_1_2_chunk.device - -> Index Scan using _hyper_1_2_chunk_readings_device_idx on _hyper_1_2_chunk (actual rows=421 loops=1) - Index Cond: ((device >= 5) AND (device <= 10)) - -> Partial GroupAggregate (actual rows=6 loops=1) - Group Key: _hyper_1_3_chunk.device - -> Index Scan using _hyper_1_3_chunk_readings_device_idx on _hyper_1_3_chunk (actual rows=403 loops=1) - Index Cond: ((device >= 5) AND (device <= 10)) - -> Partial GroupAggregate (actual rows=6 loops=1) - Group Key: _hyper_1_4_chunk.device - -> Index Scan using _hyper_1_4_chunk_readings_device_idx on _hyper_1_4_chunk (actual rows=377 loops=1) - Index Cond: ((device >= 5) AND (device <= 10)) - -> Partial GroupAggregate (actual rows=6 loops=1) - Group Key: _hyper_1_5_chunk.device - -> Index Scan using _hyper_1_5_chunk_readings_device_idx on _hyper_1_5_chunk (actual rows=395 loops=1) - Index Cond: ((device >= 5) AND (device <= 10)) - -> Partial GroupAggregate (actual rows=6 loops=1) - Group Key: _hyper_1_6_chunk.device - -> Index Scan using _hyper_1_6_chunk_readings_device_idx on _hyper_1_6_chunk (actual rows=74 loops=1) - Index Cond: ((device >= 5) AND (device <= 10)) + Batches: 1 + -> Append (actual rows=1705 loops=1) + -> Index Scan using _hyper_1_1_chunk_readings_device_idx on _hyper_1_1_chunk (actual rows=35 loops=1) + Index Cond: ((device >= 5) AND (device <= 10)) + -> Index Scan using _hyper_1_2_chunk_readings_device_idx on _hyper_1_2_chunk (actual rows=421 loops=1) + Index Cond: ((device >= 5) AND (device <= 10)) + -> Index Scan using _hyper_1_3_chunk_readings_device_idx on _hyper_1_3_chunk (actual rows=403 loops=1) + Index Cond: ((device >= 5) AND (device <= 10)) + -> Index Scan using _hyper_1_4_chunk_readings_device_idx on _hyper_1_4_chunk (actual rows=377 loops=1) + Index Cond: ((device >= 5) AND (device <= 10)) + -> Index Scan using _hyper_1_5_chunk_readings_device_idx on _hyper_1_5_chunk (actual rows=395 loops=1) + Index Cond: ((device >= 5) AND (device <= 10)) + -> Index Scan using _hyper_1_6_chunk_readings_device_idx on _hyper_1_6_chunk (actual rows=74 loops=1) + Index Cond: ((device >= 5) AND (device <= 10)) Array Cache Hits: 0 Array Cache Misses: 6 Array Cache Evictions: 0 Array Decompressions: 6 -(34 rows) +(20 rows) -- Test on conflict: insert the same data as 
before, but throw away -- the updates. diff --git a/tsl/test/expected/transparent_decompression-14.out b/tsl/test/expected/transparent_decompression-14.out index 94849608d46..6f7935e9e32 100644 --- a/tsl/test/expected/transparent_decompression-14.out +++ b/tsl/test/expected/transparent_decompression-14.out @@ -1645,14 +1645,39 @@ SELECT count(*) FROM :TEST_TABLE GROUP BY device_id ORDER BY device_id; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize GroupAggregate (actual rows=5 loops=1) Group Key: _hyper_1_1_chunk.device_id - -> Sort (actual rows=15 loops=1) + -> Merge Append (actual rows=15 loops=1) Sort Key: _hyper_1_1_chunk.device_id - Sort Method: quicksort - -> Append (actual rows=15 loops=1) + -> Custom Scan (VectorAgg) (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=3600 loops=1) + -> Index Scan using compress_hyper_5_15_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_5_15_chunk (actual rows=5 loops=1) + -> Partial GroupAggregate (actual rows=5 loops=1) + Group Key: _hyper_1_2_chunk.device_id + -> Sort (actual rows=3360 loops=1) + Sort Key: _hyper_1_2_chunk.device_id + Sort Method: quicksort + -> Seq Scan on _hyper_1_2_chunk (actual rows=3360 loops=1) + -> Custom Scan (VectorAgg) (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=1680 loops=1) + -> Index Scan using compress_hyper_5_16_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_5_16_chunk (actual rows=5 loops=1) +(16 rows) + +-- test window functions with GROUP BY +:PREFIX +SELECT sum(count(*)) OVER () +FROM :TEST_TABLE +GROUP BY device_id +ORDER BY device_id; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------- + WindowAgg (actual rows=5 loops=1) + -> Finalize GroupAggregate (actual rows=5 loops=1) + Group Key: _hyper_1_1_chunk.device_id + -> Merge Append (actual rows=15 loops=1) + Sort Key: _hyper_1_1_chunk.device_id -> Custom Scan (VectorAgg) (actual rows=5 loops=1) -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=3600 loops=1) -> Index Scan using compress_hyper_5_15_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_5_15_chunk (actual rows=5 loops=1) @@ -1665,36 +1690,7 @@ ORDER BY device_id; -> Custom Scan (VectorAgg) (actual rows=5 loops=1) -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=1680 loops=1) -> Index Scan using compress_hyper_5_16_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_5_16_chunk (actual rows=5 loops=1) -(18 rows) - --- test window functions with GROUP BY -:PREFIX -SELECT sum(count(*)) OVER () -FROM :TEST_TABLE -GROUP BY device_id -ORDER BY device_id; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - WindowAgg (actual rows=5 loops=1) - -> Finalize GroupAggregate (actual rows=5 loops=1) - Group Key: _hyper_1_1_chunk.device_id - -> Sort (actual rows=15 loops=1) - Sort Key: _hyper_1_1_chunk.device_id - Sort Method: quicksort - -> Append (actual rows=15 loops=1) - -> 
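-- [Editor's note] The Merge Append children in the new plan are heterogeneous
-- on purpose: compressed chunks aggregate through VectorAgg over
-- DecompressChunk, ordered by the compressed chunk's index, while the one
-- uncompressed chunk (_hyper_1_2_chunk) falls back to a Partial
-- GroupAggregate over an explicit per-chunk Sort. A hedged way to reproduce
-- the mix is to compress all but one chunk of a hypertable, e.g.:
select compress_chunk(c)
from show_chunks('metrics') c  -- hypothetical hypertable name
offset 1;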
Custom Scan (VectorAgg) (actual rows=5 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=3600 loops=1) - -> Index Scan using compress_hyper_5_15_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_5_15_chunk (actual rows=5 loops=1) - -> Partial GroupAggregate (actual rows=5 loops=1) - Group Key: _hyper_1_2_chunk.device_id - -> Sort (actual rows=3360 loops=1) - Sort Key: _hyper_1_2_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_1_2_chunk (actual rows=3360 loops=1) - -> Custom Scan (VectorAgg) (actual rows=5 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=1680 loops=1) - -> Index Scan using compress_hyper_5_16_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_5_16_chunk (actual rows=5 loops=1) -(19 rows) +(17 rows) SET enable_hashagg = ON; -- test CTE @@ -5614,14 +5610,54 @@ SELECT count(*) FROM :TEST_TABLE GROUP BY device_id ORDER BY device_id; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize GroupAggregate (actual rows=5 loops=1) Group Key: _hyper_2_4_chunk.device_id - -> Sort (actual rows=15 loops=1) + -> Merge Append (actual rows=15 loops=1) Sort Key: _hyper_2_4_chunk.device_id - Sort Method: quicksort - -> Append (actual rows=15 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=720 loops=1) + -> Index Scan using compress_hyper_6_17_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_17_chunk (actual rows=1 loops=1) + -> Custom Scan (VectorAgg) (actual rows=3 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=2160 loops=1) + -> Index Scan using compress_hyper_6_18_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_18_chunk (actual rows=3 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=720 loops=1) + -> Index Scan using compress_hyper_6_19_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_19_chunk (actual rows=1 loops=1) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: _hyper_2_7_chunk.device_id + -> Index Only Scan Backward using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_7_chunk (actual rows=672 loops=1) + -> Partial GroupAggregate (actual rows=3 loops=1) + Group Key: _hyper_2_8_chunk.device_id + -> Index Only Scan Backward using _hyper_2_8_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_8_chunk (actual rows=2016 loops=1) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: _hyper_2_9_chunk.device_id + -> Index Only Scan Backward using _hyper_2_9_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_9_chunk (actual rows=672 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=336 loops=1) + -> Index Scan using compress_hyper_6_20_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_20_chunk (actual rows=1 loops=1) + -> Custom Scan (VectorAgg) (actual rows=3 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=1008 loops=1) + -> Index Scan using 
compress_hyper_6_21_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_21_chunk (actual rows=3 loops=1) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: _hyper_2_12_chunk.device_id + -> Index Only Scan Backward using _hyper_2_12_chunk_metrics_space_device_id_device_id_peer_v0_v_2 on _hyper_2_12_chunk (actual rows=336 loops=1) +(35 rows) + +-- test window functions with GROUP BY +:PREFIX +SELECT sum(count(*)) OVER () +FROM :TEST_TABLE +GROUP BY device_id +ORDER BY device_id; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------- + WindowAgg (actual rows=5 loops=1) + -> Finalize GroupAggregate (actual rows=5 loops=1) + Group Key: _hyper_2_4_chunk.device_id + -> Merge Append (actual rows=15 loops=1) + Sort Key: _hyper_2_4_chunk.device_id -> Custom Scan (VectorAgg) (actual rows=1 loops=1) -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=720 loops=1) -> Index Scan using compress_hyper_6_17_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_17_chunk (actual rows=1 loops=1) @@ -5633,22 +5669,13 @@ ORDER BY device_id; -> Index Scan using compress_hyper_6_19_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_19_chunk (actual rows=1 loops=1) -> Partial GroupAggregate (actual rows=1 loops=1) Group Key: _hyper_2_7_chunk.device_id - -> Sort (actual rows=672 loops=1) - Sort Key: _hyper_2_7_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_7_chunk (actual rows=672 loops=1) + -> Index Only Scan Backward using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_7_chunk (actual rows=672 loops=1) -> Partial GroupAggregate (actual rows=3 loops=1) Group Key: _hyper_2_8_chunk.device_id - -> Sort (actual rows=2016 loops=1) - Sort Key: _hyper_2_8_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_8_chunk (actual rows=2016 loops=1) + -> Index Only Scan Backward using _hyper_2_8_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_8_chunk (actual rows=2016 loops=1) -> Partial GroupAggregate (actual rows=1 loops=1) Group Key: _hyper_2_9_chunk.device_id - -> Sort (actual rows=672 loops=1) - Sort Key: _hyper_2_9_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_9_chunk (actual rows=672 loops=1) + -> Index Only Scan Backward using _hyper_2_9_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_9_chunk (actual rows=672 loops=1) -> Custom Scan (VectorAgg) (actual rows=1 loops=1) -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=336 loops=1) -> Index Scan using compress_hyper_6_20_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_20_chunk (actual rows=1 loops=1) @@ -5657,67 +5684,8 @@ ORDER BY device_id; -> Index Scan using compress_hyper_6_21_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_21_chunk (actual rows=3 loops=1) -> Partial GroupAggregate (actual rows=1 loops=1) Group Key: _hyper_2_12_chunk.device_id - -> Sort (actual rows=336 loops=1) - Sort Key: _hyper_2_12_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_12_chunk (actual rows=336 loops=1) -(45 rows) - --- test window functions with GROUP BY -:PREFIX -SELECT sum(count(*)) OVER () -FROM :TEST_TABLE -GROUP BY device_id -ORDER BY device_id; - QUERY PLAN 
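-- [Editor's sketch] The relocated window-function test shows that the
-- pushdown composes with WindowAgg: the window function consumes the output
-- of the Finalize GroupAggregate, so the per-chunk partial aggregates
-- underneath are unchanged. A hypothetical direct form of the test query:
explain (costs off)
select sum(count(*)) over ()
from metrics  -- hypothetical stand-in for :TEST_TABLE
group by device_id
order by device_id;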
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - WindowAgg (actual rows=5 loops=1) - -> Finalize GroupAggregate (actual rows=5 loops=1) - Group Key: _hyper_2_4_chunk.device_id - -> Sort (actual rows=15 loops=1) - Sort Key: _hyper_2_4_chunk.device_id - Sort Method: quicksort - -> Append (actual rows=15 loops=1) - -> Custom Scan (VectorAgg) (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=720 loops=1) - -> Index Scan using compress_hyper_6_17_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_17_chunk (actual rows=1 loops=1) - -> Custom Scan (VectorAgg) (actual rows=3 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=2160 loops=1) - -> Index Scan using compress_hyper_6_18_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_18_chunk (actual rows=3 loops=1) - -> Custom Scan (VectorAgg) (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=720 loops=1) - -> Index Scan using compress_hyper_6_19_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_19_chunk (actual rows=1 loops=1) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: _hyper_2_7_chunk.device_id - -> Sort (actual rows=672 loops=1) - Sort Key: _hyper_2_7_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_7_chunk (actual rows=672 loops=1) - -> Partial GroupAggregate (actual rows=3 loops=1) - Group Key: _hyper_2_8_chunk.device_id - -> Sort (actual rows=2016 loops=1) - Sort Key: _hyper_2_8_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_8_chunk (actual rows=2016 loops=1) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: _hyper_2_9_chunk.device_id - -> Sort (actual rows=672 loops=1) - Sort Key: _hyper_2_9_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_9_chunk (actual rows=672 loops=1) - -> Custom Scan (VectorAgg) (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=336 loops=1) - -> Index Scan using compress_hyper_6_20_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_20_chunk (actual rows=1 loops=1) - -> Custom Scan (VectorAgg) (actual rows=3 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=1008 loops=1) - -> Index Scan using compress_hyper_6_21_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_21_chunk (actual rows=3 loops=1) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: _hyper_2_12_chunk.device_id - -> Sort (actual rows=336 loops=1) - Sort Key: _hyper_2_12_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_12_chunk (actual rows=336 loops=1) -(46 rows) + -> Index Only Scan Backward using _hyper_2_12_chunk_metrics_space_device_id_device_id_peer_v0_v_2 on _hyper_2_12_chunk (actual rows=336 loops=1) +(36 rows) SET enable_hashagg = ON; -- test CTE diff --git a/tsl/test/expected/transparent_decompression-15.out b/tsl/test/expected/transparent_decompression-15.out index 0a041fe1f5a..5677fa7d695 100644 --- a/tsl/test/expected/transparent_decompression-15.out +++ b/tsl/test/expected/transparent_decompression-15.out @@ -1645,14 +1645,39 @@ SELECT count(*) FROM :TEST_TABLE GROUP BY device_id ORDER BY device_id; - QUERY PLAN 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize GroupAggregate (actual rows=5 loops=1) Group Key: _hyper_1_1_chunk.device_id - -> Sort (actual rows=15 loops=1) + -> Merge Append (actual rows=15 loops=1) Sort Key: _hyper_1_1_chunk.device_id - Sort Method: quicksort - -> Append (actual rows=15 loops=1) + -> Custom Scan (VectorAgg) (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=3600 loops=1) + -> Index Scan using compress_hyper_5_15_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_5_15_chunk (actual rows=5 loops=1) + -> Partial GroupAggregate (actual rows=5 loops=1) + Group Key: _hyper_1_2_chunk.device_id + -> Sort (actual rows=3360 loops=1) + Sort Key: _hyper_1_2_chunk.device_id + Sort Method: quicksort + -> Seq Scan on _hyper_1_2_chunk (actual rows=3360 loops=1) + -> Custom Scan (VectorAgg) (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=1680 loops=1) + -> Index Scan using compress_hyper_5_16_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_5_16_chunk (actual rows=5 loops=1) +(16 rows) + +-- test window functions with GROUP BY +:PREFIX +SELECT sum(count(*)) OVER () +FROM :TEST_TABLE +GROUP BY device_id +ORDER BY device_id; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------- + WindowAgg (actual rows=5 loops=1) + -> Finalize GroupAggregate (actual rows=5 loops=1) + Group Key: _hyper_1_1_chunk.device_id + -> Merge Append (actual rows=15 loops=1) + Sort Key: _hyper_1_1_chunk.device_id -> Custom Scan (VectorAgg) (actual rows=5 loops=1) -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=3600 loops=1) -> Index Scan using compress_hyper_5_15_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_5_15_chunk (actual rows=5 loops=1) @@ -1665,36 +1690,7 @@ ORDER BY device_id; -> Custom Scan (VectorAgg) (actual rows=5 loops=1) -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=1680 loops=1) -> Index Scan using compress_hyper_5_16_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_5_16_chunk (actual rows=5 loops=1) -(18 rows) - --- test window functions with GROUP BY -:PREFIX -SELECT sum(count(*)) OVER () -FROM :TEST_TABLE -GROUP BY device_id -ORDER BY device_id; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - WindowAgg (actual rows=5 loops=1) - -> Finalize GroupAggregate (actual rows=5 loops=1) - Group Key: _hyper_1_1_chunk.device_id - -> Sort (actual rows=15 loops=1) - Sort Key: _hyper_1_1_chunk.device_id - Sort Method: quicksort - -> Append (actual rows=15 loops=1) - -> Custom Scan (VectorAgg) (actual rows=5 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=3600 loops=1) - -> Index Scan using compress_hyper_5_15_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_5_15_chunk (actual rows=5 loops=1) - -> Partial GroupAggregate (actual rows=5 loops=1) - Group Key: _hyper_1_2_chunk.device_id - -> Sort (actual rows=3360 loops=1) - Sort Key: 
_hyper_1_2_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_1_2_chunk (actual rows=3360 loops=1) - -> Custom Scan (VectorAgg) (actual rows=5 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=1680 loops=1) - -> Index Scan using compress_hyper_5_16_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_5_16_chunk (actual rows=5 loops=1) -(19 rows) +(17 rows) SET enable_hashagg = ON; -- test CTE @@ -5614,14 +5610,54 @@ SELECT count(*) FROM :TEST_TABLE GROUP BY device_id ORDER BY device_id; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize GroupAggregate (actual rows=5 loops=1) Group Key: _hyper_2_4_chunk.device_id - -> Sort (actual rows=15 loops=1) + -> Merge Append (actual rows=15 loops=1) Sort Key: _hyper_2_4_chunk.device_id - Sort Method: quicksort - -> Append (actual rows=15 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=720 loops=1) + -> Index Scan using compress_hyper_6_17_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_17_chunk (actual rows=1 loops=1) + -> Custom Scan (VectorAgg) (actual rows=3 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=2160 loops=1) + -> Index Scan using compress_hyper_6_18_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_18_chunk (actual rows=3 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=720 loops=1) + -> Index Scan using compress_hyper_6_19_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_19_chunk (actual rows=1 loops=1) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: _hyper_2_7_chunk.device_id + -> Index Only Scan Backward using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_7_chunk (actual rows=672 loops=1) + -> Partial GroupAggregate (actual rows=3 loops=1) + Group Key: _hyper_2_8_chunk.device_id + -> Index Only Scan Backward using _hyper_2_8_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_8_chunk (actual rows=2016 loops=1) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: _hyper_2_9_chunk.device_id + -> Index Only Scan Backward using _hyper_2_9_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_9_chunk (actual rows=672 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=336 loops=1) + -> Index Scan using compress_hyper_6_20_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_20_chunk (actual rows=1 loops=1) + -> Custom Scan (VectorAgg) (actual rows=3 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=1008 loops=1) + -> Index Scan using compress_hyper_6_21_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_21_chunk (actual rows=3 loops=1) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: _hyper_2_12_chunk.device_id + -> Index Only Scan Backward using _hyper_2_12_chunk_metrics_space_device_id_device_id_peer_v0_v_2 on _hyper_2_12_chunk (actual rows=336 loops=1) +(35 rows) + +-- test window functions with GROUP BY +:PREFIX +SELECT 
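-- [Editor's note] For the space-partitioned metrics_space table, the
-- uncompressed chunks now feed their Partial GroupAggregate from an Index
-- Only Scan Backward instead of a Sort over a Seq Scan, since the btree index
-- already provides device_id order. A hypothetical space-partitioned setup of
-- the same shape (three hash partitions on device_id):
create table space_sketch(time timestamptz not null, device_id int, v0 float, v1 float);
select create_hypertable('space_sketch', 'time', 'device_id', 3);
create index on space_sketch (device_id, time);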
sum(count(*)) OVER () +FROM :TEST_TABLE +GROUP BY device_id +ORDER BY device_id; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------- + WindowAgg (actual rows=5 loops=1) + -> Finalize GroupAggregate (actual rows=5 loops=1) + Group Key: _hyper_2_4_chunk.device_id + -> Merge Append (actual rows=15 loops=1) + Sort Key: _hyper_2_4_chunk.device_id -> Custom Scan (VectorAgg) (actual rows=1 loops=1) -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=720 loops=1) -> Index Scan using compress_hyper_6_17_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_17_chunk (actual rows=1 loops=1) @@ -5633,22 +5669,13 @@ ORDER BY device_id; -> Index Scan using compress_hyper_6_19_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_19_chunk (actual rows=1 loops=1) -> Partial GroupAggregate (actual rows=1 loops=1) Group Key: _hyper_2_7_chunk.device_id - -> Sort (actual rows=672 loops=1) - Sort Key: _hyper_2_7_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_7_chunk (actual rows=672 loops=1) + -> Index Only Scan Backward using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_7_chunk (actual rows=672 loops=1) -> Partial GroupAggregate (actual rows=3 loops=1) Group Key: _hyper_2_8_chunk.device_id - -> Sort (actual rows=2016 loops=1) - Sort Key: _hyper_2_8_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_8_chunk (actual rows=2016 loops=1) + -> Index Only Scan Backward using _hyper_2_8_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_8_chunk (actual rows=2016 loops=1) -> Partial GroupAggregate (actual rows=1 loops=1) Group Key: _hyper_2_9_chunk.device_id - -> Sort (actual rows=672 loops=1) - Sort Key: _hyper_2_9_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_9_chunk (actual rows=672 loops=1) + -> Index Only Scan Backward using _hyper_2_9_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_9_chunk (actual rows=672 loops=1) -> Custom Scan (VectorAgg) (actual rows=1 loops=1) -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=336 loops=1) -> Index Scan using compress_hyper_6_20_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_20_chunk (actual rows=1 loops=1) @@ -5657,67 +5684,8 @@ ORDER BY device_id; -> Index Scan using compress_hyper_6_21_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_21_chunk (actual rows=3 loops=1) -> Partial GroupAggregate (actual rows=1 loops=1) Group Key: _hyper_2_12_chunk.device_id - -> Sort (actual rows=336 loops=1) - Sort Key: _hyper_2_12_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_12_chunk (actual rows=336 loops=1) -(45 rows) - --- test window functions with GROUP BY -:PREFIX -SELECT sum(count(*)) OVER () -FROM :TEST_TABLE -GROUP BY device_id -ORDER BY device_id; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - WindowAgg (actual rows=5 loops=1) - -> Finalize GroupAggregate (actual rows=5 loops=1) - Group Key: _hyper_2_4_chunk.device_id - -> Sort (actual rows=15 loops=1) - Sort Key: _hyper_2_4_chunk.device_id - Sort Method: quicksort - -> Append (actual rows=15 loops=1) - -> Custom Scan (VectorAgg) (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=720 loops=1) - -> Index 
Scan using compress_hyper_6_17_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_17_chunk (actual rows=1 loops=1) - -> Custom Scan (VectorAgg) (actual rows=3 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=2160 loops=1) - -> Index Scan using compress_hyper_6_18_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_18_chunk (actual rows=3 loops=1) - -> Custom Scan (VectorAgg) (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=720 loops=1) - -> Index Scan using compress_hyper_6_19_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_19_chunk (actual rows=1 loops=1) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: _hyper_2_7_chunk.device_id - -> Sort (actual rows=672 loops=1) - Sort Key: _hyper_2_7_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_7_chunk (actual rows=672 loops=1) - -> Partial GroupAggregate (actual rows=3 loops=1) - Group Key: _hyper_2_8_chunk.device_id - -> Sort (actual rows=2016 loops=1) - Sort Key: _hyper_2_8_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_8_chunk (actual rows=2016 loops=1) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: _hyper_2_9_chunk.device_id - -> Sort (actual rows=672 loops=1) - Sort Key: _hyper_2_9_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_9_chunk (actual rows=672 loops=1) - -> Custom Scan (VectorAgg) (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=336 loops=1) - -> Index Scan using compress_hyper_6_20_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_20_chunk (actual rows=1 loops=1) - -> Custom Scan (VectorAgg) (actual rows=3 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=1008 loops=1) - -> Index Scan using compress_hyper_6_21_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_21_chunk (actual rows=3 loops=1) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: _hyper_2_12_chunk.device_id - -> Sort (actual rows=336 loops=1) - Sort Key: _hyper_2_12_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_12_chunk (actual rows=336 loops=1) -(46 rows) + -> Index Only Scan Backward using _hyper_2_12_chunk_metrics_space_device_id_device_id_peer_v0_v_2 on _hyper_2_12_chunk (actual rows=336 loops=1) +(36 rows) SET enable_hashagg = ON; -- test CTE diff --git a/tsl/test/expected/transparent_decompression-16.out b/tsl/test/expected/transparent_decompression-16.out index 0d0e3de0348..c379010a68e 100644 --- a/tsl/test/expected/transparent_decompression-16.out +++ b/tsl/test/expected/transparent_decompression-16.out @@ -1645,14 +1645,39 @@ SELECT count(*) FROM :TEST_TABLE GROUP BY device_id ORDER BY device_id; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize GroupAggregate (actual rows=5 loops=1) Group Key: _hyper_1_1_chunk.device_id - -> Sort (actual rows=15 loops=1) + -> Merge Append (actual rows=15 loops=1) Sort Key: _hyper_1_1_chunk.device_id - Sort Method: quicksort - -> Append (actual rows=15 loops=1) + -> Custom Scan (VectorAgg) (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=3600 
loops=1) + -> Index Scan using compress_hyper_5_15_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_5_15_chunk (actual rows=5 loops=1) + -> Partial GroupAggregate (actual rows=5 loops=1) + Group Key: _hyper_1_2_chunk.device_id + -> Sort (actual rows=3360 loops=1) + Sort Key: _hyper_1_2_chunk.device_id + Sort Method: quicksort + -> Seq Scan on _hyper_1_2_chunk (actual rows=3360 loops=1) + -> Custom Scan (VectorAgg) (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=1680 loops=1) + -> Index Scan using compress_hyper_5_16_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_5_16_chunk (actual rows=5 loops=1) +(16 rows) + +-- test window functions with GROUP BY +:PREFIX +SELECT sum(count(*)) OVER () +FROM :TEST_TABLE +GROUP BY device_id +ORDER BY device_id; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------- + WindowAgg (actual rows=5 loops=1) + -> Finalize GroupAggregate (actual rows=5 loops=1) + Group Key: _hyper_1_1_chunk.device_id + -> Merge Append (actual rows=15 loops=1) + Sort Key: _hyper_1_1_chunk.device_id -> Custom Scan (VectorAgg) (actual rows=5 loops=1) -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=3600 loops=1) -> Index Scan using compress_hyper_5_15_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_5_15_chunk (actual rows=5 loops=1) @@ -1665,36 +1690,7 @@ ORDER BY device_id; -> Custom Scan (VectorAgg) (actual rows=5 loops=1) -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=1680 loops=1) -> Index Scan using compress_hyper_5_16_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_5_16_chunk (actual rows=5 loops=1) -(18 rows) - --- test window functions with GROUP BY -:PREFIX -SELECT sum(count(*)) OVER () -FROM :TEST_TABLE -GROUP BY device_id -ORDER BY device_id; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - WindowAgg (actual rows=5 loops=1) - -> Finalize GroupAggregate (actual rows=5 loops=1) - Group Key: _hyper_1_1_chunk.device_id - -> Sort (actual rows=15 loops=1) - Sort Key: _hyper_1_1_chunk.device_id - Sort Method: quicksort - -> Append (actual rows=15 loops=1) - -> Custom Scan (VectorAgg) (actual rows=5 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=3600 loops=1) - -> Index Scan using compress_hyper_5_15_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_5_15_chunk (actual rows=5 loops=1) - -> Partial GroupAggregate (actual rows=5 loops=1) - Group Key: _hyper_1_2_chunk.device_id - -> Sort (actual rows=3360 loops=1) - Sort Key: _hyper_1_2_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_1_2_chunk (actual rows=3360 loops=1) - -> Custom Scan (VectorAgg) (actual rows=5 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=1680 loops=1) - -> Index Scan using compress_hyper_5_16_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_5_16_chunk (actual rows=5 loops=1) -(19 rows) +(17 rows) SET enable_hashagg = ON; -- test CTE @@ -5614,14 +5610,54 @@ SELECT count(*) FROM :TEST_TABLE GROUP BY device_id ORDER BY device_id; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN 
+----------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize GroupAggregate (actual rows=5 loops=1) Group Key: _hyper_2_4_chunk.device_id - -> Sort (actual rows=15 loops=1) + -> Merge Append (actual rows=15 loops=1) Sort Key: _hyper_2_4_chunk.device_id - Sort Method: quicksort - -> Append (actual rows=15 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=720 loops=1) + -> Index Scan using compress_hyper_6_17_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_17_chunk (actual rows=1 loops=1) + -> Custom Scan (VectorAgg) (actual rows=3 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=2160 loops=1) + -> Index Scan using compress_hyper_6_18_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_18_chunk (actual rows=3 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=720 loops=1) + -> Index Scan using compress_hyper_6_19_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_19_chunk (actual rows=1 loops=1) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: _hyper_2_7_chunk.device_id + -> Index Only Scan Backward using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_7_chunk (actual rows=672 loops=1) + -> Partial GroupAggregate (actual rows=3 loops=1) + Group Key: _hyper_2_8_chunk.device_id + -> Index Only Scan Backward using _hyper_2_8_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_8_chunk (actual rows=2016 loops=1) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: _hyper_2_9_chunk.device_id + -> Index Only Scan Backward using _hyper_2_9_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_9_chunk (actual rows=672 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=336 loops=1) + -> Index Scan using compress_hyper_6_20_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_20_chunk (actual rows=1 loops=1) + -> Custom Scan (VectorAgg) (actual rows=3 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=1008 loops=1) + -> Index Scan using compress_hyper_6_21_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_21_chunk (actual rows=3 loops=1) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: _hyper_2_12_chunk.device_id + -> Index Only Scan Backward using _hyper_2_12_chunk_metrics_space_device_id_device_id_peer_v0_v_2 on _hyper_2_12_chunk (actual rows=336 loops=1) +(35 rows) + +-- test window functions with GROUP BY +:PREFIX +SELECT sum(count(*)) OVER () +FROM :TEST_TABLE +GROUP BY device_id +ORDER BY device_id; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------- + WindowAgg (actual rows=5 loops=1) + -> Finalize GroupAggregate (actual rows=5 loops=1) + Group Key: _hyper_2_4_chunk.device_id + -> Merge Append (actual rows=15 loops=1) + Sort Key: _hyper_2_4_chunk.device_id -> Custom Scan (VectorAgg) (actual rows=1 loops=1) -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=720 loops=1) -> Index Scan using compress_hyper_6_17_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_17_chunk (actual rows=1 
loops=1) @@ -5633,22 +5669,13 @@ ORDER BY device_id; -> Index Scan using compress_hyper_6_19_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_19_chunk (actual rows=1 loops=1) -> Partial GroupAggregate (actual rows=1 loops=1) Group Key: _hyper_2_7_chunk.device_id - -> Sort (actual rows=672 loops=1) - Sort Key: _hyper_2_7_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_7_chunk (actual rows=672 loops=1) + -> Index Only Scan Backward using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_7_chunk (actual rows=672 loops=1) -> Partial GroupAggregate (actual rows=3 loops=1) Group Key: _hyper_2_8_chunk.device_id - -> Sort (actual rows=2016 loops=1) - Sort Key: _hyper_2_8_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_8_chunk (actual rows=2016 loops=1) + -> Index Only Scan Backward using _hyper_2_8_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_8_chunk (actual rows=2016 loops=1) -> Partial GroupAggregate (actual rows=1 loops=1) Group Key: _hyper_2_9_chunk.device_id - -> Sort (actual rows=672 loops=1) - Sort Key: _hyper_2_9_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_9_chunk (actual rows=672 loops=1) + -> Index Only Scan Backward using _hyper_2_9_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_9_chunk (actual rows=672 loops=1) -> Custom Scan (VectorAgg) (actual rows=1 loops=1) -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=336 loops=1) -> Index Scan using compress_hyper_6_20_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_20_chunk (actual rows=1 loops=1) @@ -5657,67 +5684,8 @@ ORDER BY device_id; -> Index Scan using compress_hyper_6_21_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_21_chunk (actual rows=3 loops=1) -> Partial GroupAggregate (actual rows=1 loops=1) Group Key: _hyper_2_12_chunk.device_id - -> Sort (actual rows=336 loops=1) - Sort Key: _hyper_2_12_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_12_chunk (actual rows=336 loops=1) -(45 rows) - --- test window functions with GROUP BY -:PREFIX -SELECT sum(count(*)) OVER () -FROM :TEST_TABLE -GROUP BY device_id -ORDER BY device_id; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - WindowAgg (actual rows=5 loops=1) - -> Finalize GroupAggregate (actual rows=5 loops=1) - Group Key: _hyper_2_4_chunk.device_id - -> Sort (actual rows=15 loops=1) - Sort Key: _hyper_2_4_chunk.device_id - Sort Method: quicksort - -> Append (actual rows=15 loops=1) - -> Custom Scan (VectorAgg) (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=720 loops=1) - -> Index Scan using compress_hyper_6_17_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_17_chunk (actual rows=1 loops=1) - -> Custom Scan (VectorAgg) (actual rows=3 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=2160 loops=1) - -> Index Scan using compress_hyper_6_18_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_18_chunk (actual rows=3 loops=1) - -> Custom Scan (VectorAgg) (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=720 loops=1) - -> Index Scan using compress_hyper_6_19_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_19_chunk (actual rows=1 loops=1) - -> Partial GroupAggregate (actual rows=1 loops=1) - 
Group Key: _hyper_2_7_chunk.device_id - -> Sort (actual rows=672 loops=1) - Sort Key: _hyper_2_7_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_7_chunk (actual rows=672 loops=1) - -> Partial GroupAggregate (actual rows=3 loops=1) - Group Key: _hyper_2_8_chunk.device_id - -> Sort (actual rows=2016 loops=1) - Sort Key: _hyper_2_8_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_8_chunk (actual rows=2016 loops=1) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: _hyper_2_9_chunk.device_id - -> Sort (actual rows=672 loops=1) - Sort Key: _hyper_2_9_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_9_chunk (actual rows=672 loops=1) - -> Custom Scan (VectorAgg) (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=336 loops=1) - -> Index Scan using compress_hyper_6_20_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_20_chunk (actual rows=1 loops=1) - -> Custom Scan (VectorAgg) (actual rows=3 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=1008 loops=1) - -> Index Scan using compress_hyper_6_21_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_21_chunk (actual rows=3 loops=1) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: _hyper_2_12_chunk.device_id - -> Sort (actual rows=336 loops=1) - Sort Key: _hyper_2_12_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_12_chunk (actual rows=336 loops=1) -(46 rows) + -> Index Only Scan Backward using _hyper_2_12_chunk_metrics_space_device_id_device_id_peer_v0_v_2 on _hyper_2_12_chunk (actual rows=336 loops=1) +(36 rows) SET enable_hashagg = ON; -- test CTE diff --git a/tsl/test/expected/transparent_decompression-17.out b/tsl/test/expected/transparent_decompression-17.out index 0d0e3de0348..c379010a68e 100644 --- a/tsl/test/expected/transparent_decompression-17.out +++ b/tsl/test/expected/transparent_decompression-17.out @@ -1645,14 +1645,39 @@ SELECT count(*) FROM :TEST_TABLE GROUP BY device_id ORDER BY device_id; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize GroupAggregate (actual rows=5 loops=1) Group Key: _hyper_1_1_chunk.device_id - -> Sort (actual rows=15 loops=1) + -> Merge Append (actual rows=15 loops=1) Sort Key: _hyper_1_1_chunk.device_id - Sort Method: quicksort - -> Append (actual rows=15 loops=1) + -> Custom Scan (VectorAgg) (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=3600 loops=1) + -> Index Scan using compress_hyper_5_15_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_5_15_chunk (actual rows=5 loops=1) + -> Partial GroupAggregate (actual rows=5 loops=1) + Group Key: _hyper_1_2_chunk.device_id + -> Sort (actual rows=3360 loops=1) + Sort Key: _hyper_1_2_chunk.device_id + Sort Method: quicksort + -> Seq Scan on _hyper_1_2_chunk (actual rows=3360 loops=1) + -> Custom Scan (VectorAgg) (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=1680 loops=1) + -> Index Scan using compress_hyper_5_16_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_5_16_chunk (actual rows=5 loops=1) +(16 rows) + +-- test window functions with GROUP BY 
+:PREFIX +SELECT sum(count(*)) OVER () +FROM :TEST_TABLE +GROUP BY device_id +ORDER BY device_id; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------- + WindowAgg (actual rows=5 loops=1) + -> Finalize GroupAggregate (actual rows=5 loops=1) + Group Key: _hyper_1_1_chunk.device_id + -> Merge Append (actual rows=15 loops=1) + Sort Key: _hyper_1_1_chunk.device_id -> Custom Scan (VectorAgg) (actual rows=5 loops=1) -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=3600 loops=1) -> Index Scan using compress_hyper_5_15_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_5_15_chunk (actual rows=5 loops=1) @@ -1665,36 +1690,7 @@ ORDER BY device_id; -> Custom Scan (VectorAgg) (actual rows=5 loops=1) -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=1680 loops=1) -> Index Scan using compress_hyper_5_16_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_5_16_chunk (actual rows=5 loops=1) -(18 rows) - --- test window functions with GROUP BY -:PREFIX -SELECT sum(count(*)) OVER () -FROM :TEST_TABLE -GROUP BY device_id -ORDER BY device_id; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - WindowAgg (actual rows=5 loops=1) - -> Finalize GroupAggregate (actual rows=5 loops=1) - Group Key: _hyper_1_1_chunk.device_id - -> Sort (actual rows=15 loops=1) - Sort Key: _hyper_1_1_chunk.device_id - Sort Method: quicksort - -> Append (actual rows=15 loops=1) - -> Custom Scan (VectorAgg) (actual rows=5 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=3600 loops=1) - -> Index Scan using compress_hyper_5_15_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_5_15_chunk (actual rows=5 loops=1) - -> Partial GroupAggregate (actual rows=5 loops=1) - Group Key: _hyper_1_2_chunk.device_id - -> Sort (actual rows=3360 loops=1) - Sort Key: _hyper_1_2_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_1_2_chunk (actual rows=3360 loops=1) - -> Custom Scan (VectorAgg) (actual rows=5 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=1680 loops=1) - -> Index Scan using compress_hyper_5_16_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_5_16_chunk (actual rows=5 loops=1) -(19 rows) +(17 rows) SET enable_hashagg = ON; -- test CTE @@ -5614,14 +5610,54 @@ SELECT count(*) FROM :TEST_TABLE GROUP BY device_id ORDER BY device_id; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize GroupAggregate (actual rows=5 loops=1) Group Key: _hyper_2_4_chunk.device_id - -> Sort (actual rows=15 loops=1) + -> Merge Append (actual rows=15 loops=1) Sort Key: _hyper_2_4_chunk.device_id - Sort Method: quicksort - -> Append (actual rows=15 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=720 loops=1) + -> Index Scan using compress_hyper_6_17_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_17_chunk (actual rows=1 loops=1) + -> Custom Scan (VectorAgg) 
(actual rows=3 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=2160 loops=1) + -> Index Scan using compress_hyper_6_18_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_18_chunk (actual rows=3 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=720 loops=1) + -> Index Scan using compress_hyper_6_19_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_19_chunk (actual rows=1 loops=1) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: _hyper_2_7_chunk.device_id + -> Index Only Scan Backward using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_7_chunk (actual rows=672 loops=1) + -> Partial GroupAggregate (actual rows=3 loops=1) + Group Key: _hyper_2_8_chunk.device_id + -> Index Only Scan Backward using _hyper_2_8_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_8_chunk (actual rows=2016 loops=1) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: _hyper_2_9_chunk.device_id + -> Index Only Scan Backward using _hyper_2_9_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_9_chunk (actual rows=672 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=336 loops=1) + -> Index Scan using compress_hyper_6_20_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_20_chunk (actual rows=1 loops=1) + -> Custom Scan (VectorAgg) (actual rows=3 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=1008 loops=1) + -> Index Scan using compress_hyper_6_21_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_21_chunk (actual rows=3 loops=1) + -> Partial GroupAggregate (actual rows=1 loops=1) + Group Key: _hyper_2_12_chunk.device_id + -> Index Only Scan Backward using _hyper_2_12_chunk_metrics_space_device_id_device_id_peer_v0_v_2 on _hyper_2_12_chunk (actual rows=336 loops=1) +(35 rows) + +-- test window functions with GROUP BY +:PREFIX +SELECT sum(count(*)) OVER () +FROM :TEST_TABLE +GROUP BY device_id +ORDER BY device_id; + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------- + WindowAgg (actual rows=5 loops=1) + -> Finalize GroupAggregate (actual rows=5 loops=1) + Group Key: _hyper_2_4_chunk.device_id + -> Merge Append (actual rows=15 loops=1) + Sort Key: _hyper_2_4_chunk.device_id -> Custom Scan (VectorAgg) (actual rows=1 loops=1) -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=720 loops=1) -> Index Scan using compress_hyper_6_17_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_17_chunk (actual rows=1 loops=1) @@ -5633,22 +5669,13 @@ ORDER BY device_id; -> Index Scan using compress_hyper_6_19_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_19_chunk (actual rows=1 loops=1) -> Partial GroupAggregate (actual rows=1 loops=1) Group Key: _hyper_2_7_chunk.device_id - -> Sort (actual rows=672 loops=1) - Sort Key: _hyper_2_7_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_7_chunk (actual rows=672 loops=1) + -> Index Only Scan Backward using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_7_chunk (actual rows=672 loops=1) -> Partial GroupAggregate (actual rows=3 loops=1) Group Key: _hyper_2_8_chunk.device_id - -> Sort (actual rows=2016 loops=1) - Sort Key: 
_hyper_2_8_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_8_chunk (actual rows=2016 loops=1) + -> Index Only Scan Backward using _hyper_2_8_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_8_chunk (actual rows=2016 loops=1) -> Partial GroupAggregate (actual rows=1 loops=1) Group Key: _hyper_2_9_chunk.device_id - -> Sort (actual rows=672 loops=1) - Sort Key: _hyper_2_9_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_9_chunk (actual rows=672 loops=1) + -> Index Only Scan Backward using _hyper_2_9_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _hyper_2_9_chunk (actual rows=672 loops=1) -> Custom Scan (VectorAgg) (actual rows=1 loops=1) -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=336 loops=1) -> Index Scan using compress_hyper_6_20_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_20_chunk (actual rows=1 loops=1) @@ -5657,67 +5684,8 @@ ORDER BY device_id; -> Index Scan using compress_hyper_6_21_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_21_chunk (actual rows=3 loops=1) -> Partial GroupAggregate (actual rows=1 loops=1) Group Key: _hyper_2_12_chunk.device_id - -> Sort (actual rows=336 loops=1) - Sort Key: _hyper_2_12_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_12_chunk (actual rows=336 loops=1) -(45 rows) - --- test window functions with GROUP BY -:PREFIX -SELECT sum(count(*)) OVER () -FROM :TEST_TABLE -GROUP BY device_id -ORDER BY device_id; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - WindowAgg (actual rows=5 loops=1) - -> Finalize GroupAggregate (actual rows=5 loops=1) - Group Key: _hyper_2_4_chunk.device_id - -> Sort (actual rows=15 loops=1) - Sort Key: _hyper_2_4_chunk.device_id - Sort Method: quicksort - -> Append (actual rows=15 loops=1) - -> Custom Scan (VectorAgg) (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=720 loops=1) - -> Index Scan using compress_hyper_6_17_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_17_chunk (actual rows=1 loops=1) - -> Custom Scan (VectorAgg) (actual rows=3 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=2160 loops=1) - -> Index Scan using compress_hyper_6_18_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_18_chunk (actual rows=3 loops=1) - -> Custom Scan (VectorAgg) (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=720 loops=1) - -> Index Scan using compress_hyper_6_19_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_19_chunk (actual rows=1 loops=1) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: _hyper_2_7_chunk.device_id - -> Sort (actual rows=672 loops=1) - Sort Key: _hyper_2_7_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_7_chunk (actual rows=672 loops=1) - -> Partial GroupAggregate (actual rows=3 loops=1) - Group Key: _hyper_2_8_chunk.device_id - -> Sort (actual rows=2016 loops=1) - Sort Key: _hyper_2_8_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_8_chunk (actual rows=2016 loops=1) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: _hyper_2_9_chunk.device_id - -> Sort (actual rows=672 loops=1) - Sort Key: _hyper_2_9_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_9_chunk (actual rows=672 loops=1) - -> Custom Scan 
(VectorAgg) (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=336 loops=1) - -> Index Scan using compress_hyper_6_20_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_20_chunk (actual rows=1 loops=1) - -> Custom Scan (VectorAgg) (actual rows=3 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=1008 loops=1) - -> Index Scan using compress_hyper_6_21_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_21_chunk (actual rows=3 loops=1) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: _hyper_2_12_chunk.device_id - -> Sort (actual rows=336 loops=1) - Sort Key: _hyper_2_12_chunk.device_id - Sort Method: quicksort - -> Seq Scan on _hyper_2_12_chunk (actual rows=336 loops=1) -(46 rows) + -> Index Only Scan Backward using _hyper_2_12_chunk_metrics_space_device_id_device_id_peer_v0_v_2 on _hyper_2_12_chunk (actual rows=336 loops=1) +(36 rows) SET enable_hashagg = ON; -- test CTE diff --git a/tsl/test/expected/vector_agg_memory.out b/tsl/test/expected/vector_agg_memory.out index 868075cc890..1600fbba164 100644 --- a/tsl/test/expected/vector_agg_memory.out +++ b/tsl/test/expected/vector_agg_memory.out @@ -79,28 +79,27 @@ set work_mem = '64kB'; explain (costs off) select ts_debug_allocated_bytes() bytes, count(*) a, count(t) b, sum(t) c, avg(t) d, min(t) e, max(t) f from mvagg where t >= -1 and t < 1000000 group by s1; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------- Finalize GroupAggregate Group Key: _hyper_1_1_chunk.s1 - -> Sort + -> Merge Append Sort Key: _hyper_1_1_chunk.s1 - -> Append - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk - Vectorized Filter: ((t >= '-1'::integer) AND (t < 1000000)) - -> Sort - Sort Key: compress_hyper_2_3_chunk.s1 - -> Seq Scan on compress_hyper_2_3_chunk - Filter: ((_ts_meta_max_1 >= '-1'::integer) AND (_ts_meta_min_1 < 1000000)) - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk - Vectorized Filter: ((t >= '-1'::integer) AND (t < 1000000)) - -> Sort - Sort Key: compress_hyper_2_4_chunk.s1 - -> Index Scan using compress_hyper_2_4_chunk_s0_s1__ts_meta_min_1__ts_meta_max__idx on compress_hyper_2_4_chunk - Index Cond: ((_ts_meta_min_1 < 1000000) AND (_ts_meta_max_1 >= '-1'::integer)) -(19 rows) + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk + Vectorized Filter: ((t >= '-1'::integer) AND (t < 1000000)) + -> Sort + Sort Key: compress_hyper_2_3_chunk.s1 + -> Seq Scan on compress_hyper_2_3_chunk + Filter: ((_ts_meta_max_1 >= '-1'::integer) AND (_ts_meta_min_1 < 1000000)) + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk + Vectorized Filter: ((t >= '-1'::integer) AND (t < 1000000)) + -> Sort + Sort Key: compress_hyper_2_4_chunk.s1 + -> Index Scan using compress_hyper_2_4_chunk_s0_s1__ts_meta_min_1__ts_meta_max__idx on compress_hyper_2_4_chunk + Index Cond: ((_ts_meta_min_1 < 1000000) AND (_ts_meta_max_1 >= '-1'::integer)) +(18 rows) \set ECHO none reset timescaledb.debug_require_vector_agg; diff --git a/tsl/test/expected/vectorized_aggregation.out b/tsl/test/expected/vectorized_aggregation.out index 63ff6ed337c..8887dac8469 100644 --- 
a/tsl/test/expected/vectorized_aggregation.out +++ b/tsl/test/expected/vectorized_aggregation.out @@ -111,143 +111,149 @@ SELECT sum(segment_by_value) FROM testtable; -- Vectorization possible - filter on segment_by :EXPLAIN SELECT sum(segment_by_value) FROM testtable WHERE segment_by_value > 0; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate Output: sum(_hyper_1_1_chunk.segment_by_value) - -> Append - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) - Grouping Policy: all compressed batches - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk - Output: _hyper_1_1_chunk.segment_by_value - -> Index Scan using compress_hyper_2_11_chunk_segment_by_value__ts_meta_min_1___idx on _timescaledb_internal.compress_hyper_2_11_chunk - Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - Index Cond: (compress_hyper_2_11_chunk.segment_by_value > 0) - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) - Grouping Policy: all compressed batches - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk - Output: _hyper_1_2_chunk.segment_by_value - -> Index Scan using compress_hyper_2_12_chunk_segment_by_value__ts_meta_min_1___idx on _timescaledb_internal.compress_hyper_2_12_chunk - Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - Index Cond: (compress_hyper_2_12_chunk.segment_by_value > 0) - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) - Grouping Policy: all compressed batches - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk - Output: _hyper_1_3_chunk.segment_by_value - -> Index Scan using compress_hyper_2_13_chunk_segment_by_value__ts_meta_min_1___idx on _timescaledb_internal.compress_hyper_2_13_chunk - Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value - Index Cond: (compress_hyper_2_13_chunk.segment_by_value > 0) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_4_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_4_chunk - Output: _hyper_1_4_chunk.segment_by_value - Filter: (_hyper_1_4_chunk.segment_by_value > 0) - -> Partial Aggregate - Output: PARTIAL 
sum(_hyper_1_5_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_5_chunk - Output: _hyper_1_5_chunk.segment_by_value - Filter: (_hyper_1_5_chunk.segment_by_value > 0) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_6_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_6_chunk - Output: _hyper_1_6_chunk.segment_by_value - Filter: (_hyper_1_6_chunk.segment_by_value > 0) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_7_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_7_chunk - Output: _hyper_1_7_chunk.segment_by_value - Filter: (_hyper_1_7_chunk.segment_by_value > 0) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_8_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_8_chunk - Output: _hyper_1_8_chunk.segment_by_value - Filter: (_hyper_1_8_chunk.segment_by_value > 0) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_9_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_9_chunk - Output: _hyper_1_9_chunk.segment_by_value - Filter: (_hyper_1_9_chunk.segment_by_value > 0) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_10_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_10_chunk - Output: _hyper_1_10_chunk.segment_by_value - Filter: (_hyper_1_10_chunk.segment_by_value > 0) -(62 rows) + -> Gather + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) + Grouping Policy: all compressed batches + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk + Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value + Filter: (compress_hyper_2_11_chunk.segment_by_value > 0) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) + Grouping Policy: all compressed batches + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk + Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value + Filter: (compress_hyper_2_12_chunk.segment_by_value > 0) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) + Grouping Policy: all compressed batches + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + Output: _hyper_1_3_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk + Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value + Filter: (compress_hyper_2_13_chunk.segment_by_value > 0) + -> Partial Aggregate + Output: PARTIAL 
sum(_hyper_1_4_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_4_chunk + Output: _hyper_1_4_chunk.segment_by_value + Filter: (_hyper_1_4_chunk.segment_by_value > 0) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_5_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_5_chunk + Output: _hyper_1_5_chunk.segment_by_value + Filter: (_hyper_1_5_chunk.segment_by_value > 0) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_6_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_6_chunk + Output: _hyper_1_6_chunk.segment_by_value + Filter: (_hyper_1_6_chunk.segment_by_value > 0) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_7_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_7_chunk + Output: _hyper_1_7_chunk.segment_by_value + Filter: (_hyper_1_7_chunk.segment_by_value > 0) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_8_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_8_chunk + Output: _hyper_1_8_chunk.segment_by_value + Filter: (_hyper_1_8_chunk.segment_by_value > 0) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_9_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_9_chunk + Output: _hyper_1_9_chunk.segment_by_value + Filter: (_hyper_1_9_chunk.segment_by_value > 0) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_10_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_10_chunk + Output: _hyper_1_10_chunk.segment_by_value + Filter: (_hyper_1_10_chunk.segment_by_value > 0) +(65 rows) -- Vectorization not possible due to a used filter :EXPLAIN SELECT sum(segment_by_value) FROM testtable WHERE segment_by_value > 0 AND int_value > 0; - QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate Output: sum(_hyper_1_1_chunk.segment_by_value) - -> Append - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) - Grouping Policy: all compressed batches - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk - Output: _hyper_1_1_chunk.segment_by_value - Vectorized Filter: (_hyper_1_1_chunk.int_value > 0) - -> Index Scan using compress_hyper_2_11_chunk_segment_by_value__ts_meta_min_1___idx on _timescaledb_internal.compress_hyper_2_11_chunk - Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - Index Cond: (compress_hyper_2_11_chunk.segment_by_value > 0) - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) - Grouping Policy: all compressed batches - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk - Output: 
_hyper_1_2_chunk.segment_by_value - Vectorized Filter: (_hyper_1_2_chunk.int_value > 0) - -> Index Scan using compress_hyper_2_12_chunk_segment_by_value__ts_meta_min_1___idx on _timescaledb_internal.compress_hyper_2_12_chunk - Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - Index Cond: (compress_hyper_2_12_chunk.segment_by_value > 0) - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) - Grouping Policy: all compressed batches - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk - Output: _hyper_1_3_chunk.segment_by_value - Vectorized Filter: (_hyper_1_3_chunk.int_value > 0) - -> Index Scan using compress_hyper_2_13_chunk_segment_by_value__ts_meta_min_1___idx on _timescaledb_internal.compress_hyper_2_13_chunk - Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value - Index Cond: (compress_hyper_2_13_chunk.segment_by_value > 0) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_4_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_4_chunk - Output: _hyper_1_4_chunk.segment_by_value - Filter: ((_hyper_1_4_chunk.segment_by_value > 0) AND (_hyper_1_4_chunk.int_value > 0)) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_5_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_5_chunk - Output: _hyper_1_5_chunk.segment_by_value - Filter: ((_hyper_1_5_chunk.segment_by_value > 0) AND (_hyper_1_5_chunk.int_value > 0)) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_6_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_6_chunk - Output: _hyper_1_6_chunk.segment_by_value - Filter: ((_hyper_1_6_chunk.segment_by_value > 0) AND (_hyper_1_6_chunk.int_value > 0)) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_7_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_7_chunk - Output: _hyper_1_7_chunk.segment_by_value - Filter: ((_hyper_1_7_chunk.segment_by_value > 0) AND (_hyper_1_7_chunk.int_value > 0)) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_8_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_8_chunk - Output: _hyper_1_8_chunk.segment_by_value - Filter: ((_hyper_1_8_chunk.segment_by_value > 0) AND (_hyper_1_8_chunk.int_value > 0)) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_9_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_9_chunk - Output: _hyper_1_9_chunk.segment_by_value - Filter: ((_hyper_1_9_chunk.segment_by_value > 0) AND (_hyper_1_9_chunk.int_value > 0)) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_10_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_10_chunk - Output: _hyper_1_10_chunk.segment_by_value - Filter: ((_hyper_1_10_chunk.segment_by_value > 0) AND (_hyper_1_10_chunk.int_value > 0)) -(65 rows) + -> Gather + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) + Grouping Policy: all compressed batches + -> Custom Scan (DecompressChunk) on 
_timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.segment_by_value + Vectorized Filter: (_hyper_1_1_chunk.int_value > 0) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk + Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value + Filter: (compress_hyper_2_11_chunk.segment_by_value > 0) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) + Grouping Policy: all compressed batches + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.segment_by_value + Vectorized Filter: (_hyper_1_2_chunk.int_value > 0) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk + Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value + Filter: (compress_hyper_2_12_chunk.segment_by_value > 0) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) + Grouping Policy: all compressed batches + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + Output: _hyper_1_3_chunk.segment_by_value + Vectorized Filter: (_hyper_1_3_chunk.int_value > 0) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk + Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value + Filter: (compress_hyper_2_13_chunk.segment_by_value > 0) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_4_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_4_chunk + Output: _hyper_1_4_chunk.segment_by_value + Filter: ((_hyper_1_4_chunk.segment_by_value > 0) AND (_hyper_1_4_chunk.int_value > 0)) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_5_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_5_chunk + Output: _hyper_1_5_chunk.segment_by_value + Filter: ((_hyper_1_5_chunk.segment_by_value > 0) AND (_hyper_1_5_chunk.int_value > 0)) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_6_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_6_chunk + Output: _hyper_1_6_chunk.segment_by_value + Filter: ((_hyper_1_6_chunk.segment_by_value > 0) AND (_hyper_1_6_chunk.int_value > 0)) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_7_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_7_chunk + Output: _hyper_1_7_chunk.segment_by_value + Filter: ((_hyper_1_7_chunk.segment_by_value > 0) AND (_hyper_1_7_chunk.int_value > 0)) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_8_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_8_chunk + Output: _hyper_1_8_chunk.segment_by_value + Filter: ((_hyper_1_8_chunk.segment_by_value > 0) AND (_hyper_1_8_chunk.int_value > 0)) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_9_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_9_chunk + 
Output: _hyper_1_9_chunk.segment_by_value + Filter: ((_hyper_1_9_chunk.segment_by_value > 0) AND (_hyper_1_9_chunk.int_value > 0)) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_10_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_10_chunk + Output: _hyper_1_10_chunk.segment_by_value + Filter: ((_hyper_1_10_chunk.segment_by_value > 0) AND (_hyper_1_10_chunk.int_value > 0)) +(68 rows) :EXPLAIN SELECT sum(segment_by_value) FROM testtable WHERE int_value > 0; diff --git a/tsl/test/sql/hypercore_index_btree.sql b/tsl/test/sql/hypercore_index_btree.sql index bb32489dfa7..72a935cabd5 100644 --- a/tsl/test/sql/hypercore_index_btree.sql +++ b/tsl/test/sql/hypercore_index_btree.sql @@ -11,6 +11,7 @@ set role :ROLE_DEFAULT_PERM_USER; -- Avoid parallel (index) scans to make test stable set max_parallel_workers_per_gather to 0; set enable_hashagg to off; +set timescaledb.enable_chunkwise_aggregation to off; -- Drop the device_id index and redefine it later with extra columns. drop index hypertable_device_id_idx; diff --git a/tsl/test/sql/hypercore_scans.sql b/tsl/test/sql/hypercore_scans.sql index da3b345edfd..63beaef30f3 100644 --- a/tsl/test/sql/hypercore_scans.sql +++ b/tsl/test/sql/hypercore_scans.sql @@ -2,6 +2,9 @@ -- Please see the included NOTICE for copyright information and -- LICENSE-TIMESCALE for a copy of the license. +-- Avoid chunkwise aggregation to make the test stable +set timescaledb.enable_chunkwise_aggregation to off; + create table readings(time timestamptz, location text, device int,