From e64faf702d6168a4bb5de25a1edb5e468e4b33cb Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Mon, 25 Mar 2024 17:53:48 +0100 Subject: [PATCH 01/81] vectorized aggregation as separate plan node --- src/cross_module_fn.c | 6 + src/cross_module_fn.h | 1 + src/planner/planner.c | 2 + tsl/src/init.c | 1 + tsl/src/nodes/CMakeLists.txt | 1 + tsl/src/nodes/decompress_chunk/exec.c | 90 ++++++++++----- tsl/src/nodes/decompress_chunk/planner.c | 34 ++++-- tsl/src/nodes/vector_agg/CMakeLists.txt | 2 + tsl/src/nodes/vector_agg/vector_agg.c | 133 +++++++++++++++++++++++ tsl/src/nodes/vector_agg/vector_agg.h | 24 ++++ tsl/src/partialize_agg.c | 85 +++++++++++++++ tsl/src/partialize_agg.h | 2 + 12 files changed, 342 insertions(+), 39 deletions(-) create mode 100644 tsl/src/nodes/vector_agg/CMakeLists.txt create mode 100644 tsl/src/nodes/vector_agg/vector_agg.c create mode 100644 tsl/src/nodes/vector_agg/vector_agg.h diff --git a/src/cross_module_fn.c b/src/cross_module_fn.c index 8482b901dbe..0e1f499d986 100644 --- a/src/cross_module_fn.c +++ b/src/cross_module_fn.c @@ -131,6 +131,11 @@ push_down_aggregation(PlannerInfo *root, AggPath *aggregation_path, Path *subpat return false; } +static void +tsl_postprocess_plan_stub(PlannedStmt *stmt) +{ +} + static bool process_compress_table_default(AlterTableCmd *cmd, Hypertable *ht, WithClauseResult *with_clause_options) @@ -323,6 +328,7 @@ TSDLLEXPORT CrossModuleFunctions ts_cm_functions_default = { .policies_show = error_no_default_fn_pg_community, .push_down_aggregation = push_down_aggregation, + .tsl_postprocess_plan = tsl_postprocess_plan_stub, .partialize_agg = error_no_default_fn_pg_community, .finalize_agg_sfunc = error_no_default_fn_pg_community, diff --git a/src/cross_module_fn.h b/src/cross_module_fn.h index c443bcab1c3..722ca5a918c 100644 --- a/src/cross_module_fn.h +++ b/src/cross_module_fn.h @@ -91,6 +91,7 @@ typedef struct CrossModuleFunctions /* Vectorized queries */ bool (*push_down_aggregation)(PlannerInfo *root, AggPath *aggregation_path, Path *subpath); + void (*tsl_postprocess_plan)(PlannedStmt *stmt); /* Continuous Aggregates */ PGFunction partialize_agg; diff --git a/src/planner/planner.c b/src/planner/planner.c index 43a1ca6ff1c..ca630d983d9 100644 --- a/src/planner/planner.c +++ b/src/planner/planner.c @@ -580,6 +580,8 @@ timescaledb_planner(Query *parse, const char *query_string, int cursor_opts, AGGSPLITOP_SERIALIZE | AGGSPLITOP_SKIPFINAL; } } + + ts_cm_functions->tsl_postprocess_plan(stmt); } if (reset_baserel_info) diff --git a/tsl/src/init.c b/tsl/src/init.c index f43a7c2e487..ce1035b0d2d 100644 --- a/tsl/src/init.c +++ b/tsl/src/init.c @@ -119,6 +119,7 @@ CrossModuleFunctions tsl_cm_functions = { /* Vectorized queries */ .push_down_aggregation = apply_vectorized_agg_optimization, + .tsl_postprocess_plan = tsl_postprocess_plan, /* Continuous Aggregates */ .partialize_agg = tsl_partialize_agg, diff --git a/tsl/src/nodes/CMakeLists.txt b/tsl/src/nodes/CMakeLists.txt index e7789b8a314..d3eecdba8ad 100644 --- a/tsl/src/nodes/CMakeLists.txt +++ b/tsl/src/nodes/CMakeLists.txt @@ -5,3 +5,4 @@ add_subdirectory(decompress_chunk) add_subdirectory(frozen_chunk_dml) add_subdirectory(gapfill) add_subdirectory(skip_scan) +add_subdirectory(vector_agg) diff --git a/tsl/src/nodes/decompress_chunk/exec.c b/tsl/src/nodes/decompress_chunk/exec.c index 238b52f927b..8f7fd7a9148 100644 --- a/tsl/src/nodes/decompress_chunk/exec.c +++ b/tsl/src/nodes/decompress_chunk/exec.c @@ -77,6 +77,7 @@ 
decompress_chunk_state_create(CustomScan *cscan) chunk_state->decompress_context.reverse = lthird_int(settings); chunk_state->decompress_context.batch_sorted_merge = lfourth_int(settings); chunk_state->decompress_context.enable_bulk_decompression = lfifth_int(settings); + // chunk_state->perform_vectorized_aggregation = false; //lsixth_int(settings); chunk_state->perform_vectorized_aggregation = lsixth_int(settings); Assert(IsA(cscan->custom_exprs, List)); @@ -173,6 +174,8 @@ decompress_chunk_exec_heap(CustomScanState *node) return decompress_chunk_exec_impl(chunk_state, &BatchQueueFunctionsHeap); } +static TupleTableSlot *decompress_chunk_exec_vector_agg(CustomScanState *node); + /* * Complete initialization of the supplied CustomScanState. * @@ -349,7 +352,13 @@ decompress_chunk_begin(CustomScanState *node, EState *estate, int eflags) * Choose which batch queue we are going to use: heap for batch sorted * merge, and one-element FIFO for normal decompression. */ - if (dcontext->batch_sorted_merge) + if (chunk_state->perform_vectorized_aggregation) + { + chunk_state->batch_queue = + batch_queue_fifo_create(num_compressed, &BatchQueueFunctionsFifo); + chunk_state->exec_methods.ExecCustomScan = decompress_chunk_exec_vector_agg; + } + else if (dcontext->batch_sorted_merge) { chunk_state->batch_queue = batch_queue_heap_create(num_compressed, @@ -393,12 +402,12 @@ decompress_chunk_begin(CustomScanState *node, EState *estate, int eflags) * Perform a vectorized aggregation on int4 values */ static TupleTableSlot * -perform_vectorized_sum_int4(DecompressChunkState *chunk_state, Aggref *aggref) +perform_vectorized_sum_int4(CustomScanState *vector_agg_state, Aggref *aggref, + DecompressChunkState *decompress_state) { - DecompressContext *dcontext = &chunk_state->decompress_context; - BatchQueue *batch_queue = chunk_state->batch_queue; + DecompressContext *dcontext = &decompress_state->decompress_context; + BatchQueue *batch_queue = decompress_state->batch_queue; - Assert(chunk_state != NULL); Assert(aggref != NULL); /* Partial result is a int8 */ @@ -443,14 +452,15 @@ perform_vectorized_sum_int4(DecompressChunkState *chunk_state, Aggref *aggref) Assert(dcontext->bulk_decompression_context != NULL); /* Get a reference the the output TupleTableSlot */ - TupleTableSlot *decompressed_scan_slot = chunk_state->csstate.ss.ss_ScanTupleSlot; - Assert(decompressed_scan_slot->tts_tupleDescriptor->natts == 1); + TupleTableSlot *aggregated_slot = + decompress_state->csstate.ss.ss_ScanTupleSlot; // FIXME should be ss.ps.ps_ResultTupleSlot + Assert(aggregated_slot->tts_tupleDescriptor->natts == 1); /* Set all attributes of the result tuple to NULL. So, we return NULL if no data is processed * by our implementation. In addition, the call marks the slot as being used (i.e., no * ExecStoreVirtualTuple call is required). 
*/ - ExecStoreAllNullTuple(decompressed_scan_slot); - Assert(!TupIsNull(decompressed_scan_slot)); + ExecStoreAllNullTuple(aggregated_slot); + Assert(!TupIsNull(aggregated_slot)); int64 result_sum = 0; @@ -463,7 +473,7 @@ perform_vectorized_sum_int4(DecompressChunkState *chunk_state, Aggref *aggref) while (true) { TupleTableSlot *compressed_slot = - ExecProcNode(linitial(chunk_state->csstate.custom_ps)); + ExecProcNode(linitial(decompress_state->csstate.custom_ps)); if (TupIsNull(compressed_slot)) { @@ -494,7 +504,7 @@ perform_vectorized_sum_int4(DecompressChunkState *chunk_state, Aggref *aggref) Assert(amount > 0); /* We have at least one value */ - decompressed_scan_slot->tts_isnull[0] = false; + aggregated_slot->tts_isnull[0] = false; /* Multiply the number of tuples with the actual value */ if (unlikely(pg_mul_s64_overflow(intvalue, amount, &batch_sum))) @@ -522,7 +532,7 @@ perform_vectorized_sum_int4(DecompressChunkState *chunk_state, Aggref *aggref) while (true) { TupleTableSlot *compressed_slot = - ExecProcNode(linitial(chunk_state->csstate.custom_ps)); + ExecProcNode(linitial(decompress_state->csstate.custom_ps)); if (TupIsNull(compressed_slot)) { /* All compressed batches are processed. */ @@ -541,7 +551,7 @@ perform_vectorized_sum_int4(DecompressChunkState *chunk_state, Aggref *aggref) Ensure(isnull == false, "got unexpected NULL attribute value from compressed batch"); /* We have at least one value */ - decompressed_scan_slot->tts_isnull[0] = false; + aggregated_slot->tts_isnull[0] = false; CompressedDataHeader *header = (CompressedDataHeader *) detoaster_detoast_attr((struct varlena *) DatumGetPointer( @@ -605,9 +615,9 @@ perform_vectorized_sum_int4(DecompressChunkState *chunk_state, Aggref *aggref) /* Use Int64GetDatum to store the result since a 64-bit value is not pass-by-value on 32-bit * systems */ - decompressed_scan_slot->tts_values[0] = Int64GetDatum(result_sum); + aggregated_slot->tts_values[0] = Int64GetDatum(result_sum); - return decompressed_scan_slot; + return aggregated_slot; } /* @@ -618,27 +628,42 @@ perform_vectorized_sum_int4(DecompressChunkState *chunk_state, Aggref *aggref) * directly before it is converted into row-based tuples. */ static TupleTableSlot * -perform_vectorized_aggregation(DecompressChunkState *chunk_state) +decompress_chunk_exec_vector_agg_impl( + CustomScanState *vector_agg_state, + /* + * For some reason, the output targetlist of DecompressChunk with vectorized + * aggregation is INDEX_VAR into its scan targetlist, which in turn contains the + * Aggref we need. So here we have to specify the aggregated targetlist + * separately, because we can't get it in a uniform way from both Agg and DC + * nodes. + */ + List *aggregated_tlist, DecompressChunkState *decompress_state) { - BatchQueue *bq = chunk_state->batch_queue; + BatchQueue *bq = decompress_state->batch_queue; - Assert(list_length(chunk_state->custom_scan_tlist) == 1); + Assert(list_length(aggregated_tlist) == 1); // FIXME /* Checked by planner */ Assert(ts_guc_enable_vectorized_aggregation); Assert(ts_guc_enable_bulk_decompression); - /* When using vectorized aggregates, only one result tuple is produced. So, if we have + /* + * When using vectorized aggregates, only one result tuple is produced. So, if we have * already initialized a batch state, the aggregation was already performed. 
*/ if (batch_array_has_active_batches(&bq->batch_array)) { - ExecClearTuple(chunk_state->csstate.ss.ss_ScanTupleSlot); - return chunk_state->csstate.ss.ss_ScanTupleSlot; + ExecClearTuple(vector_agg_state->ss.ss_ScanTupleSlot); + return vector_agg_state->ss.ss_ScanTupleSlot; } /* Determine which kind of vectorized aggregation we should perform */ - TargetEntry *tlentry = (TargetEntry *) linitial(chunk_state->custom_scan_tlist); + // fprintf(stderr, "output tlist:\n"); + // my_print(decompress_state->csstate.ss.ps.plan->targetlist); + // fprintf(stderr, "custom scan tlist:\n"); + // my_print(decompress_state->custom_scan_tlist); + + TargetEntry *tlentry = (TargetEntry *) linitial(aggregated_tlist); Assert(IsA(tlentry->expr, Aggref)); Aggref *aggref = castNode(Aggref, tlentry->expr); @@ -648,7 +673,7 @@ perform_vectorized_aggregation(DecompressChunkState *chunk_state) switch (aggref->aggfnoid) { case F_SUM_INT4: - return perform_vectorized_sum_int4(chunk_state, aggref); + return perform_vectorized_sum_int4(vector_agg_state, aggref, decompress_state); default: ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), @@ -658,6 +683,20 @@ perform_vectorized_aggregation(DecompressChunkState *chunk_state) } } +static TupleTableSlot * +decompress_chunk_exec_vector_agg(CustomScanState *node) +{ + DecompressChunkState *chunk_state = (DecompressChunkState *) node; + Assert(!chunk_state->decompress_context.batch_sorted_merge); + Assert(chunk_state->perform_vectorized_aggregation); + + CustomScanState *vector_agg_state = node; + + return decompress_chunk_exec_vector_agg_impl(vector_agg_state, + chunk_state->custom_scan_tlist, + chunk_state); +} + /* * The exec function for the DecompressChunk node. It takes the explicit queue * functions pointer as an optimization, to allow these functions to be @@ -672,10 +711,7 @@ decompress_chunk_exec_impl(DecompressChunkState *chunk_state, const BatchQueueFu Assert(bq->funcs == bqfuncs); - if (chunk_state->perform_vectorized_aggregation) - { - return perform_vectorized_aggregation(chunk_state); - } + Assert(!chunk_state->perform_vectorized_aggregation); bqfuncs->pop(bq, dcontext); diff --git a/tsl/src/nodes/decompress_chunk/planner.c b/tsl/src/nodes/decompress_chunk/planner.c index 3dce92da4c3..b2bbf63062f 100644 --- a/tsl/src/nodes/decompress_chunk/planner.c +++ b/tsl/src/nodes/decompress_chunk/planner.c @@ -33,6 +33,7 @@ #include "nodes/decompress_chunk/exec.h" #include "nodes/decompress_chunk/planner.h" #include "nodes/chunk_append/transform.h" +#include "nodes/vector_agg/vector_agg.h" #include "vector_predicates.h" #include "ts_catalog/array_utils.h" @@ -722,18 +723,6 @@ decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *pat /* output target list */ decompress_plan->scan.plan.targetlist = decompressed_tlist; - /* input target list */ - decompress_plan->custom_scan_tlist = NIL; - - /* Make PostgreSQL aware that we emit partials. In apply_vectorized_agg_optimization the - * pathtarget of the node is changed; the decompress chunk node now emits prtials directly. - * - * We have to set a custom_scan_tlist to make sure tlist_matches_tupdesc is true to prevent the - * call of ExecAssignProjectionInfo in ExecConditionalAssignProjectionInfo. Otherwise, - * PostgreSQL will error out since scan nodes are not intended to emit partial aggregates. 
- */ - if (dcpath->perform_vectorized_aggregation) - decompress_plan->custom_scan_tlist = decompressed_tlist; if (IsA(compressed_path, IndexPath)) { @@ -1084,5 +1073,26 @@ decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *pat dcpath->aggregated_column_type, sort_options); + /* input target list */ + decompress_plan->custom_scan_tlist = NIL; + + /* Make PostgreSQL aware that we emit partials. In apply_vectorized_agg_optimization the + * pathtarget of the node is changed; the decompress chunk node now emits prtials directly. + * + * We have to set a custom_scan_tlist to make sure tlist_matches_tupdesc is true to prevent the + * call of ExecAssignProjectionInfo in ExecConditionalAssignProjectionInfo. Otherwise, + * PostgreSQL will error out since scan nodes are not intended to emit partial aggregates. + */ + if (dcpath->perform_vectorized_aggregation) + { + decompress_plan->custom_scan_tlist = decompressed_tlist; + decompress_plan->scan.plan.targetlist = decompressed_tlist; + } + + // if (dcpath->perform_vectorized_aggregation) + // { + // return vector_agg_plan_create(&decompress_plan->scan.plan); + // } + return &decompress_plan->scan.plan; } diff --git a/tsl/src/nodes/vector_agg/CMakeLists.txt b/tsl/src/nodes/vector_agg/CMakeLists.txt new file mode 100644 index 00000000000..55da7eed64d --- /dev/null +++ b/tsl/src/nodes/vector_agg/CMakeLists.txt @@ -0,0 +1,2 @@ +set(SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/vector_agg.c) +target_sources(${TSL_LIBRARY_NAME} PRIVATE ${SOURCES}) diff --git a/tsl/src/nodes/vector_agg/vector_agg.c b/tsl/src/nodes/vector_agg/vector_agg.c new file mode 100644 index 00000000000..16f033d0316 --- /dev/null +++ b/tsl/src/nodes/vector_agg/vector_agg.c @@ -0,0 +1,133 @@ +/* + * This file and its contents are licensed under the Timescale License. + * Please see the included NOTICE for copyright information and + * LICENSE-TIMESCALE for a copy of the license. + */ + +#include + +#include +#include +#include +#include +#include + +#include "vector_agg.h" + +#include "nodes/decompress_chunk/compressed_batch.h" +#include "nodes/decompress_chunk/exec.h" + +static void +vector_agg_begin(CustomScanState *node, EState *estate, int eflags) +{ + CustomScan *cscan = castNode(CustomScan, node->ss.ps.plan); + node->custom_ps = + lappend(node->custom_ps, ExecInitNode(linitial(cscan->custom_plans), estate, eflags)); +} + +static void +vector_agg_end(CustomScanState *node) +{ + ExecEndNode(linitial(node->custom_ps)); +} + +static void +vector_agg_rescan(CustomScanState *node) +{ + if (node->ss.ps.chgParam != NULL) + UpdateChangedParamSet(linitial(node->custom_ps), node->ss.ps.chgParam); + + ExecReScan(linitial(node->custom_ps)); +} + +static TupleTableSlot * +vector_agg_exec(CustomScanState *node) +{ + return ExecProcNode(linitial(node->custom_ps)); + // DecompressChunkState *ds = (DecompressChunkState *) linitial(node->custom_ps); + // DecompressContext *dcontext = &ds->decompress_context; + // TupleTableSlot *inner_slot = ExecProcNode(linitial(node->custom_ps)); + // if (inner_slot == NULL) + // { + // return NULL; + // } + // + // DecompressBatchState *batch_state = (DecompressBatchState *) inner_slot; +} + +static void +vector_agg_explain(CustomScanState *node, List *ancestors, ExplainState *es) +{ + /* noop? 
*/ +} + +static struct CustomExecMethods exec_methods = { + .CustomName = "VectorAgg", + .BeginCustomScan = vector_agg_begin, + .ExecCustomScan = vector_agg_exec, + .EndCustomScan = vector_agg_end, + .ReScanCustomScan = vector_agg_rescan, + .ExplainCustomScan = vector_agg_explain, +}; + +static struct CustomScanMethods scan_methods = { .CustomName = "VectorAgg", + .CreateCustomScanState = vector_agg_state_create }; + +static inline List * +CustomBuildTargetList(List *tlist, Index varNo) +{ + List *result_tlist = NIL; + ListCell *lc; + + foreach (lc, tlist) + { + TargetEntry *tle = (TargetEntry *) lfirst(lc); + + Var *var = makeVar(varNo, + tle->resno, + exprType((Node *) tle->expr), + exprTypmod((Node *) tle->expr), + exprCollation((Node *) tle->expr), + 0); + + TargetEntry *newtle = makeTargetEntry((Expr *) var, tle->resno, tle->resname, tle->resjunk); + + result_tlist = lappend(result_tlist, newtle); + } + + return result_tlist; +} + +Plan * +vector_agg_plan_create(Agg *agg, CustomScan *decompress_chunk) +{ + CustomScan *custom = (CustomScan *) makeNode(CustomScan); + custom->custom_plans = list_make1(decompress_chunk); + custom->methods = &scan_methods; + // custom->scan.plan.targetlist = CustomBuildTargetList(agg->plan.targetlist, INDEX_VAR); + custom->scan.plan.targetlist = agg->plan.targetlist; + // fprintf(stderr, "source agg tagetlist:\n"); + // my_print(agg->plan.targetlist); + // fprintf(stderr, "build targetlist:\n"); + // my_print(custom->scan.plan.targetlist); + // custom->custom_scan_tlist = CustomBuildTargetList(decompress_chunk->scan.plan.targetlist, + // INDEX_VAR); + custom->custom_scan_tlist = decompress_chunk->scan.plan.targetlist; + + // custom->scan.plan.lefttree = agg->plan.lefttree; + + fprintf(stderr, "created:\n"); + my_print(custom); + + (void) CustomBuildTargetList; + + return (Plan *) custom; +} + +Node * +vector_agg_state_create(CustomScan *cscan) +{ + CustomScanState *state = makeNode(CustomScanState); + state->methods = &exec_methods; + return (Node *) state; +} diff --git a/tsl/src/nodes/vector_agg/vector_agg.h b/tsl/src/nodes/vector_agg/vector_agg.h new file mode 100644 index 00000000000..744de3f0d5b --- /dev/null +++ b/tsl/src/nodes/vector_agg/vector_agg.h @@ -0,0 +1,24 @@ +/* + * This file and its contents are licensed under the Timescale License. + * Please see the included NOTICE for copyright information and + * LICENSE-TIMESCALE for a copy of the license. + */ + +#include + +#include +#include + +typedef struct VectorAggState +{ + CustomScanState custom; +} VectorAggState; + +typedef struct VectorAggPlan +{ + CustomScan custom; +} VectorAggPlan; + +extern Plan *vector_agg_plan_create(Agg *agg, CustomScan *decompress_chunk); + +extern Node *vector_agg_state_create(CustomScan *cscan); diff --git a/tsl/src/partialize_agg.c b/tsl/src/partialize_agg.c index 8644555e32a..7a35e652e8e 100644 --- a/tsl/src/partialize_agg.c +++ b/tsl/src/partialize_agg.c @@ -28,6 +28,8 @@ #include "utils.h" #include "debug_assert.h" +#include "nodes/vector_agg/vector_agg.h" + /* * Are we able to optimize the path by applying vectorized aggregation? */ @@ -118,3 +120,86 @@ apply_vectorized_agg_optimization(PlannerInfo *root, AggPath *aggregation_path, /* PostgreSQL should handle the aggregation. Regular agg node on top is required. 
*/ return false; } + +static Plan * +insert_vector_agg_node(Plan *plan) +{ + if (plan->lefttree) + { + plan->lefttree = insert_vector_agg_node(plan->lefttree); + } + + if (plan->righttree) + { + plan->righttree = insert_vector_agg_node(plan->righttree); + } + + if (IsA(plan, Append)) + { + List *plans = castNode(Append, plan)->appendplans; + ListCell *lc; + foreach (lc, plans) + { + lfirst(lc) = insert_vector_agg_node(lfirst(lc)); + } + } + + if (plan->type != T_Agg) + { + return plan; + } + + fprintf(stderr, "found agg!\n"); + + Agg *agg = castNode(Agg, plan); + + if (agg->aggsplit != AGGSPLIT_INITIAL_SERIAL) + { + fprintf(stderr, "wrong split %d\n", agg->aggsplit); + return plan; + } + + if (agg->plan.lefttree == NULL) + { + fprintf(stderr, "no leftnode?\n"); + return plan; + } + + if (!IsA(agg->plan.lefttree, CustomScan)) + { + fprintf(stderr, "not custom\n"); + // my_print(agg->plan.lefttree); + return plan; + } + + CustomScan *custom = castNode(CustomScan, agg->plan.lefttree); + if (strcmp(custom->methods->CustomName, "DecompressChunk") != 0) + { + fprintf(stderr, "not decompress chunk\n"); + return plan; + } + + bool perform_vectorized_aggregation = list_nth_int(linitial(custom->custom_private), 5); + if (!perform_vectorized_aggregation) + { + fprintf(stderr, "no vectorized aggregation\n"); + return plan; + } + + fprintf(stderr, "found!!!\n"); + // my_print(plan); + + return vector_agg_plan_create(agg, custom); +} + +void +tsl_postprocess_plan(PlannedStmt *stmt) +{ + // mybt(); + // my_print(stmt); + + if (true) + { + stmt->planTree = insert_vector_agg_node(stmt->planTree); + } +} diff --git a/tsl/src/partialize_agg.h b/tsl/src/partialize_agg.h index b7fe1ba1d02..925e614ccf1 100644 --- a/tsl/src/partialize_agg.h +++ b/tsl/src/partialize_agg.h @@ -7,3 +7,5 @@ extern bool apply_vectorized_agg_optimization(PlannerInfo *root, AggPath *aggregation_path, Path *subpath); + +extern void tsl_postprocess_plan(PlannedStmt *stmt); From ef4cd87f5b274b8cf6312823267901f5aa7d7b6c Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Mon, 25 Mar 2024 18:40:05 +0100 Subject: [PATCH 02/81] fixes --- tsl/src/init.c | 2 ++ tsl/src/nodes/decompress_chunk/exec.c | 5 ++-- tsl/src/nodes/decompress_chunk/exec.h | 4 +++ tsl/src/nodes/vector_agg/vector_agg.c | 37 +++++++++++++++------------ tsl/src/nodes/vector_agg/vector_agg.h | 2 ++ tsl/src/partialize_agg.c | 2 ++ 6 files changed, 32 insertions(+), 20 deletions(-) diff --git a/tsl/src/init.c b/tsl/src/init.c index ce1035b0d2d..49ff5fced73 100644 --- a/tsl/src/init.c +++ b/tsl/src/init.c @@ -39,6 +39,7 @@ #include "nodes/decompress_chunk/planner.h" #include "nodes/skip_scan/skip_scan.h" #include "nodes/gapfill/gapfill_functions.h" +#include "nodes/vector_agg/vector_agg.h" #include "partialize_agg.h" #include "partialize_finalize.h" #include "planner.h" @@ -195,6 +196,7 @@ ts_module_init(PG_FUNCTION_ARGS) _continuous_aggs_cache_inval_init(); _decompress_chunk_init(); _skip_scan_init(); + _vector_agg_init(); /* Register a cleanup function to be called when the backend exits */ if (register_proc_exit) on_proc_exit(ts_module_cleanup_on_pg_exit, 0); diff --git a/tsl/src/nodes/decompress_chunk/exec.c b/tsl/src/nodes/decompress_chunk/exec.c index 8f7fd7a9148..16d168df6ff 100644 --- a/tsl/src/nodes/decompress_chunk/exec.c +++ b/tsl/src/nodes/decompress_chunk/exec.c @@ -452,8 +452,7 @@ perform_vectorized_sum_int4(CustomScanState *vector_agg_state, Aggref *aggref, Assert(dcontext->bulk_decompression_context != NULL); /* Get a 
reference the the output TupleTableSlot */ - TupleTableSlot *aggregated_slot = - decompress_state->csstate.ss.ss_ScanTupleSlot; // FIXME should be ss.ps.ps_ResultTupleSlot + TupleTableSlot *aggregated_slot = vector_agg_state->ss.ps.ps_ResultTupleSlot; Assert(aggregated_slot->tts_tupleDescriptor->natts == 1); /* Set all attributes of the result tuple to NULL. So, we return NULL if no data is processed @@ -627,7 +626,7 @@ perform_vectorized_sum_int4(CustomScanState *vector_agg_state, Aggref *aggref, * Executing the aggregation directly in this node makes it possible to use the columnar data * directly before it is converted into row-based tuples. */ -static TupleTableSlot * +TupleTableSlot * decompress_chunk_exec_vector_agg_impl( CustomScanState *vector_agg_state, /* diff --git a/tsl/src/nodes/decompress_chunk/exec.h b/tsl/src/nodes/decompress_chunk/exec.h index b3a43640e50..9a7e0780db7 100644 --- a/tsl/src/nodes/decompress_chunk/exec.h +++ b/tsl/src/nodes/decompress_chunk/exec.h @@ -49,3 +49,7 @@ typedef struct DecompressChunkState } DecompressChunkState; extern Node *decompress_chunk_state_create(CustomScan *cscan); + +TupleTableSlot *decompress_chunk_exec_vector_agg_impl(CustomScanState *vector_agg_state, + List *aggregated_tlist, + DecompressChunkState *decompress_state); diff --git a/tsl/src/nodes/vector_agg/vector_agg.c b/tsl/src/nodes/vector_agg/vector_agg.c index 16f033d0316..24967a866d2 100644 --- a/tsl/src/nodes/vector_agg/vector_agg.c +++ b/tsl/src/nodes/vector_agg/vector_agg.c @@ -43,16 +43,12 @@ vector_agg_rescan(CustomScanState *node) static TupleTableSlot * vector_agg_exec(CustomScanState *node) { - return ExecProcNode(linitial(node->custom_ps)); - // DecompressChunkState *ds = (DecompressChunkState *) linitial(node->custom_ps); - // DecompressContext *dcontext = &ds->decompress_context; - // TupleTableSlot *inner_slot = ExecProcNode(linitial(node->custom_ps)); - // if (inner_slot == NULL) - // { - // return NULL; - // } - // - // DecompressBatchState *batch_state = (DecompressBatchState *) inner_slot; + // return ExecProcNode(linitial(node->custom_ps)); + DecompressChunkState *ds = (DecompressChunkState *) linitial(node->custom_ps); + return decompress_chunk_exec_vector_agg_impl(node, + castNode(CustomScan, node->ss.ps.plan) + ->custom_scan_tlist, + ds); } static void @@ -104,20 +100,21 @@ vector_agg_plan_create(Agg *agg, CustomScan *decompress_chunk) CustomScan *custom = (CustomScan *) makeNode(CustomScan); custom->custom_plans = list_make1(decompress_chunk); custom->methods = &scan_methods; - // custom->scan.plan.targetlist = CustomBuildTargetList(agg->plan.targetlist, INDEX_VAR); - custom->scan.plan.targetlist = agg->plan.targetlist; + custom->scan.plan.targetlist = CustomBuildTargetList(agg->plan.targetlist, INDEX_VAR); + // custom->scan.plan.targetlist = agg->plan.targetlist; // fprintf(stderr, "source agg tagetlist:\n"); // my_print(agg->plan.targetlist); // fprintf(stderr, "build targetlist:\n"); // my_print(custom->scan.plan.targetlist); - // custom->custom_scan_tlist = CustomBuildTargetList(decompress_chunk->scan.plan.targetlist, - // INDEX_VAR); - custom->custom_scan_tlist = decompress_chunk->scan.plan.targetlist; + // custom->custom_scan_tlist = CustomBuildTargetList(decompress_chunk->scan.plan.targetlist, + // INDEX_VAR); + // custom->custom_scan_tlist = decompress_chunk->scan.plan.targetlist; + custom->custom_scan_tlist = agg->plan.targetlist; // custom->scan.plan.lefttree = agg->plan.lefttree; - fprintf(stderr, "created:\n"); - my_print(custom); +// 
fprintf(stderr, "created:\n"); +// my_print(custom); (void) CustomBuildTargetList; @@ -131,3 +128,9 @@ vector_agg_state_create(CustomScan *cscan) state->methods = &exec_methods; return (Node *) state; } + +void +_vector_agg_init(void) +{ + TryRegisterCustomScanMethods(&scan_methods); +} diff --git a/tsl/src/nodes/vector_agg/vector_agg.h b/tsl/src/nodes/vector_agg/vector_agg.h index 744de3f0d5b..2a53a7e6468 100644 --- a/tsl/src/nodes/vector_agg/vector_agg.h +++ b/tsl/src/nodes/vector_agg/vector_agg.h @@ -22,3 +22,5 @@ typedef struct VectorAggPlan extern Plan *vector_agg_plan_create(Agg *agg, CustomScan *decompress_chunk); extern Node *vector_agg_state_create(CustomScan *cscan); + +extern void _vector_agg_init(void); diff --git a/tsl/src/partialize_agg.c b/tsl/src/partialize_agg.c index 7a35e652e8e..facbf313c52 100644 --- a/tsl/src/partialize_agg.c +++ b/tsl/src/partialize_agg.c @@ -110,6 +110,7 @@ apply_vectorized_agg_optimization(PlannerInfo *root, AggPath *aggregation_path, /* Change the output of the path and let the decompress chunk node emit partial aggregates * directly */ decompress_path->perform_vectorized_aggregation = true; + decompress_path->custom_path.path.pathtarget = aggregation_path->path.pathtarget; /* The decompress chunk node can perform the aggregation directly. No need for a dedicated @@ -188,6 +189,7 @@ insert_vector_agg_node(Plan *plan) fprintf(stderr, "found!!!\n"); // my_print(plan); + // mybt(); return vector_agg_plan_create(agg, custom); } From b27d2b148bd05cfc20412bd9bdcec60cc046d260 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Wed, 27 Mar 2024 20:07:56 +0100 Subject: [PATCH 03/81] something works --- tsl/src/nodes/decompress_chunk/planner.c | 13 ++++--- tsl/src/nodes/vector_agg/vector_agg.c | 45 +++++++++++++++++++++--- tsl/src/partialize_agg.c | 14 +++++--- 3 files changed, 57 insertions(+), 15 deletions(-) diff --git a/tsl/src/nodes/decompress_chunk/planner.c b/tsl/src/nodes/decompress_chunk/planner.c index b2bbf63062f..f6a0e0f19c1 100644 --- a/tsl/src/nodes/decompress_chunk/planner.c +++ b/tsl/src/nodes/decompress_chunk/planner.c @@ -1083,11 +1083,14 @@ decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *pat * call of ExecAssignProjectionInfo in ExecConditionalAssignProjectionInfo. Otherwise, * PostgreSQL will error out since scan nodes are not intended to emit partial aggregates. 
*/ - if (dcpath->perform_vectorized_aggregation) - { - decompress_plan->custom_scan_tlist = decompressed_tlist; - decompress_plan->scan.plan.targetlist = decompressed_tlist; - } + // if (dcpath->perform_vectorized_aggregation) + // { + // decompress_plan->custom_scan_tlist = decompressed_tlist; + // decompress_plan->scan.plan.targetlist = decompressed_tlist; + + // fprintf(stderr, "when assigned, the decompressed tlist was:\n"); + // my_print(decompressed_tlist); + // } // if (dcpath->perform_vectorized_aggregation) // { diff --git a/tsl/src/nodes/vector_agg/vector_agg.c b/tsl/src/nodes/vector_agg/vector_agg.c index 24967a866d2..acadc9401cb 100644 --- a/tsl/src/nodes/vector_agg/vector_agg.c +++ b/tsl/src/nodes/vector_agg/vector_agg.c @@ -94,6 +94,38 @@ CustomBuildTargetList(List *tlist, Index varNo) return result_tlist; } +static Node * +replace_special_vars_mutator(Node *node, void *context) +{ + if (node == NULL) + { + return NULL; + } + + if (!IsA(node, Var)) + { + return expression_tree_mutator(node, replace_special_vars_mutator, context); + } + + Var *var = castNode(Var, node); + if (var->varno != OUTER_VAR) + { + return node; + } + + var = copyObject(var); + var->varno = DatumGetInt32(PointerGetDatum(context)); + return (Node *) var; +} + +static List * +replace_special_vars(List *input, int target_varno) +{ + return castNode(List, + replace_special_vars_mutator((Node *) input, + DatumGetPointer(Int32GetDatum(target_varno)))); +} + Plan * vector_agg_plan_create(Agg *agg, CustomScan *decompress_chunk) { @@ -101,15 +133,18 @@ vector_agg_plan_create(Agg *agg, CustomScan *decompress_chunk) custom->custom_plans = list_make1(decompress_chunk); custom->methods = &scan_methods; custom->scan.plan.targetlist = CustomBuildTargetList(agg->plan.targetlist, INDEX_VAR); - // custom->scan.plan.targetlist = agg->plan.targetlist; + // custom->scan.plan.targetlist = replace_special_vars(agg->plan.targetlist); + // custom->scan.plan.targetlist = agg->plan.targetlist; // fprintf(stderr, "source agg tagetlist:\n"); // my_print(agg->plan.targetlist); // fprintf(stderr, "build targetlist:\n"); // my_print(custom->scan.plan.targetlist); - // custom->custom_scan_tlist = CustomBuildTargetList(decompress_chunk->scan.plan.targetlist, - // INDEX_VAR); - // custom->custom_scan_tlist = decompress_chunk->scan.plan.targetlist; - custom->custom_scan_tlist = agg->plan.targetlist; + // custom->custom_scan_tlist = CustomBuildTargetList(decompress_chunk->scan.plan.targetlist, + // INDEX_VAR); + // custom->custom_scan_tlist = decompress_chunk->scan.plan.targetlist; + // custom->custom_scan_tlist = custom->scan.plan.targetlist; + custom->custom_scan_tlist = + replace_special_vars(agg->plan.targetlist, decompress_chunk->scan.scanrelid); // custom->scan.plan.lefttree = agg->plan.lefttree; diff --git a/tsl/src/partialize_agg.c b/tsl/src/partialize_agg.c index facbf313c52..be74c16b384 100644 --- a/tsl/src/partialize_agg.c +++ b/tsl/src/partialize_agg.c @@ -111,11 +111,12 @@ apply_vectorized_agg_optimization(PlannerInfo *root, AggPath *aggregation_path, * directly */ decompress_path->perform_vectorized_aggregation = true; - decompress_path->custom_path.path.pathtarget = aggregation_path->path.pathtarget; - - /* The decompress chunk node can perform the aggregation directly. No need for a dedicated - * agg node on top. */ - return true; + // decompress_path->custom_path.path.pathtarget = aggregation_path->path.pathtarget; + // + // /* The decompress chunk node can perform the aggregation directly. 
No need for a + //dedicated + // * agg node on top. */ + // return true; } /* PostgreSQL should handle the aggregation. Regular agg node on top is required. */ @@ -204,4 +205,7 @@ tsl_postprocess_plan(PlannedStmt *stmt) { stmt->planTree = insert_vector_agg_node(stmt->planTree); } + + // fprintf(stderr, "postprocessed:\n"); + // my_print(stmt->planTree); } From 13ba1730de580a786b29e15ba131c2390dd0f04f Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 28 Mar 2024 14:32:53 +0100 Subject: [PATCH 04/81] full switch --- tsl/src/nodes/decompress_chunk/exec.c | 28 +- tsl/src/nodes/decompress_chunk/exec.h | 1 - tsl/src/nodes/decompress_chunk/planner.c | 14 +- tsl/src/nodes/vector_agg/vector_agg.c | 9 +- tsl/src/partialize_agg.c | 186 +- tsl/test/expected/vectorized_aggregation.out | 2205 ++++++++++-------- 6 files changed, 1388 insertions(+), 1055 deletions(-) diff --git a/tsl/src/nodes/decompress_chunk/exec.c b/tsl/src/nodes/decompress_chunk/exec.c index 16d168df6ff..ec03e315c85 100644 --- a/tsl/src/nodes/decompress_chunk/exec.c +++ b/tsl/src/nodes/decompress_chunk/exec.c @@ -627,20 +627,22 @@ perform_vectorized_sum_int4(CustomScanState *vector_agg_state, Aggref *aggref, * directly before it is converted into row-based tuples. */ TupleTableSlot * -decompress_chunk_exec_vector_agg_impl( - CustomScanState *vector_agg_state, - /* - * For some reason, the output targetlist of DecompressChunk with vectorized - * aggregation is INDEX_VAR into its scan targetlist, which in turn contains the - * Aggref we need. So here we have to specify the aggregated targetlist - * separately, because we can't get it in a uniform way from both Agg and DC - * nodes. - */ - List *aggregated_tlist, DecompressChunkState *decompress_state) +decompress_chunk_exec_vector_agg_impl(CustomScanState *vector_agg_state, + DecompressChunkState *decompress_state) { BatchQueue *bq = decompress_state->batch_queue; - Assert(list_length(aggregated_tlist) == 1); // FIXME + /* + * The aggregated targetlist with Aggrefs is in the custom scan targetlist + * of the custom scan node that is performing the vectorized aggregation. + * We do this to avoid projections at this node, because the postgres + * projection functions complain when they see an Aggref in a custom + * node output targetlist. + * The output targetlist, in turn, consists of just the INDEX_VAR references + * into the custom_scan_tlist. 
+ */ + List *aggregated_tlist = castNode(CustomScan, vector_agg_state->ss.ps.plan)->custom_scan_tlist; + Assert(list_length(aggregated_tlist) == 1); /* Checked by planner */ Assert(ts_guc_enable_vectorized_aggregation); @@ -691,9 +693,7 @@ decompress_chunk_exec_vector_agg(CustomScanState *node) CustomScanState *vector_agg_state = node; - return decompress_chunk_exec_vector_agg_impl(vector_agg_state, - chunk_state->custom_scan_tlist, - chunk_state); + return decompress_chunk_exec_vector_agg_impl(vector_agg_state, chunk_state); } /* diff --git a/tsl/src/nodes/decompress_chunk/exec.h b/tsl/src/nodes/decompress_chunk/exec.h index 9a7e0780db7..6da4d9e6637 100644 --- a/tsl/src/nodes/decompress_chunk/exec.h +++ b/tsl/src/nodes/decompress_chunk/exec.h @@ -51,5 +51,4 @@ typedef struct DecompressChunkState extern Node *decompress_chunk_state_create(CustomScan *cscan); TupleTableSlot *decompress_chunk_exec_vector_agg_impl(CustomScanState *vector_agg_state, - List *aggregated_tlist, DecompressChunkState *decompress_state); diff --git a/tsl/src/nodes/decompress_chunk/planner.c b/tsl/src/nodes/decompress_chunk/planner.c index f6a0e0f19c1..1ecee8755cd 100644 --- a/tsl/src/nodes/decompress_chunk/planner.c +++ b/tsl/src/nodes/decompress_chunk/planner.c @@ -1083,14 +1083,14 @@ decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *pat * call of ExecAssignProjectionInfo in ExecConditionalAssignProjectionInfo. Otherwise, * PostgreSQL will error out since scan nodes are not intended to emit partial aggregates. */ - // if (dcpath->perform_vectorized_aggregation) - // { - // decompress_plan->custom_scan_tlist = decompressed_tlist; - // decompress_plan->scan.plan.targetlist = decompressed_tlist; + if (dcpath->perform_vectorized_aggregation) + { + decompress_plan->custom_scan_tlist = decompressed_tlist; + decompress_plan->scan.plan.targetlist = decompressed_tlist; - // fprintf(stderr, "when assigned, the decompressed tlist was:\n"); - // my_print(decompressed_tlist); - // } + // fprintf(stderr, "when assigned, the decompressed tlist was:\n"); + // my_print(decompressed_tlist); + } // if (dcpath->perform_vectorized_aggregation) // { diff --git a/tsl/src/nodes/vector_agg/vector_agg.c b/tsl/src/nodes/vector_agg/vector_agg.c index acadc9401cb..b6f04705d7f 100644 --- a/tsl/src/nodes/vector_agg/vector_agg.c +++ b/tsl/src/nodes/vector_agg/vector_agg.c @@ -45,10 +45,7 @@ vector_agg_exec(CustomScanState *node) { // return ExecProcNode(linitial(node->custom_ps)); DecompressChunkState *ds = (DecompressChunkState *) linitial(node->custom_ps); - return decompress_chunk_exec_vector_agg_impl(node, - castNode(CustomScan, node->ss.ps.plan) - ->custom_scan_tlist, - ds); + return decompress_chunk_exec_vector_agg_impl(node, ds); } static void @@ -148,8 +145,8 @@ vector_agg_plan_create(Agg *agg, CustomScan *decompress_chunk) // custom->scan.plan.lefttree = agg->plan.lefttree; -// fprintf(stderr, "created:\n"); -// my_print(custom); + // fprintf(stderr, "created:\n"); + // my_print(custom); (void) CustomBuildTargetList; diff --git a/tsl/src/partialize_agg.c b/tsl/src/partialize_agg.c index be74c16b384..deab6d1555c 100644 --- a/tsl/src/partialize_agg.c +++ b/tsl/src/partialize_agg.c @@ -96,31 +96,33 @@ is_vectorizable_agg_path(PlannerInfo *root, AggPath *agg_path, Path *path) bool apply_vectorized_agg_optimization(PlannerInfo *root, AggPath *aggregation_path, Path *path) { - if (!ts_guc_enable_vectorized_aggregation || !ts_guc_enable_bulk_decompression) - return false; - - Assert(path != NULL); - 
Assert(aggregation_path->aggsplit == AGGSPLIT_INITIAL_SERIAL); - - if (is_vectorizable_agg_path(root, aggregation_path, path)) - { - Assert(ts_is_decompress_chunk_path(path)); - DecompressChunkPath *decompress_path = (DecompressChunkPath *) castNode(CustomPath, path); - - /* Change the output of the path and let the decompress chunk node emit partial aggregates - * directly */ - decompress_path->perform_vectorized_aggregation = true; - - // decompress_path->custom_path.path.pathtarget = aggregation_path->path.pathtarget; - // - // /* The decompress chunk node can perform the aggregation directly. No need for a - //dedicated - // * agg node on top. */ - // return true; - } - - /* PostgreSQL should handle the aggregation. Regular agg node on top is required. */ return false; +// +// if (!ts_guc_enable_vectorized_aggregation || !ts_guc_enable_bulk_decompression) +// return false; +// +// Assert(path != NULL); +// Assert(aggregation_path->aggsplit == AGGSPLIT_INITIAL_SERIAL); +// +// if (is_vectorizable_agg_path(root, aggregation_path, path)) +// { +// Assert(ts_is_decompress_chunk_path(path)); +// DecompressChunkPath *decompress_path = (DecompressChunkPath *) castNode(CustomPath, path); +// +// /* Change the output of the path and let the decompress chunk node emit partial aggregates +// * directly */ +// decompress_path->perform_vectorized_aggregation = true; +// +// decompress_path->custom_path.path.pathtarget = aggregation_path->path.pathtarget; +// +// /* The decompress chunk node can perform the aggregation directly. No need for a +//dedicated +// * agg node on top. */ +// return true; +// } +// +// /* PostgreSQL should handle the aggregation. Regular agg node on top is required. */ +// return false; } static Plan * @@ -181,13 +183,141 @@ insert_vector_agg_node(Plan *plan) return plan; } - bool perform_vectorized_aggregation = list_nth_int(linitial(custom->custom_private), 5); - if (!perform_vectorized_aggregation) + if (custom->scan.plan.qual != NIL) + { + /* Can't do vectorized aggregation if we have Postgres quals. */ + return plan; + } + + if (linitial(custom->custom_exprs) != NIL) + { + /* Even the vectorized filters are not supported at the moment. */ + return plan; + } + + if (agg->numCols != 0) + { + /* No GROUP BY support for now. */ + return plan; + } + + if (agg->groupingSets != NIL) + { + /* No GROUPING SETS support. */ + return plan; + } + + if (agg->plan.qual != NIL) + { + /* + * No HAVING support. Probably we can't have it in this node in any case, + * because we only replace the partial aggregation nodes which can't + * chech HAVING. + */ + return plan; + } + + if (list_length(agg->plan.targetlist) != 1) + { + /* We currently handle only one agg function per node. */ + return plan; + } + + Node *expr_node = (Node *) castNode(TargetEntry, linitial(agg->plan.targetlist))->expr; + if (!IsA(expr_node, Aggref)) + { + return plan; + } + + Aggref *aggref = castNode(Aggref, expr_node); + + if (aggref->aggfilter != NULL) + { + /* Filter clause on aggregate is not supported. */ + return plan; + } + + if (aggref->aggfnoid != F_SUM_INT4) { - fprintf(stderr, "no vectorized aggregation\n"); + /* We only support sum(int4) at the moment. */ return plan; } + TargetEntry *argument = castNode(TargetEntry, linitial(aggref->args)); + if (!IsA(argument->expr, Var)) + { + /* Can aggregate only a bare decompressed column, not an expression. 
*/ + return plan; + } + Var *aggregated_var = castNode(Var, argument->expr); + // my_print(aggregated_var); + + /* + * Check if this particular column is a segmentby or has bulk decompression + * enabled. This hook is called after set_plan_refs, and at this stage the + * aggregation node uses OUTER_VAR references into the child scan targetlist, + * so first we have to translate this. + */ + Assert(aggregated_var->varno == OUTER_VAR); + TargetEntry *decompressed_target_entry = + list_nth(custom->scan.plan.targetlist, AttrNumberGetAttrOffset(aggregated_var->varattno)); + // my_print(decompressed_target_entry); + + if (!IsA(decompressed_target_entry->expr, Var)) + { + /* + * Can only aggregate the plain Vars. Not sure if this is redundant with + * the similar check above. + */ + return plan; + } + Var *decompressed_var = castNode(Var, decompressed_target_entry->expr); + // my_print(decompressed_var); + + /* + * Now, we have to translate the decompressed varno into the compressed + * column index, to check if the column supports bulk decompression. + */ + List *decompression_map = list_nth(custom->custom_private, 1); + List *is_segmentby_column = list_nth(custom->custom_private, 2); + List *bulk_decompression_column = list_nth(custom->custom_private, 3); + int compressed_column_index = 0; + for (; compressed_column_index < list_length(decompression_map); compressed_column_index++) + { + if (list_nth_int(decompression_map, compressed_column_index) == decompressed_var->varattno) + { + break; + } + } + Ensure(compressed_column_index < list_length(decompression_map), "compressed column not found"); + Assert(list_length(decompression_map) == list_length(bulk_decompression_column)); + const bool bulk_decompression_enabled_for_column = + list_nth_int(bulk_decompression_column, compressed_column_index); + + /* Bulk decompression can also be disabled globally. */ + List *settings = linitial(custom->custom_private); + const bool bulk_decompression_enabled_globally = list_nth_int(settings, 4); + + /* + * We support vectorized aggregation either for segmentby columns or for + * columns wiht bulk decompression enabled. + */ + if (!list_nth_int(is_segmentby_column, compressed_column_index) && + !(bulk_decompression_enabled_for_column && bulk_decompression_enabled_globally)) + { + /* Vectorized aggregation not possible for this particular column. 
*/ + fprintf(stderr, "compressed column index %d\n", compressed_column_index); + // my_print(bulk_decompression_column); + return plan; + } + + // bool perform_vectorized_aggregation = list_nth_int(linitial(custom->custom_private), 5); + // if (!perform_vectorized_aggregation) + // { + // fprintf(stderr, "no vectorized aggregation\n"); + // return plan; + // } + fprintf(stderr, "found!!!\n"); // my_print(plan); // mybt(); @@ -201,7 +331,7 @@ tsl_postprocess_plan(PlannedStmt *stmt) // mybt(); // my_print(stmt); - if (true) + if (ts_guc_enable_vectorized_aggregation) { stmt->planTree = insert_vector_agg_node(stmt->planTree); } diff --git a/tsl/test/expected/vectorized_aggregation.out b/tsl/test/expected/vectorized_aggregation.out index 552569895ad..680e6da3fb3 100644 --- a/tsl/test/expected/vectorized_aggregation.out +++ b/tsl/test/expected/vectorized_aggregation.out @@ -49,82 +49,91 @@ SELECT sum(segment_by_value) FROM testtable; :EXPLAIN SELECT sum(segment_by_value) FROM testtable; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_1_chunk.segment_by_value) - -> Append - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk - Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk - Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk - Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk - Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk - Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk - Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value - -> 
Partial Aggregate - Output: PARTIAL sum(_hyper_1_4_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_4_chunk - Output: _hyper_1_4_chunk.segment_by_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_5_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_5_chunk - Output: _hyper_1_5_chunk.segment_by_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_6_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_6_chunk - Output: _hyper_1_6_chunk.segment_by_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_7_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_7_chunk - Output: _hyper_1_7_chunk.segment_by_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_8_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_8_chunk - Output: _hyper_1_8_chunk.segment_by_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_9_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_9_chunk - Output: _hyper_1_9_chunk.segment_by_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_10_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_10_chunk - Output: _hyper_1_10_chunk.segment_by_value -(46 rows) + Output: sum(_hyper_1_1_chunk."time") + -> Gather + Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk + Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_2_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk + Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_3_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + Output: _hyper_1_3_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk + Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_4_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_4_chunk + Output: _hyper_1_4_chunk.segment_by_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_5_chunk.segment_by_value) + -> Parallel Seq Scan on 
_timescaledb_internal._hyper_1_5_chunk + Output: _hyper_1_5_chunk.segment_by_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_6_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_6_chunk + Output: _hyper_1_6_chunk.segment_by_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_7_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_7_chunk + Output: _hyper_1_7_chunk.segment_by_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_8_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_8_chunk + Output: _hyper_1_8_chunk.segment_by_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_9_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_9_chunk + Output: _hyper_1_9_chunk.segment_by_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_10_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_10_chunk + Output: _hyper_1_10_chunk.segment_by_value +(52 rows) -- Vectorization possible - filter on segment_by :EXPLAIN SELECT sum(segment_by_value) FROM testtable WHERE segment_by_value > 0; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_1_chunk.segment_by_value) + Output: sum(_hyper_1_1_chunk."time") -> Append - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk - Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_2_11_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_11_chunk - Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - Index Cond: (compress_hyper_2_11_chunk.segment_by_value > 0) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk - Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_2_12_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_12_chunk - Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - Index Cond: (compress_hyper_2_12_chunk.segment_by_value > 0) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk - Output: (PARTIAL 
sum(_hyper_1_3_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_2_13_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_13_chunk - Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value - Index Cond: (compress_hyper_2_13_chunk.segment_by_value > 0) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.segment_by_value + -> Index Scan using compress_hyper_2_11_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_11_chunk + Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value + Index Cond: (compress_hyper_2_11_chunk.segment_by_value > 0) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_2_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.segment_by_value + -> Index Scan using compress_hyper_2_12_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_12_chunk + Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value + Index Cond: (compress_hyper_2_12_chunk.segment_by_value > 0) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_3_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + Output: _hyper_1_3_chunk.segment_by_value + -> Index Scan using compress_hyper_2_13_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_13_chunk + Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value + Index Cond: (compress_hyper_2_13_chunk.segment_by_value > 0) -> Partial Aggregate Output: PARTIAL sum(_hyper_1_4_chunk.segment_by_value) -> Seq Scan on _timescaledb_internal._hyper_1_4_chunk @@ -160,7 +169,7 @@ SELECT sum(segment_by_value) FROM testtable WHERE segment_by_value > 0; -> Seq Scan on _timescaledb_internal._hyper_1_10_chunk Output: _hyper_1_10_chunk.segment_by_value Filter: (_hyper_1_10_chunk.segment_by_value > 0) -(56 rows) +(59 rows) -- Vectorization not possible due to a used filter :EXPLAIN @@ -651,55 +660,61 @@ SELECT sum(int_value) FROM testtable; :EXPLAIN SELECT sum(int_value) FROM testtable; - QUERY PLAN 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_1_chunk.int_value) - -> Append - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk - Output: (PARTIAL sum(_hyper_1_1_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk - Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk - Output: (PARTIAL sum(_hyper_1_2_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk - Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk - Output: (PARTIAL sum(_hyper_1_3_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk - Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_4_chunk.int_value) - -> Seq Scan on _timescaledb_internal._hyper_1_4_chunk - Output: _hyper_1_4_chunk.int_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_5_chunk.int_value) - -> Seq Scan on _timescaledb_internal._hyper_1_5_chunk - Output: _hyper_1_5_chunk.int_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_6_chunk.int_value) - -> Seq Scan on _timescaledb_internal._hyper_1_6_chunk - Output: _hyper_1_6_chunk.int_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_7_chunk.int_value) - -> Seq Scan on _timescaledb_internal._hyper_1_7_chunk - Output: _hyper_1_7_chunk.int_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_8_chunk.int_value) - -> Seq Scan on _timescaledb_internal._hyper_1_8_chunk - Output: _hyper_1_8_chunk.int_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_9_chunk.int_value) - -> Seq Scan on _timescaledb_internal._hyper_1_9_chunk - Output: _hyper_1_9_chunk.int_value - -> Partial Aggregate - Output: PARTIAL 
sum(_hyper_1_10_chunk.int_value) - -> Seq Scan on _timescaledb_internal._hyper_1_10_chunk - Output: _hyper_1_10_chunk.int_value -(46 rows) + Output: sum(_hyper_1_1_chunk."time") + -> Gather + Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk + Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_2_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk + Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_3_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + Output: _hyper_1_3_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk + Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_4_chunk.int_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_4_chunk + Output: _hyper_1_4_chunk.int_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_5_chunk.int_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_5_chunk + Output: _hyper_1_5_chunk.int_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_6_chunk.int_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_6_chunk + Output: _hyper_1_6_chunk.int_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_7_chunk.int_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_7_chunk + Output: _hyper_1_7_chunk.int_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_8_chunk.int_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_8_chunk + Output: _hyper_1_8_chunk.int_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_9_chunk.int_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_9_chunk + Output: _hyper_1_9_chunk.int_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_10_chunk.int_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_10_chunk + Output: _hyper_1_10_chunk.int_value +(52 rows) -- Vectorized aggregation not possible SELECT sum(float_value) FROM testtable; @@ -796,62 +811,75 @@ SELECT sum(segment_by_value) FROM testtable; :EXPLAIN SELECT sum(segment_by_value) FROM testtable; - QUERY PLAN 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_1_chunk.segment_by_value) - -> Append - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk - Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk - Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk - Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk - Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk - Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk - Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk - Output: (PARTIAL sum(_hyper_1_4_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk - Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk - Output: (PARTIAL sum(_hyper_1_5_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk - Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, 
compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk - Output: (PARTIAL sum(_hyper_1_6_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk - Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk - Output: (PARTIAL sum(_hyper_1_7_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk - Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk - Output: (PARTIAL sum(_hyper_1_8_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk - Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk - Output: (PARTIAL sum(_hyper_1_9_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk - Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk - Output: (PARTIAL sum(_hyper_1_10_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk - Output: compress_hyper_2_20_chunk._ts_meta_count, compress_hyper_2_20_chunk._ts_meta_sequence_num, compress_hyper_2_20_chunk.segment_by_value, compress_hyper_2_20_chunk._ts_meta_min_1, compress_hyper_2_20_chunk._ts_meta_max_1, compress_hyper_2_20_chunk."time", compress_hyper_2_20_chunk.int_value, compress_hyper_2_20_chunk.float_value -(53 rows) + Output: sum(_hyper_1_1_chunk."time") + -> Gather + Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk + Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, 
compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_2_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk + Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_3_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + Output: _hyper_1_3_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk + Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_4_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk + Output: _hyper_1_4_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk + Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_5_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk + Output: _hyper_1_5_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk + Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_6_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk + Output: _hyper_1_6_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk + Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_7_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk + Output: _hyper_1_7_chunk.segment_by_value + -> Parallel Seq Scan on 
_timescaledb_internal.compress_hyper_2_17_chunk + Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_8_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk + Output: _hyper_1_8_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk + Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_9_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk + Output: _hyper_1_9_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk + Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_10_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk + Output: _hyper_1_10_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk + Output: compress_hyper_2_20_chunk._ts_meta_count, compress_hyper_2_20_chunk._ts_meta_sequence_num, compress_hyper_2_20_chunk.segment_by_value, compress_hyper_2_20_chunk._ts_meta_min_1, compress_hyper_2_20_chunk._ts_meta_max_1, compress_hyper_2_20_chunk."time", compress_hyper_2_20_chunk.int_value, compress_hyper_2_20_chunk.float_value +(66 rows) -- Vectorized aggregation possible SELECT sum(int_value) FROM testtable; @@ -862,62 +890,75 @@ SELECT sum(int_value) FROM testtable; :EXPLAIN SELECT sum(int_value) FROM testtable; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_1_chunk.int_value) - -> Append - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk - Output: (PARTIAL sum(_hyper_1_1_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk - Output: compress_hyper_2_11_chunk._ts_meta_count, 
compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk - Output: (PARTIAL sum(_hyper_1_2_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk - Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk - Output: (PARTIAL sum(_hyper_1_3_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk - Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk - Output: (PARTIAL sum(_hyper_1_4_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk - Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk - Output: (PARTIAL sum(_hyper_1_5_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk - Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk - Output: (PARTIAL sum(_hyper_1_6_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk - Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk - Output: (PARTIAL sum(_hyper_1_7_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk - Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, 
compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk - Output: (PARTIAL sum(_hyper_1_8_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk - Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk - Output: (PARTIAL sum(_hyper_1_9_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk - Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk - Output: (PARTIAL sum(_hyper_1_10_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk - Output: compress_hyper_2_20_chunk._ts_meta_count, compress_hyper_2_20_chunk._ts_meta_sequence_num, compress_hyper_2_20_chunk.segment_by_value, compress_hyper_2_20_chunk._ts_meta_min_1, compress_hyper_2_20_chunk._ts_meta_max_1, compress_hyper_2_20_chunk."time", compress_hyper_2_20_chunk.int_value, compress_hyper_2_20_chunk.float_value -(53 rows) + Output: sum(_hyper_1_1_chunk."time") + -> Gather + Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk + Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_2_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk + Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_3_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + Output: _hyper_1_3_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk + Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, 
compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_4_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk + Output: _hyper_1_4_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk + Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_5_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk + Output: _hyper_1_5_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk + Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_6_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk + Output: _hyper_1_6_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk + Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_7_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk + Output: _hyper_1_7_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk + Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_8_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk + Output: _hyper_1_8_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk + Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_9_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk + Output: _hyper_1_9_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk + Output: 
compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_10_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk + Output: _hyper_1_10_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk + Output: compress_hyper_2_20_chunk._ts_meta_count, compress_hyper_2_20_chunk._ts_meta_sequence_num, compress_hyper_2_20_chunk.segment_by_value, compress_hyper_2_20_chunk._ts_meta_min_1, compress_hyper_2_20_chunk._ts_meta_max_1, compress_hyper_2_20_chunk."time", compress_hyper_2_20_chunk.int_value, compress_hyper_2_20_chunk.float_value +(66 rows) --- -- Tests with some chunks are partially compressed @@ -933,66 +974,79 @@ SELECT sum(segment_by_value) FROM testtable; :EXPLAIN SELECT sum(segment_by_value) FROM testtable; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_1_chunk.segment_by_value) - -> Append - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk - Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk - Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_1_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_1_chunk - Output: _hyper_1_1_chunk.segment_by_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk - Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk - Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk - Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk - Output: 
compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk - Output: (PARTIAL sum(_hyper_1_4_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk - Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk - Output: (PARTIAL sum(_hyper_1_5_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk - Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk - Output: (PARTIAL sum(_hyper_1_6_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk - Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk - Output: (PARTIAL sum(_hyper_1_7_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk - Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk - Output: (PARTIAL sum(_hyper_1_8_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk - Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk - Output: (PARTIAL sum(_hyper_1_9_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk - Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, 
compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk - Output: (PARTIAL sum(_hyper_1_10_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk - Output: compress_hyper_2_20_chunk._ts_meta_count, compress_hyper_2_20_chunk._ts_meta_sequence_num, compress_hyper_2_20_chunk.segment_by_value, compress_hyper_2_20_chunk._ts_meta_min_1, compress_hyper_2_20_chunk._ts_meta_max_1, compress_hyper_2_20_chunk."time", compress_hyper_2_20_chunk.int_value, compress_hyper_2_20_chunk.float_value -(57 rows) + Output: sum(_hyper_1_1_chunk."time") + -> Gather + Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk + Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_2_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk + Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_3_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + Output: _hyper_1_3_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk + Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_4_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk + Output: _hyper_1_4_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk + Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_5_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk + Output: 
_hyper_1_5_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk + Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_6_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk + Output: _hyper_1_6_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk + Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_7_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk + Output: _hyper_1_7_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk + Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_8_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk + Output: _hyper_1_8_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk + Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_9_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk + Output: _hyper_1_9_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk + Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_10_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk + Output: _hyper_1_10_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk + Output: compress_hyper_2_20_chunk._ts_meta_count, compress_hyper_2_20_chunk._ts_meta_sequence_num, compress_hyper_2_20_chunk.segment_by_value, compress_hyper_2_20_chunk._ts_meta_min_1, compress_hyper_2_20_chunk._ts_meta_max_1, compress_hyper_2_20_chunk."time", compress_hyper_2_20_chunk.int_value, compress_hyper_2_20_chunk.float_value + -> Partial Aggregate + Output: PARTIAL 
sum(_hyper_1_1_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.segment_by_value +(70 rows) -- Vectorized aggregation possible SELECT sum(int_value) FROM testtable; @@ -1003,66 +1057,79 @@ SELECT sum(int_value) FROM testtable; :EXPLAIN SELECT sum(int_value) FROM testtable; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_1_chunk.int_value) - -> Append - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk - Output: (PARTIAL sum(_hyper_1_1_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk - Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_1_chunk.int_value) - -> Seq Scan on _timescaledb_internal._hyper_1_1_chunk - Output: _hyper_1_1_chunk.int_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk - Output: (PARTIAL sum(_hyper_1_2_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk - Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk - Output: (PARTIAL sum(_hyper_1_3_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk - Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk - Output: (PARTIAL sum(_hyper_1_4_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk - Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, 
compress_hyper_2_14_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk - Output: (PARTIAL sum(_hyper_1_5_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk - Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk - Output: (PARTIAL sum(_hyper_1_6_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk - Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk - Output: (PARTIAL sum(_hyper_1_7_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk - Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk - Output: (PARTIAL sum(_hyper_1_8_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk - Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk - Output: (PARTIAL sum(_hyper_1_9_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk - Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk - Output: (PARTIAL sum(_hyper_1_10_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk - Output: compress_hyper_2_20_chunk._ts_meta_count, compress_hyper_2_20_chunk._ts_meta_sequence_num, compress_hyper_2_20_chunk.segment_by_value, compress_hyper_2_20_chunk._ts_meta_min_1, compress_hyper_2_20_chunk._ts_meta_max_1, compress_hyper_2_20_chunk."time", compress_hyper_2_20_chunk.int_value, compress_hyper_2_20_chunk.float_value -(57 rows) + Output: sum(_hyper_1_1_chunk."time") + -> Gather + Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Workers Planned: 2 + -> 
Parallel Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk + Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_2_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk + Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_3_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + Output: _hyper_1_3_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk + Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_4_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk + Output: _hyper_1_4_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk + Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_5_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk + Output: _hyper_1_5_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk + Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_6_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk + Output: _hyper_1_6_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk + Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, 
compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_7_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk + Output: _hyper_1_7_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk + Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_8_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk + Output: _hyper_1_8_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk + Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_9_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk + Output: _hyper_1_9_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk + Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_10_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk + Output: _hyper_1_10_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk + Output: compress_hyper_2_20_chunk._ts_meta_count, compress_hyper_2_20_chunk._ts_meta_sequence_num, compress_hyper_2_20_chunk.segment_by_value, compress_hyper_2_20_chunk._ts_meta_min_1, compress_hyper_2_20_chunk._ts_meta_max_1, compress_hyper_2_20_chunk."time", compress_hyper_2_20_chunk.int_value, compress_hyper_2_20_chunk.float_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_1_chunk.int_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.int_value +(70 rows) --Vectorized aggregation not possible for expression SELECT sum(abs(int_value)) FROM testtable; @@ -1308,130 +1375,156 @@ RESET timescaledb.enable_bulk_decompression; -- Using the same sum function multiple times is supported by vectorization :EXPLAIN SELECT sum(int_value), sum(int_value) FROM testtable; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN 
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_1_chunk.int_value), sum(_hyper_1_1_chunk.int_value) - -> Append - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk - Output: (PARTIAL sum(_hyper_1_1_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk - Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_1_chunk.int_value) - -> Seq Scan on _timescaledb_internal._hyper_1_1_chunk - Output: _hyper_1_1_chunk.int_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk - Output: (PARTIAL sum(_hyper_1_2_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk - Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk - Output: (PARTIAL sum(_hyper_1_3_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk - Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk - Output: (PARTIAL sum(_hyper_1_4_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk - Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk - Output: (PARTIAL sum(_hyper_1_5_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk - Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk - Output: (PARTIAL 
sum(_hyper_1_6_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk - Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk - Output: (PARTIAL sum(_hyper_1_7_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk - Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk - Output: (PARTIAL sum(_hyper_1_8_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk - Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk - Output: (PARTIAL sum(_hyper_1_9_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk - Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk - Output: (PARTIAL sum(_hyper_1_10_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk - Output: compress_hyper_2_20_chunk._ts_meta_count, compress_hyper_2_20_chunk._ts_meta_sequence_num, compress_hyper_2_20_chunk.segment_by_value, compress_hyper_2_20_chunk._ts_meta_min_1, compress_hyper_2_20_chunk._ts_meta_max_1, compress_hyper_2_20_chunk."time", compress_hyper_2_20_chunk.int_value, compress_hyper_2_20_chunk.float_value -(57 rows) + Output: sum(_hyper_1_1_chunk."time"), sum(_hyper_1_1_chunk."time") + -> Gather + Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk + Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value + -> Custom Scan (VectorAgg) + Output: 
(PARTIAL sum(_hyper_1_2_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk + Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_3_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + Output: _hyper_1_3_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk + Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_4_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk + Output: _hyper_1_4_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk + Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_5_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk + Output: _hyper_1_5_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk + Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_6_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk + Output: _hyper_1_6_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk + Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_7_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk + Output: _hyper_1_7_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk + Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, 
compress_hyper_2_17_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_8_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk + Output: _hyper_1_8_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk + Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_9_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk + Output: _hyper_1_9_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk + Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_10_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk + Output: _hyper_1_10_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk + Output: compress_hyper_2_20_chunk._ts_meta_count, compress_hyper_2_20_chunk._ts_meta_sequence_num, compress_hyper_2_20_chunk.segment_by_value, compress_hyper_2_20_chunk._ts_meta_min_1, compress_hyper_2_20_chunk._ts_meta_max_1, compress_hyper_2_20_chunk."time", compress_hyper_2_20_chunk.int_value, compress_hyper_2_20_chunk.float_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_1_chunk.int_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.int_value +(70 rows) -- Using the same sum function multiple times is supported by vectorization :EXPLAIN SELECT sum(segment_by_value), sum(segment_by_value) FROM testtable; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_1_chunk.segment_by_value), sum(_hyper_1_1_chunk.segment_by_value) - -> Append - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk - Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk - Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, 
compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_1_chunk.segment_by_value) - -> Seq Scan on _timescaledb_internal._hyper_1_1_chunk - Output: _hyper_1_1_chunk.segment_by_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk - Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk - Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk - Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk - Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk - Output: (PARTIAL sum(_hyper_1_4_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk - Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk - Output: (PARTIAL sum(_hyper_1_5_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk - Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk - Output: (PARTIAL sum(_hyper_1_6_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk - Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk - Output: (PARTIAL sum(_hyper_1_7_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk - Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, 
compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk - Output: (PARTIAL sum(_hyper_1_8_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk - Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk - Output: (PARTIAL sum(_hyper_1_9_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk - Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk - Output: (PARTIAL sum(_hyper_1_10_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk - Output: compress_hyper_2_20_chunk._ts_meta_count, compress_hyper_2_20_chunk._ts_meta_sequence_num, compress_hyper_2_20_chunk.segment_by_value, compress_hyper_2_20_chunk._ts_meta_min_1, compress_hyper_2_20_chunk._ts_meta_max_1, compress_hyper_2_20_chunk."time", compress_hyper_2_20_chunk.int_value, compress_hyper_2_20_chunk.float_value -(57 rows) + Output: sum(_hyper_1_1_chunk."time"), sum(_hyper_1_1_chunk."time") + -> Gather + Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk + Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_2_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk + Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_3_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + Output: _hyper_1_3_chunk.segment_by_value + -> Parallel Seq Scan on 
_timescaledb_internal.compress_hyper_2_13_chunk + Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_4_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk + Output: _hyper_1_4_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk + Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_5_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk + Output: _hyper_1_5_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk + Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_6_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk + Output: _hyper_1_6_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk + Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_7_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk + Output: _hyper_1_7_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk + Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_8_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk + Output: _hyper_1_8_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk + Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_9_chunk."time")) + -> Custom Scan (DecompressChunk) 
on _timescaledb_internal._hyper_1_9_chunk + Output: _hyper_1_9_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk + Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_10_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk + Output: _hyper_1_10_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk + Output: compress_hyper_2_20_chunk._ts_meta_count, compress_hyper_2_20_chunk._ts_meta_sequence_num, compress_hyper_2_20_chunk.segment_by_value, compress_hyper_2_20_chunk._ts_meta_min_1, compress_hyper_2_20_chunk._ts_meta_max_1, compress_hyper_2_20_chunk."time", compress_hyper_2_20_chunk.int_value, compress_hyper_2_20_chunk.float_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_1_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.segment_by_value +(70 rows) -- Performing a sum on multiple columns is currently not supported by vectorization :EXPLAIN @@ -1763,72 +1856,85 @@ SELECT sum(int_value) FROM testtable; -- Vectorization possible - filter on segment_by :EXPLAIN SELECT sum(int_value) FROM testtable WHERE segment_by_value > 5; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_41_chunk.int_value) - -> Append - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_41_chunk - Output: (PARTIAL sum(_hyper_1_41_chunk.int_value)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_2_51_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_51_chunk - Output: compress_hyper_2_51_chunk._ts_meta_count, compress_hyper_2_51_chunk._ts_meta_sequence_num, compress_hyper_2_51_chunk.segment_by_value, compress_hyper_2_51_chunk._ts_meta_min_1, compress_hyper_2_51_chunk._ts_meta_max_1, compress_hyper_2_51_chunk."time", compress_hyper_2_51_chunk.int_value, compress_hyper_2_51_chunk.float_value - Index Cond: (compress_hyper_2_51_chunk.segment_by_value > 5) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_42_chunk - Output: (PARTIAL sum(_hyper_1_42_chunk.int_value)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_2_52_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_52_chunk - Output: compress_hyper_2_52_chunk._ts_meta_count, compress_hyper_2_52_chunk._ts_meta_sequence_num, 
compress_hyper_2_52_chunk.segment_by_value, compress_hyper_2_52_chunk._ts_meta_min_1, compress_hyper_2_52_chunk._ts_meta_max_1, compress_hyper_2_52_chunk."time", compress_hyper_2_52_chunk.int_value, compress_hyper_2_52_chunk.float_value - Index Cond: (compress_hyper_2_52_chunk.segment_by_value > 5) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_43_chunk - Output: (PARTIAL sum(_hyper_1_43_chunk.int_value)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_2_53_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_53_chunk - Output: compress_hyper_2_53_chunk._ts_meta_count, compress_hyper_2_53_chunk._ts_meta_sequence_num, compress_hyper_2_53_chunk.segment_by_value, compress_hyper_2_53_chunk._ts_meta_min_1, compress_hyper_2_53_chunk._ts_meta_max_1, compress_hyper_2_53_chunk."time", compress_hyper_2_53_chunk.int_value, compress_hyper_2_53_chunk.float_value - Index Cond: (compress_hyper_2_53_chunk.segment_by_value > 5) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_44_chunk - Output: (PARTIAL sum(_hyper_1_44_chunk.int_value)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_2_54_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_54_chunk - Output: compress_hyper_2_54_chunk._ts_meta_count, compress_hyper_2_54_chunk._ts_meta_sequence_num, compress_hyper_2_54_chunk.segment_by_value, compress_hyper_2_54_chunk._ts_meta_min_1, compress_hyper_2_54_chunk._ts_meta_max_1, compress_hyper_2_54_chunk."time", compress_hyper_2_54_chunk.int_value, compress_hyper_2_54_chunk.float_value - Index Cond: (compress_hyper_2_54_chunk.segment_by_value > 5) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_45_chunk - Output: (PARTIAL sum(_hyper_1_45_chunk.int_value)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_2_55_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_55_chunk - Output: compress_hyper_2_55_chunk._ts_meta_count, compress_hyper_2_55_chunk._ts_meta_sequence_num, compress_hyper_2_55_chunk.segment_by_value, compress_hyper_2_55_chunk._ts_meta_min_1, compress_hyper_2_55_chunk._ts_meta_max_1, compress_hyper_2_55_chunk."time", compress_hyper_2_55_chunk.int_value, compress_hyper_2_55_chunk.float_value - Index Cond: (compress_hyper_2_55_chunk.segment_by_value > 5) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_46_chunk - Output: (PARTIAL sum(_hyper_1_46_chunk.int_value)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_2_56_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_56_chunk - Output: compress_hyper_2_56_chunk._ts_meta_count, compress_hyper_2_56_chunk._ts_meta_sequence_num, compress_hyper_2_56_chunk.segment_by_value, compress_hyper_2_56_chunk._ts_meta_min_1, compress_hyper_2_56_chunk._ts_meta_max_1, compress_hyper_2_56_chunk."time", compress_hyper_2_56_chunk.int_value, compress_hyper_2_56_chunk.float_value - Index Cond: (compress_hyper_2_56_chunk.segment_by_value > 5) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_47_chunk - Output: (PARTIAL sum(_hyper_1_47_chunk.int_value)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_2_57_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_57_chunk - Output: compress_hyper_2_57_chunk._ts_meta_count, compress_hyper_2_57_chunk._ts_meta_sequence_num, compress_hyper_2_57_chunk.segment_by_value, 
compress_hyper_2_57_chunk._ts_meta_min_1, compress_hyper_2_57_chunk._ts_meta_max_1, compress_hyper_2_57_chunk."time", compress_hyper_2_57_chunk.int_value, compress_hyper_2_57_chunk.float_value - Index Cond: (compress_hyper_2_57_chunk.segment_by_value > 5) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_48_chunk - Output: (PARTIAL sum(_hyper_1_48_chunk.int_value)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_2_58_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_58_chunk - Output: compress_hyper_2_58_chunk._ts_meta_count, compress_hyper_2_58_chunk._ts_meta_sequence_num, compress_hyper_2_58_chunk.segment_by_value, compress_hyper_2_58_chunk._ts_meta_min_1, compress_hyper_2_58_chunk._ts_meta_max_1, compress_hyper_2_58_chunk."time", compress_hyper_2_58_chunk.int_value, compress_hyper_2_58_chunk.float_value - Index Cond: (compress_hyper_2_58_chunk.segment_by_value > 5) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_49_chunk - Output: (PARTIAL sum(_hyper_1_49_chunk.int_value)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_2_59_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_59_chunk - Output: compress_hyper_2_59_chunk._ts_meta_count, compress_hyper_2_59_chunk._ts_meta_sequence_num, compress_hyper_2_59_chunk.segment_by_value, compress_hyper_2_59_chunk._ts_meta_min_1, compress_hyper_2_59_chunk._ts_meta_max_1, compress_hyper_2_59_chunk."time", compress_hyper_2_59_chunk.int_value, compress_hyper_2_59_chunk.float_value - Index Cond: (compress_hyper_2_59_chunk.segment_by_value > 5) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_50_chunk - Output: (PARTIAL sum(_hyper_1_50_chunk.int_value)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_2_60_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_60_chunk - Output: compress_hyper_2_60_chunk._ts_meta_count, compress_hyper_2_60_chunk._ts_meta_sequence_num, compress_hyper_2_60_chunk.segment_by_value, compress_hyper_2_60_chunk._ts_meta_min_1, compress_hyper_2_60_chunk._ts_meta_max_1, compress_hyper_2_60_chunk."time", compress_hyper_2_60_chunk.int_value, compress_hyper_2_60_chunk.float_value - Index Cond: (compress_hyper_2_60_chunk.segment_by_value > 5) -(63 rows) + Output: sum(_hyper_1_41_chunk."time") + -> Gather + Output: (PARTIAL sum(_hyper_1_41_chunk."time")) + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_41_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_41_chunk + Output: _hyper_1_41_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_51_chunk + Output: compress_hyper_2_51_chunk._ts_meta_count, compress_hyper_2_51_chunk._ts_meta_sequence_num, compress_hyper_2_51_chunk.segment_by_value, compress_hyper_2_51_chunk._ts_meta_min_1, compress_hyper_2_51_chunk._ts_meta_max_1, compress_hyper_2_51_chunk."time", compress_hyper_2_51_chunk.int_value, compress_hyper_2_51_chunk.float_value + Filter: (compress_hyper_2_51_chunk.segment_by_value > 5) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_42_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_42_chunk + Output: _hyper_1_42_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_52_chunk + Output: compress_hyper_2_52_chunk._ts_meta_count, compress_hyper_2_52_chunk._ts_meta_sequence_num, 
compress_hyper_2_52_chunk.segment_by_value, compress_hyper_2_52_chunk._ts_meta_min_1, compress_hyper_2_52_chunk._ts_meta_max_1, compress_hyper_2_52_chunk."time", compress_hyper_2_52_chunk.int_value, compress_hyper_2_52_chunk.float_value + Filter: (compress_hyper_2_52_chunk.segment_by_value > 5) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_43_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_43_chunk + Output: _hyper_1_43_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_53_chunk + Output: compress_hyper_2_53_chunk._ts_meta_count, compress_hyper_2_53_chunk._ts_meta_sequence_num, compress_hyper_2_53_chunk.segment_by_value, compress_hyper_2_53_chunk._ts_meta_min_1, compress_hyper_2_53_chunk._ts_meta_max_1, compress_hyper_2_53_chunk."time", compress_hyper_2_53_chunk.int_value, compress_hyper_2_53_chunk.float_value + Filter: (compress_hyper_2_53_chunk.segment_by_value > 5) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_44_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_44_chunk + Output: _hyper_1_44_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_54_chunk + Output: compress_hyper_2_54_chunk._ts_meta_count, compress_hyper_2_54_chunk._ts_meta_sequence_num, compress_hyper_2_54_chunk.segment_by_value, compress_hyper_2_54_chunk._ts_meta_min_1, compress_hyper_2_54_chunk._ts_meta_max_1, compress_hyper_2_54_chunk."time", compress_hyper_2_54_chunk.int_value, compress_hyper_2_54_chunk.float_value + Filter: (compress_hyper_2_54_chunk.segment_by_value > 5) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_45_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_45_chunk + Output: _hyper_1_45_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_55_chunk + Output: compress_hyper_2_55_chunk._ts_meta_count, compress_hyper_2_55_chunk._ts_meta_sequence_num, compress_hyper_2_55_chunk.segment_by_value, compress_hyper_2_55_chunk._ts_meta_min_1, compress_hyper_2_55_chunk._ts_meta_max_1, compress_hyper_2_55_chunk."time", compress_hyper_2_55_chunk.int_value, compress_hyper_2_55_chunk.float_value + Filter: (compress_hyper_2_55_chunk.segment_by_value > 5) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_46_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_46_chunk + Output: _hyper_1_46_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_56_chunk + Output: compress_hyper_2_56_chunk._ts_meta_count, compress_hyper_2_56_chunk._ts_meta_sequence_num, compress_hyper_2_56_chunk.segment_by_value, compress_hyper_2_56_chunk._ts_meta_min_1, compress_hyper_2_56_chunk._ts_meta_max_1, compress_hyper_2_56_chunk."time", compress_hyper_2_56_chunk.int_value, compress_hyper_2_56_chunk.float_value + Filter: (compress_hyper_2_56_chunk.segment_by_value > 5) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_47_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_47_chunk + Output: _hyper_1_47_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_57_chunk + Output: compress_hyper_2_57_chunk._ts_meta_count, compress_hyper_2_57_chunk._ts_meta_sequence_num, compress_hyper_2_57_chunk.segment_by_value, compress_hyper_2_57_chunk._ts_meta_min_1, compress_hyper_2_57_chunk._ts_meta_max_1, compress_hyper_2_57_chunk."time", compress_hyper_2_57_chunk.int_value, 
compress_hyper_2_57_chunk.float_value + Filter: (compress_hyper_2_57_chunk.segment_by_value > 5) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_48_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_48_chunk + Output: _hyper_1_48_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_58_chunk + Output: compress_hyper_2_58_chunk._ts_meta_count, compress_hyper_2_58_chunk._ts_meta_sequence_num, compress_hyper_2_58_chunk.segment_by_value, compress_hyper_2_58_chunk._ts_meta_min_1, compress_hyper_2_58_chunk._ts_meta_max_1, compress_hyper_2_58_chunk."time", compress_hyper_2_58_chunk.int_value, compress_hyper_2_58_chunk.float_value + Filter: (compress_hyper_2_58_chunk.segment_by_value > 5) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_49_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_49_chunk + Output: _hyper_1_49_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_59_chunk + Output: compress_hyper_2_59_chunk._ts_meta_count, compress_hyper_2_59_chunk._ts_meta_sequence_num, compress_hyper_2_59_chunk.segment_by_value, compress_hyper_2_59_chunk._ts_meta_min_1, compress_hyper_2_59_chunk._ts_meta_max_1, compress_hyper_2_59_chunk."time", compress_hyper_2_59_chunk.int_value, compress_hyper_2_59_chunk.float_value + Filter: (compress_hyper_2_59_chunk.segment_by_value > 5) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_50_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_50_chunk + Output: _hyper_1_50_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_60_chunk + Output: compress_hyper_2_60_chunk._ts_meta_count, compress_hyper_2_60_chunk._ts_meta_sequence_num, compress_hyper_2_60_chunk.segment_by_value, compress_hyper_2_60_chunk._ts_meta_min_1, compress_hyper_2_60_chunk._ts_meta_max_1, compress_hyper_2_60_chunk."time", compress_hyper_2_60_chunk.int_value, compress_hyper_2_60_chunk.float_value + Filter: (compress_hyper_2_60_chunk.segment_by_value > 5) +(76 rows) SELECT sum(int_value) FROM testtable WHERE segment_by_value > 5; sum @@ -1867,65 +1973,75 @@ SET parallel_setup_cost = 0; SET parallel_tuple_cost = 0; :EXPLAIN SELECT sum(segment_by_value) FROM testtable; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_41_chunk.segment_by_value) + Output: sum(_hyper_1_41_chunk."time") -> Gather - Output: (PARTIAL sum(_hyper_1_41_chunk.segment_by_value)) + Output: (PARTIAL sum(_hyper_1_41_chunk."time")) Workers Planned: 2 -> Parallel Append - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_41_chunk - Output: (PARTIAL sum(_hyper_1_41_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Parallel Seq Scan on 
_timescaledb_internal.compress_hyper_2_51_chunk - Output: compress_hyper_2_51_chunk._ts_meta_count, compress_hyper_2_51_chunk._ts_meta_sequence_num, compress_hyper_2_51_chunk.segment_by_value, compress_hyper_2_51_chunk._ts_meta_min_1, compress_hyper_2_51_chunk._ts_meta_max_1, compress_hyper_2_51_chunk."time", compress_hyper_2_51_chunk.int_value, compress_hyper_2_51_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_42_chunk - Output: (PARTIAL sum(_hyper_1_42_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_52_chunk - Output: compress_hyper_2_52_chunk._ts_meta_count, compress_hyper_2_52_chunk._ts_meta_sequence_num, compress_hyper_2_52_chunk.segment_by_value, compress_hyper_2_52_chunk._ts_meta_min_1, compress_hyper_2_52_chunk._ts_meta_max_1, compress_hyper_2_52_chunk."time", compress_hyper_2_52_chunk.int_value, compress_hyper_2_52_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_43_chunk - Output: (PARTIAL sum(_hyper_1_43_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_53_chunk - Output: compress_hyper_2_53_chunk._ts_meta_count, compress_hyper_2_53_chunk._ts_meta_sequence_num, compress_hyper_2_53_chunk.segment_by_value, compress_hyper_2_53_chunk._ts_meta_min_1, compress_hyper_2_53_chunk._ts_meta_max_1, compress_hyper_2_53_chunk."time", compress_hyper_2_53_chunk.int_value, compress_hyper_2_53_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_44_chunk - Output: (PARTIAL sum(_hyper_1_44_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_54_chunk - Output: compress_hyper_2_54_chunk._ts_meta_count, compress_hyper_2_54_chunk._ts_meta_sequence_num, compress_hyper_2_54_chunk.segment_by_value, compress_hyper_2_54_chunk._ts_meta_min_1, compress_hyper_2_54_chunk._ts_meta_max_1, compress_hyper_2_54_chunk."time", compress_hyper_2_54_chunk.int_value, compress_hyper_2_54_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_45_chunk - Output: (PARTIAL sum(_hyper_1_45_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_55_chunk - Output: compress_hyper_2_55_chunk._ts_meta_count, compress_hyper_2_55_chunk._ts_meta_sequence_num, compress_hyper_2_55_chunk.segment_by_value, compress_hyper_2_55_chunk._ts_meta_min_1, compress_hyper_2_55_chunk._ts_meta_max_1, compress_hyper_2_55_chunk."time", compress_hyper_2_55_chunk.int_value, compress_hyper_2_55_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_46_chunk - Output: (PARTIAL sum(_hyper_1_46_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_56_chunk - Output: compress_hyper_2_56_chunk._ts_meta_count, compress_hyper_2_56_chunk._ts_meta_sequence_num, compress_hyper_2_56_chunk.segment_by_value, compress_hyper_2_56_chunk._ts_meta_min_1, compress_hyper_2_56_chunk._ts_meta_max_1, compress_hyper_2_56_chunk."time", compress_hyper_2_56_chunk.int_value, compress_hyper_2_56_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_47_chunk - Output: (PARTIAL sum(_hyper_1_47_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_57_chunk - Output: 
compress_hyper_2_57_chunk._ts_meta_count, compress_hyper_2_57_chunk._ts_meta_sequence_num, compress_hyper_2_57_chunk.segment_by_value, compress_hyper_2_57_chunk._ts_meta_min_1, compress_hyper_2_57_chunk._ts_meta_max_1, compress_hyper_2_57_chunk."time", compress_hyper_2_57_chunk.int_value, compress_hyper_2_57_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_48_chunk - Output: (PARTIAL sum(_hyper_1_48_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_58_chunk - Output: compress_hyper_2_58_chunk._ts_meta_count, compress_hyper_2_58_chunk._ts_meta_sequence_num, compress_hyper_2_58_chunk.segment_by_value, compress_hyper_2_58_chunk._ts_meta_min_1, compress_hyper_2_58_chunk._ts_meta_max_1, compress_hyper_2_58_chunk."time", compress_hyper_2_58_chunk.int_value, compress_hyper_2_58_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_49_chunk - Output: (PARTIAL sum(_hyper_1_49_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_59_chunk - Output: compress_hyper_2_59_chunk._ts_meta_count, compress_hyper_2_59_chunk._ts_meta_sequence_num, compress_hyper_2_59_chunk.segment_by_value, compress_hyper_2_59_chunk._ts_meta_min_1, compress_hyper_2_59_chunk._ts_meta_max_1, compress_hyper_2_59_chunk."time", compress_hyper_2_59_chunk.int_value, compress_hyper_2_59_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_50_chunk - Output: (PARTIAL sum(_hyper_1_50_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_60_chunk - Output: compress_hyper_2_60_chunk._ts_meta_count, compress_hyper_2_60_chunk._ts_meta_sequence_num, compress_hyper_2_60_chunk.segment_by_value, compress_hyper_2_60_chunk._ts_meta_min_1, compress_hyper_2_60_chunk._ts_meta_max_1, compress_hyper_2_60_chunk."time", compress_hyper_2_60_chunk.int_value, compress_hyper_2_60_chunk.float_value -(56 rows) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_41_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_41_chunk + Output: _hyper_1_41_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_51_chunk + Output: compress_hyper_2_51_chunk._ts_meta_count, compress_hyper_2_51_chunk._ts_meta_sequence_num, compress_hyper_2_51_chunk.segment_by_value, compress_hyper_2_51_chunk._ts_meta_min_1, compress_hyper_2_51_chunk._ts_meta_max_1, compress_hyper_2_51_chunk."time", compress_hyper_2_51_chunk.int_value, compress_hyper_2_51_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_42_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_42_chunk + Output: _hyper_1_42_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_52_chunk + Output: compress_hyper_2_52_chunk._ts_meta_count, compress_hyper_2_52_chunk._ts_meta_sequence_num, compress_hyper_2_52_chunk.segment_by_value, compress_hyper_2_52_chunk._ts_meta_min_1, compress_hyper_2_52_chunk._ts_meta_max_1, compress_hyper_2_52_chunk."time", compress_hyper_2_52_chunk.int_value, compress_hyper_2_52_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_43_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_43_chunk + Output: _hyper_1_43_chunk.segment_by_value + -> Parallel Seq Scan on 
_timescaledb_internal.compress_hyper_2_53_chunk + Output: compress_hyper_2_53_chunk._ts_meta_count, compress_hyper_2_53_chunk._ts_meta_sequence_num, compress_hyper_2_53_chunk.segment_by_value, compress_hyper_2_53_chunk._ts_meta_min_1, compress_hyper_2_53_chunk._ts_meta_max_1, compress_hyper_2_53_chunk."time", compress_hyper_2_53_chunk.int_value, compress_hyper_2_53_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_44_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_44_chunk + Output: _hyper_1_44_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_54_chunk + Output: compress_hyper_2_54_chunk._ts_meta_count, compress_hyper_2_54_chunk._ts_meta_sequence_num, compress_hyper_2_54_chunk.segment_by_value, compress_hyper_2_54_chunk._ts_meta_min_1, compress_hyper_2_54_chunk._ts_meta_max_1, compress_hyper_2_54_chunk."time", compress_hyper_2_54_chunk.int_value, compress_hyper_2_54_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_45_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_45_chunk + Output: _hyper_1_45_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_55_chunk + Output: compress_hyper_2_55_chunk._ts_meta_count, compress_hyper_2_55_chunk._ts_meta_sequence_num, compress_hyper_2_55_chunk.segment_by_value, compress_hyper_2_55_chunk._ts_meta_min_1, compress_hyper_2_55_chunk._ts_meta_max_1, compress_hyper_2_55_chunk."time", compress_hyper_2_55_chunk.int_value, compress_hyper_2_55_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_46_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_46_chunk + Output: _hyper_1_46_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_56_chunk + Output: compress_hyper_2_56_chunk._ts_meta_count, compress_hyper_2_56_chunk._ts_meta_sequence_num, compress_hyper_2_56_chunk.segment_by_value, compress_hyper_2_56_chunk._ts_meta_min_1, compress_hyper_2_56_chunk._ts_meta_max_1, compress_hyper_2_56_chunk."time", compress_hyper_2_56_chunk.int_value, compress_hyper_2_56_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_47_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_47_chunk + Output: _hyper_1_47_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_57_chunk + Output: compress_hyper_2_57_chunk._ts_meta_count, compress_hyper_2_57_chunk._ts_meta_sequence_num, compress_hyper_2_57_chunk.segment_by_value, compress_hyper_2_57_chunk._ts_meta_min_1, compress_hyper_2_57_chunk._ts_meta_max_1, compress_hyper_2_57_chunk."time", compress_hyper_2_57_chunk.int_value, compress_hyper_2_57_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_48_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_48_chunk + Output: _hyper_1_48_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_58_chunk + Output: compress_hyper_2_58_chunk._ts_meta_count, compress_hyper_2_58_chunk._ts_meta_sequence_num, compress_hyper_2_58_chunk.segment_by_value, compress_hyper_2_58_chunk._ts_meta_min_1, compress_hyper_2_58_chunk._ts_meta_max_1, compress_hyper_2_58_chunk."time", compress_hyper_2_58_chunk.int_value, compress_hyper_2_58_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_49_chunk."time")) + -> Custom Scan 
(DecompressChunk) on _timescaledb_internal._hyper_1_49_chunk + Output: _hyper_1_49_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_59_chunk + Output: compress_hyper_2_59_chunk._ts_meta_count, compress_hyper_2_59_chunk._ts_meta_sequence_num, compress_hyper_2_59_chunk.segment_by_value, compress_hyper_2_59_chunk._ts_meta_min_1, compress_hyper_2_59_chunk._ts_meta_max_1, compress_hyper_2_59_chunk."time", compress_hyper_2_59_chunk.int_value, compress_hyper_2_59_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_50_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_50_chunk + Output: _hyper_1_50_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_60_chunk + Output: compress_hyper_2_60_chunk._ts_meta_count, compress_hyper_2_60_chunk._ts_meta_sequence_num, compress_hyper_2_60_chunk.segment_by_value, compress_hyper_2_60_chunk._ts_meta_min_1, compress_hyper_2_60_chunk._ts_meta_max_1, compress_hyper_2_60_chunk."time", compress_hyper_2_60_chunk.int_value, compress_hyper_2_60_chunk.float_value +(66 rows) SELECT sum(segment_by_value) FROM testtable; sum @@ -2042,121 +2158,147 @@ SELECT compress_chunk(ch) FROM show_chunks('testtable') ch; -- Aggregation with vectorization :EXPLAIN SELECT sum(segment_by_value) FROM testtable; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_81_chunk.segment_by_value) - -> Append - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_81_chunk - Output: (PARTIAL sum(_hyper_1_81_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_91_chunk - Output: compress_hyper_2_91_chunk._ts_meta_count, compress_hyper_2_91_chunk._ts_meta_sequence_num, compress_hyper_2_91_chunk.segment_by_value, compress_hyper_2_91_chunk._ts_meta_min_1, compress_hyper_2_91_chunk._ts_meta_max_1, compress_hyper_2_91_chunk."time", compress_hyper_2_91_chunk.int_value, compress_hyper_2_91_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_82_chunk - Output: (PARTIAL sum(_hyper_1_82_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_92_chunk - Output: compress_hyper_2_92_chunk._ts_meta_count, compress_hyper_2_92_chunk._ts_meta_sequence_num, compress_hyper_2_92_chunk.segment_by_value, compress_hyper_2_92_chunk._ts_meta_min_1, compress_hyper_2_92_chunk._ts_meta_max_1, compress_hyper_2_92_chunk."time", compress_hyper_2_92_chunk.int_value, compress_hyper_2_92_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_83_chunk - Output: (PARTIAL sum(_hyper_1_83_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on 
_timescaledb_internal.compress_hyper_2_93_chunk - Output: compress_hyper_2_93_chunk._ts_meta_count, compress_hyper_2_93_chunk._ts_meta_sequence_num, compress_hyper_2_93_chunk.segment_by_value, compress_hyper_2_93_chunk._ts_meta_min_1, compress_hyper_2_93_chunk._ts_meta_max_1, compress_hyper_2_93_chunk."time", compress_hyper_2_93_chunk.int_value, compress_hyper_2_93_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_84_chunk - Output: (PARTIAL sum(_hyper_1_84_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_94_chunk - Output: compress_hyper_2_94_chunk._ts_meta_count, compress_hyper_2_94_chunk._ts_meta_sequence_num, compress_hyper_2_94_chunk.segment_by_value, compress_hyper_2_94_chunk._ts_meta_min_1, compress_hyper_2_94_chunk._ts_meta_max_1, compress_hyper_2_94_chunk."time", compress_hyper_2_94_chunk.int_value, compress_hyper_2_94_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_85_chunk - Output: (PARTIAL sum(_hyper_1_85_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_95_chunk - Output: compress_hyper_2_95_chunk._ts_meta_count, compress_hyper_2_95_chunk._ts_meta_sequence_num, compress_hyper_2_95_chunk.segment_by_value, compress_hyper_2_95_chunk._ts_meta_min_1, compress_hyper_2_95_chunk._ts_meta_max_1, compress_hyper_2_95_chunk."time", compress_hyper_2_95_chunk.int_value, compress_hyper_2_95_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_86_chunk - Output: (PARTIAL sum(_hyper_1_86_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_96_chunk - Output: compress_hyper_2_96_chunk._ts_meta_count, compress_hyper_2_96_chunk._ts_meta_sequence_num, compress_hyper_2_96_chunk.segment_by_value, compress_hyper_2_96_chunk._ts_meta_min_1, compress_hyper_2_96_chunk._ts_meta_max_1, compress_hyper_2_96_chunk."time", compress_hyper_2_96_chunk.int_value, compress_hyper_2_96_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_87_chunk - Output: (PARTIAL sum(_hyper_1_87_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_97_chunk - Output: compress_hyper_2_97_chunk._ts_meta_count, compress_hyper_2_97_chunk._ts_meta_sequence_num, compress_hyper_2_97_chunk.segment_by_value, compress_hyper_2_97_chunk._ts_meta_min_1, compress_hyper_2_97_chunk._ts_meta_max_1, compress_hyper_2_97_chunk."time", compress_hyper_2_97_chunk.int_value, compress_hyper_2_97_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_88_chunk - Output: (PARTIAL sum(_hyper_1_88_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_98_chunk - Output: compress_hyper_2_98_chunk._ts_meta_count, compress_hyper_2_98_chunk._ts_meta_sequence_num, compress_hyper_2_98_chunk.segment_by_value, compress_hyper_2_98_chunk._ts_meta_min_1, compress_hyper_2_98_chunk._ts_meta_max_1, compress_hyper_2_98_chunk."time", compress_hyper_2_98_chunk.int_value, compress_hyper_2_98_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_89_chunk - Output: (PARTIAL sum(_hyper_1_89_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_99_chunk - Output: compress_hyper_2_99_chunk._ts_meta_count, 
compress_hyper_2_99_chunk._ts_meta_sequence_num, compress_hyper_2_99_chunk.segment_by_value, compress_hyper_2_99_chunk._ts_meta_min_1, compress_hyper_2_99_chunk._ts_meta_max_1, compress_hyper_2_99_chunk."time", compress_hyper_2_99_chunk.int_value, compress_hyper_2_99_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_90_chunk - Output: (PARTIAL sum(_hyper_1_90_chunk.segment_by_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_100_chunk - Output: compress_hyper_2_100_chunk._ts_meta_count, compress_hyper_2_100_chunk._ts_meta_sequence_num, compress_hyper_2_100_chunk.segment_by_value, compress_hyper_2_100_chunk._ts_meta_min_1, compress_hyper_2_100_chunk._ts_meta_max_1, compress_hyper_2_100_chunk."time", compress_hyper_2_100_chunk.int_value, compress_hyper_2_100_chunk.float_value -(53 rows) + Output: sum(_hyper_1_81_chunk."time") + -> Gather + Output: (PARTIAL sum(_hyper_1_81_chunk."time")) + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_81_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_81_chunk + Output: _hyper_1_81_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_91_chunk + Output: compress_hyper_2_91_chunk._ts_meta_count, compress_hyper_2_91_chunk._ts_meta_sequence_num, compress_hyper_2_91_chunk.segment_by_value, compress_hyper_2_91_chunk._ts_meta_min_1, compress_hyper_2_91_chunk._ts_meta_max_1, compress_hyper_2_91_chunk."time", compress_hyper_2_91_chunk.int_value, compress_hyper_2_91_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_82_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_82_chunk + Output: _hyper_1_82_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_92_chunk + Output: compress_hyper_2_92_chunk._ts_meta_count, compress_hyper_2_92_chunk._ts_meta_sequence_num, compress_hyper_2_92_chunk.segment_by_value, compress_hyper_2_92_chunk._ts_meta_min_1, compress_hyper_2_92_chunk._ts_meta_max_1, compress_hyper_2_92_chunk."time", compress_hyper_2_92_chunk.int_value, compress_hyper_2_92_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_83_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_83_chunk + Output: _hyper_1_83_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_93_chunk + Output: compress_hyper_2_93_chunk._ts_meta_count, compress_hyper_2_93_chunk._ts_meta_sequence_num, compress_hyper_2_93_chunk.segment_by_value, compress_hyper_2_93_chunk._ts_meta_min_1, compress_hyper_2_93_chunk._ts_meta_max_1, compress_hyper_2_93_chunk."time", compress_hyper_2_93_chunk.int_value, compress_hyper_2_93_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_84_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_84_chunk + Output: _hyper_1_84_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_94_chunk + Output: compress_hyper_2_94_chunk._ts_meta_count, compress_hyper_2_94_chunk._ts_meta_sequence_num, compress_hyper_2_94_chunk.segment_by_value, compress_hyper_2_94_chunk._ts_meta_min_1, compress_hyper_2_94_chunk._ts_meta_max_1, compress_hyper_2_94_chunk."time", compress_hyper_2_94_chunk.int_value, compress_hyper_2_94_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL 
sum(_hyper_1_85_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_85_chunk + Output: _hyper_1_85_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_95_chunk + Output: compress_hyper_2_95_chunk._ts_meta_count, compress_hyper_2_95_chunk._ts_meta_sequence_num, compress_hyper_2_95_chunk.segment_by_value, compress_hyper_2_95_chunk._ts_meta_min_1, compress_hyper_2_95_chunk._ts_meta_max_1, compress_hyper_2_95_chunk."time", compress_hyper_2_95_chunk.int_value, compress_hyper_2_95_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_86_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_86_chunk + Output: _hyper_1_86_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_96_chunk + Output: compress_hyper_2_96_chunk._ts_meta_count, compress_hyper_2_96_chunk._ts_meta_sequence_num, compress_hyper_2_96_chunk.segment_by_value, compress_hyper_2_96_chunk._ts_meta_min_1, compress_hyper_2_96_chunk._ts_meta_max_1, compress_hyper_2_96_chunk."time", compress_hyper_2_96_chunk.int_value, compress_hyper_2_96_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_87_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_87_chunk + Output: _hyper_1_87_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_97_chunk + Output: compress_hyper_2_97_chunk._ts_meta_count, compress_hyper_2_97_chunk._ts_meta_sequence_num, compress_hyper_2_97_chunk.segment_by_value, compress_hyper_2_97_chunk._ts_meta_min_1, compress_hyper_2_97_chunk._ts_meta_max_1, compress_hyper_2_97_chunk."time", compress_hyper_2_97_chunk.int_value, compress_hyper_2_97_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_88_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_88_chunk + Output: _hyper_1_88_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_98_chunk + Output: compress_hyper_2_98_chunk._ts_meta_count, compress_hyper_2_98_chunk._ts_meta_sequence_num, compress_hyper_2_98_chunk.segment_by_value, compress_hyper_2_98_chunk._ts_meta_min_1, compress_hyper_2_98_chunk._ts_meta_max_1, compress_hyper_2_98_chunk."time", compress_hyper_2_98_chunk.int_value, compress_hyper_2_98_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_89_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_89_chunk + Output: _hyper_1_89_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_99_chunk + Output: compress_hyper_2_99_chunk._ts_meta_count, compress_hyper_2_99_chunk._ts_meta_sequence_num, compress_hyper_2_99_chunk.segment_by_value, compress_hyper_2_99_chunk._ts_meta_min_1, compress_hyper_2_99_chunk._ts_meta_max_1, compress_hyper_2_99_chunk."time", compress_hyper_2_99_chunk.int_value, compress_hyper_2_99_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_90_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_90_chunk + Output: _hyper_1_90_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_100_chunk + Output: compress_hyper_2_100_chunk._ts_meta_count, compress_hyper_2_100_chunk._ts_meta_sequence_num, compress_hyper_2_100_chunk.segment_by_value, compress_hyper_2_100_chunk._ts_meta_min_1, compress_hyper_2_100_chunk._ts_meta_max_1, 
compress_hyper_2_100_chunk."time", compress_hyper_2_100_chunk.int_value, compress_hyper_2_100_chunk.float_value +(66 rows) :EXPLAIN SELECT sum(int_value) FROM testtable; - QUERY PLAN ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + QUERY PLAN +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_81_chunk.int_value) - -> Append - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_81_chunk - Output: (PARTIAL sum(_hyper_1_81_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_91_chunk - Output: compress_hyper_2_91_chunk._ts_meta_count, compress_hyper_2_91_chunk._ts_meta_sequence_num, compress_hyper_2_91_chunk.segment_by_value, compress_hyper_2_91_chunk._ts_meta_min_1, compress_hyper_2_91_chunk._ts_meta_max_1, compress_hyper_2_91_chunk."time", compress_hyper_2_91_chunk.int_value, compress_hyper_2_91_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_82_chunk - Output: (PARTIAL sum(_hyper_1_82_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_92_chunk - Output: compress_hyper_2_92_chunk._ts_meta_count, compress_hyper_2_92_chunk._ts_meta_sequence_num, compress_hyper_2_92_chunk.segment_by_value, compress_hyper_2_92_chunk._ts_meta_min_1, compress_hyper_2_92_chunk._ts_meta_max_1, compress_hyper_2_92_chunk."time", compress_hyper_2_92_chunk.int_value, compress_hyper_2_92_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_83_chunk - Output: (PARTIAL sum(_hyper_1_83_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_93_chunk - Output: compress_hyper_2_93_chunk._ts_meta_count, compress_hyper_2_93_chunk._ts_meta_sequence_num, compress_hyper_2_93_chunk.segment_by_value, compress_hyper_2_93_chunk._ts_meta_min_1, compress_hyper_2_93_chunk._ts_meta_max_1, compress_hyper_2_93_chunk."time", compress_hyper_2_93_chunk.int_value, compress_hyper_2_93_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_84_chunk - Output: (PARTIAL sum(_hyper_1_84_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_94_chunk - Output: compress_hyper_2_94_chunk._ts_meta_count, compress_hyper_2_94_chunk._ts_meta_sequence_num, compress_hyper_2_94_chunk.segment_by_value, compress_hyper_2_94_chunk._ts_meta_min_1, compress_hyper_2_94_chunk._ts_meta_max_1, compress_hyper_2_94_chunk."time", compress_hyper_2_94_chunk.int_value, compress_hyper_2_94_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_85_chunk - Output: (PARTIAL sum(_hyper_1_85_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_95_chunk - Output: compress_hyper_2_95_chunk._ts_meta_count, 
compress_hyper_2_95_chunk._ts_meta_sequence_num, compress_hyper_2_95_chunk.segment_by_value, compress_hyper_2_95_chunk._ts_meta_min_1, compress_hyper_2_95_chunk._ts_meta_max_1, compress_hyper_2_95_chunk."time", compress_hyper_2_95_chunk.int_value, compress_hyper_2_95_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_86_chunk - Output: (PARTIAL sum(_hyper_1_86_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_96_chunk - Output: compress_hyper_2_96_chunk._ts_meta_count, compress_hyper_2_96_chunk._ts_meta_sequence_num, compress_hyper_2_96_chunk.segment_by_value, compress_hyper_2_96_chunk._ts_meta_min_1, compress_hyper_2_96_chunk._ts_meta_max_1, compress_hyper_2_96_chunk."time", compress_hyper_2_96_chunk.int_value, compress_hyper_2_96_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_87_chunk - Output: (PARTIAL sum(_hyper_1_87_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_97_chunk - Output: compress_hyper_2_97_chunk._ts_meta_count, compress_hyper_2_97_chunk._ts_meta_sequence_num, compress_hyper_2_97_chunk.segment_by_value, compress_hyper_2_97_chunk._ts_meta_min_1, compress_hyper_2_97_chunk._ts_meta_max_1, compress_hyper_2_97_chunk."time", compress_hyper_2_97_chunk.int_value, compress_hyper_2_97_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_88_chunk - Output: (PARTIAL sum(_hyper_1_88_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_98_chunk - Output: compress_hyper_2_98_chunk._ts_meta_count, compress_hyper_2_98_chunk._ts_meta_sequence_num, compress_hyper_2_98_chunk.segment_by_value, compress_hyper_2_98_chunk._ts_meta_min_1, compress_hyper_2_98_chunk._ts_meta_max_1, compress_hyper_2_98_chunk."time", compress_hyper_2_98_chunk.int_value, compress_hyper_2_98_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_89_chunk - Output: (PARTIAL sum(_hyper_1_89_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_99_chunk - Output: compress_hyper_2_99_chunk._ts_meta_count, compress_hyper_2_99_chunk._ts_meta_sequence_num, compress_hyper_2_99_chunk.segment_by_value, compress_hyper_2_99_chunk._ts_meta_min_1, compress_hyper_2_99_chunk._ts_meta_max_1, compress_hyper_2_99_chunk."time", compress_hyper_2_99_chunk.int_value, compress_hyper_2_99_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_90_chunk - Output: (PARTIAL sum(_hyper_1_90_chunk.int_value)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_2_100_chunk - Output: compress_hyper_2_100_chunk._ts_meta_count, compress_hyper_2_100_chunk._ts_meta_sequence_num, compress_hyper_2_100_chunk.segment_by_value, compress_hyper_2_100_chunk._ts_meta_min_1, compress_hyper_2_100_chunk._ts_meta_max_1, compress_hyper_2_100_chunk."time", compress_hyper_2_100_chunk.int_value, compress_hyper_2_100_chunk.float_value -(53 rows) + Output: sum(_hyper_1_81_chunk."time") + -> Gather + Output: (PARTIAL sum(_hyper_1_81_chunk."time")) + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_81_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_81_chunk + Output: _hyper_1_81_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_91_chunk + Output: 
compress_hyper_2_91_chunk._ts_meta_count, compress_hyper_2_91_chunk._ts_meta_sequence_num, compress_hyper_2_91_chunk.segment_by_value, compress_hyper_2_91_chunk._ts_meta_min_1, compress_hyper_2_91_chunk._ts_meta_max_1, compress_hyper_2_91_chunk."time", compress_hyper_2_91_chunk.int_value, compress_hyper_2_91_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_82_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_82_chunk + Output: _hyper_1_82_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_92_chunk + Output: compress_hyper_2_92_chunk._ts_meta_count, compress_hyper_2_92_chunk._ts_meta_sequence_num, compress_hyper_2_92_chunk.segment_by_value, compress_hyper_2_92_chunk._ts_meta_min_1, compress_hyper_2_92_chunk._ts_meta_max_1, compress_hyper_2_92_chunk."time", compress_hyper_2_92_chunk.int_value, compress_hyper_2_92_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_83_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_83_chunk + Output: _hyper_1_83_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_93_chunk + Output: compress_hyper_2_93_chunk._ts_meta_count, compress_hyper_2_93_chunk._ts_meta_sequence_num, compress_hyper_2_93_chunk.segment_by_value, compress_hyper_2_93_chunk._ts_meta_min_1, compress_hyper_2_93_chunk._ts_meta_max_1, compress_hyper_2_93_chunk."time", compress_hyper_2_93_chunk.int_value, compress_hyper_2_93_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_84_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_84_chunk + Output: _hyper_1_84_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_94_chunk + Output: compress_hyper_2_94_chunk._ts_meta_count, compress_hyper_2_94_chunk._ts_meta_sequence_num, compress_hyper_2_94_chunk.segment_by_value, compress_hyper_2_94_chunk._ts_meta_min_1, compress_hyper_2_94_chunk._ts_meta_max_1, compress_hyper_2_94_chunk."time", compress_hyper_2_94_chunk.int_value, compress_hyper_2_94_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_85_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_85_chunk + Output: _hyper_1_85_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_95_chunk + Output: compress_hyper_2_95_chunk._ts_meta_count, compress_hyper_2_95_chunk._ts_meta_sequence_num, compress_hyper_2_95_chunk.segment_by_value, compress_hyper_2_95_chunk._ts_meta_min_1, compress_hyper_2_95_chunk._ts_meta_max_1, compress_hyper_2_95_chunk."time", compress_hyper_2_95_chunk.int_value, compress_hyper_2_95_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_86_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_86_chunk + Output: _hyper_1_86_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_96_chunk + Output: compress_hyper_2_96_chunk._ts_meta_count, compress_hyper_2_96_chunk._ts_meta_sequence_num, compress_hyper_2_96_chunk.segment_by_value, compress_hyper_2_96_chunk._ts_meta_min_1, compress_hyper_2_96_chunk._ts_meta_max_1, compress_hyper_2_96_chunk."time", compress_hyper_2_96_chunk.int_value, compress_hyper_2_96_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_87_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_87_chunk + Output: 
_hyper_1_87_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_97_chunk + Output: compress_hyper_2_97_chunk._ts_meta_count, compress_hyper_2_97_chunk._ts_meta_sequence_num, compress_hyper_2_97_chunk.segment_by_value, compress_hyper_2_97_chunk._ts_meta_min_1, compress_hyper_2_97_chunk._ts_meta_max_1, compress_hyper_2_97_chunk."time", compress_hyper_2_97_chunk.int_value, compress_hyper_2_97_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_88_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_88_chunk + Output: _hyper_1_88_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_98_chunk + Output: compress_hyper_2_98_chunk._ts_meta_count, compress_hyper_2_98_chunk._ts_meta_sequence_num, compress_hyper_2_98_chunk.segment_by_value, compress_hyper_2_98_chunk._ts_meta_min_1, compress_hyper_2_98_chunk._ts_meta_max_1, compress_hyper_2_98_chunk."time", compress_hyper_2_98_chunk.int_value, compress_hyper_2_98_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_89_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_89_chunk + Output: _hyper_1_89_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_99_chunk + Output: compress_hyper_2_99_chunk._ts_meta_count, compress_hyper_2_99_chunk._ts_meta_sequence_num, compress_hyper_2_99_chunk.segment_by_value, compress_hyper_2_99_chunk._ts_meta_min_1, compress_hyper_2_99_chunk._ts_meta_max_1, compress_hyper_2_99_chunk."time", compress_hyper_2_99_chunk.int_value, compress_hyper_2_99_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_90_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_90_chunk + Output: _hyper_1_90_chunk.int_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_100_chunk + Output: compress_hyper_2_100_chunk._ts_meta_count, compress_hyper_2_100_chunk._ts_meta_sequence_num, compress_hyper_2_100_chunk.segment_by_value, compress_hyper_2_100_chunk._ts_meta_min_1, compress_hyper_2_100_chunk._ts_meta_max_1, compress_hyper_2_100_chunk."time", compress_hyper_2_100_chunk.int_value, compress_hyper_2_100_chunk.float_value +(66 rows) SELECT sum(segment_by_value) FROM testtable; sum @@ -2307,62 +2449,75 @@ SELECT compress_chunk(ch) FROM show_chunks('testtable2') ch; :EXPLAIN SELECT sum(segment_by_value1) FROM testtable2; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_3_101_chunk.segment_by_value1) - -> Append - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk - Output: (PARTIAL 
sum(_hyper_3_101_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk - Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk - Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk - Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk - Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk - Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk - Output: (PARTIAL sum(_hyper_3_104_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk - Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk - Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk - Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk - Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk - Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, 
compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk - Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk - Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk - Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk - Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk - Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_4_119_chunk - Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk - Output: (PARTIAL sum(_hyper_3_110_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_4_120_chunk - Output: compress_hyper_4_120_chunk._ts_meta_count, compress_hyper_4_120_chunk._ts_meta_sequence_num, compress_hyper_4_120_chunk.segment_by_value1, compress_hyper_4_120_chunk.segment_by_value2, compress_hyper_4_120_chunk._ts_meta_min_1, compress_hyper_4_120_chunk._ts_meta_max_1, compress_hyper_4_120_chunk."time", compress_hyper_4_120_chunk.int_value, compress_hyper_4_120_chunk.float_value -(53 rows) + Output: sum(_hyper_3_101_chunk."time") + -> Gather + Output: (PARTIAL sum(_hyper_3_101_chunk."time")) + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_101_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk + Output: _hyper_3_101_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk + Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value + -> Custom Scan (VectorAgg) + Output: 
(PARTIAL sum(_hyper_3_102_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk + Output: _hyper_3_102_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk + Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_103_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk + Output: _hyper_3_103_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk + Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_104_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk + Output: _hyper_3_104_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk + Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_105_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk + Output: _hyper_3_105_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk + Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_106_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk + Output: _hyper_3_106_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk + Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_107_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk + Output: _hyper_3_107_chunk.segment_by_value1 + -> Parallel Seq 
Scan on _timescaledb_internal.compress_hyper_4_117_chunk + Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_108_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk + Output: _hyper_3_108_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk + Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_109_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk + Output: _hyper_3_109_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_119_chunk + Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_110_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk + Output: _hyper_3_110_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_120_chunk + Output: compress_hyper_4_120_chunk._ts_meta_count, compress_hyper_4_120_chunk._ts_meta_sequence_num, compress_hyper_4_120_chunk.segment_by_value1, compress_hyper_4_120_chunk.segment_by_value2, compress_hyper_4_120_chunk._ts_meta_min_1, compress_hyper_4_120_chunk._ts_meta_max_1, compress_hyper_4_120_chunk."time", compress_hyper_4_120_chunk.int_value, compress_hyper_4_120_chunk.float_value +(66 rows) SELECT sum(segment_by_value1) FROM testtable2; sum @@ -2372,62 +2527,75 @@ SELECT sum(segment_by_value1) FROM testtable2; :EXPLAIN SELECT sum(segment_by_value2) FROM testtable2; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - 
Output: sum(_hyper_3_101_chunk.segment_by_value2) - -> Append - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk - Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value2)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk - Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk - Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value2)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk - Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk - Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value2)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk - Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk - Output: (PARTIAL sum(_hyper_3_104_chunk.segment_by_value2)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk - Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk - Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value2)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk - Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk - Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value2)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk - Output: compress_hyper_4_116_chunk._ts_meta_count, 
compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk - Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value2)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk - Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk - Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value2)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk - Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk - Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value2)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_4_119_chunk - Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk - Output: (PARTIAL sum(_hyper_3_110_chunk.segment_by_value2)) - Vectorized Aggregation: true - -> Seq Scan on _timescaledb_internal.compress_hyper_4_120_chunk - Output: compress_hyper_4_120_chunk._ts_meta_count, compress_hyper_4_120_chunk._ts_meta_sequence_num, compress_hyper_4_120_chunk.segment_by_value1, compress_hyper_4_120_chunk.segment_by_value2, compress_hyper_4_120_chunk._ts_meta_min_1, compress_hyper_4_120_chunk._ts_meta_max_1, compress_hyper_4_120_chunk."time", compress_hyper_4_120_chunk.int_value, compress_hyper_4_120_chunk.float_value -(53 rows) + Output: sum(_hyper_3_101_chunk."time") + -> Gather + Output: (PARTIAL sum(_hyper_3_101_chunk."time")) + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_101_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk + Output: _hyper_3_101_chunk.segment_by_value2 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk + Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, 
compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_102_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk + Output: _hyper_3_102_chunk.segment_by_value2 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk + Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_103_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk + Output: _hyper_3_103_chunk.segment_by_value2 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk + Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_104_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk + Output: _hyper_3_104_chunk.segment_by_value2 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk + Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_105_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk + Output: _hyper_3_105_chunk.segment_by_value2 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk + Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_106_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk + Output: _hyper_3_106_chunk.segment_by_value2 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk + Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value + -> Custom Scan (VectorAgg) 
+ Output: (PARTIAL sum(_hyper_3_107_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk + Output: _hyper_3_107_chunk.segment_by_value2 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk + Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_108_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk + Output: _hyper_3_108_chunk.segment_by_value2 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk + Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_109_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk + Output: _hyper_3_109_chunk.segment_by_value2 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_119_chunk + Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_110_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk + Output: _hyper_3_110_chunk.segment_by_value2 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_120_chunk + Output: compress_hyper_4_120_chunk._ts_meta_count, compress_hyper_4_120_chunk._ts_meta_sequence_num, compress_hyper_4_120_chunk.segment_by_value1, compress_hyper_4_120_chunk.segment_by_value2, compress_hyper_4_120_chunk._ts_meta_min_1, compress_hyper_4_120_chunk._ts_meta_max_1, compress_hyper_4_120_chunk."time", compress_hyper_4_120_chunk.int_value, compress_hyper_4_120_chunk.float_value +(66 rows) SELECT sum(segment_by_value2) FROM testtable2; sum @@ -2438,210 +2606,249 @@ SELECT sum(segment_by_value2) FROM testtable2; -- Vectorization possible - filter on segment_by :EXPLAIN SELECT sum(segment_by_value1) FROM testtable2 WHERE segment_by_value1 > 0; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN 
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_3_101_chunk.segment_by_value1) - -> Append - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk - Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_111_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_111_chunk - Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value - Index Cond: (compress_hyper_4_111_chunk.segment_by_value1 > 0) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk - Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_112_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_112_chunk - Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value - Index Cond: (compress_hyper_4_112_chunk.segment_by_value1 > 0) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk - Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_113_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_113_chunk - Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value - Index Cond: (compress_hyper_4_113_chunk.segment_by_value1 > 0) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk - Output: (PARTIAL sum(_hyper_3_104_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_114_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_114_chunk - Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value - Index Cond: 
(compress_hyper_4_114_chunk.segment_by_value1 > 0) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk - Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_115_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_115_chunk - Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value - Index Cond: (compress_hyper_4_115_chunk.segment_by_value1 > 0) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk - Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_116_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_116_chunk - Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value - Index Cond: (compress_hyper_4_116_chunk.segment_by_value1 > 0) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk - Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_117_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_117_chunk - Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value - Index Cond: (compress_hyper_4_117_chunk.segment_by_value1 > 0) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk - Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_118_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_118_chunk - Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value - Index Cond: (compress_hyper_4_118_chunk.segment_by_value1 > 0) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk - Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_119_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_119_chunk - Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, 
compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value - Index Cond: (compress_hyper_4_119_chunk.segment_by_value1 > 0) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk - Output: (PARTIAL sum(_hyper_3_110_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_120_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_120_chunk - Output: compress_hyper_4_120_chunk._ts_meta_count, compress_hyper_4_120_chunk._ts_meta_sequence_num, compress_hyper_4_120_chunk.segment_by_value1, compress_hyper_4_120_chunk.segment_by_value2, compress_hyper_4_120_chunk._ts_meta_min_1, compress_hyper_4_120_chunk._ts_meta_max_1, compress_hyper_4_120_chunk."time", compress_hyper_4_120_chunk.int_value, compress_hyper_4_120_chunk.float_value - Index Cond: (compress_hyper_4_120_chunk.segment_by_value1 > 0) -(63 rows) + Output: sum(_hyper_3_102_chunk."time") + -> Gather + Output: (PARTIAL sum(_hyper_3_102_chunk."time")) + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_102_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk + Output: _hyper_3_102_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk + Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value + Filter: (compress_hyper_4_112_chunk.segment_by_value1 > 0) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_103_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk + Output: _hyper_3_103_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk + Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value + Filter: (compress_hyper_4_113_chunk.segment_by_value1 > 0) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_104_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk + Output: _hyper_3_104_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk + Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value + Filter: (compress_hyper_4_114_chunk.segment_by_value1 > 0) + -> Custom Scan (VectorAgg) + Output: (PARTIAL 
sum(_hyper_3_105_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk + Output: _hyper_3_105_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk + Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value + Filter: (compress_hyper_4_115_chunk.segment_by_value1 > 0) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_106_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk + Output: _hyper_3_106_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk + Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value + Filter: (compress_hyper_4_116_chunk.segment_by_value1 > 0) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_107_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk + Output: _hyper_3_107_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk + Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value + Filter: (compress_hyper_4_117_chunk.segment_by_value1 > 0) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_108_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk + Output: _hyper_3_108_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk + Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value + Filter: (compress_hyper_4_118_chunk.segment_by_value1 > 0) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_109_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk + Output: _hyper_3_109_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_119_chunk + Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, 
compress_hyper_4_119_chunk.float_value + Filter: (compress_hyper_4_119_chunk.segment_by_value1 > 0) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_110_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk + Output: _hyper_3_110_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_120_chunk + Output: compress_hyper_4_120_chunk._ts_meta_count, compress_hyper_4_120_chunk._ts_meta_sequence_num, compress_hyper_4_120_chunk.segment_by_value1, compress_hyper_4_120_chunk.segment_by_value2, compress_hyper_4_120_chunk._ts_meta_min_1, compress_hyper_4_120_chunk._ts_meta_max_1, compress_hyper_4_120_chunk."time", compress_hyper_4_120_chunk.int_value, compress_hyper_4_120_chunk.float_value + Filter: (compress_hyper_4_120_chunk.segment_by_value1 > 0) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_101_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk + Output: _hyper_3_101_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk + Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value + Filter: (compress_hyper_4_111_chunk.segment_by_value1 > 0) +(76 rows) :EXPLAIN SELECT sum(segment_by_value1) FROM testtable2 WHERE segment_by_value1 > 0 AND segment_by_value2 > 0; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_3_101_chunk.segment_by_value1) - -> Append - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk - Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_111_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_111_chunk - Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value - Index Cond: ((compress_hyper_4_111_chunk.segment_by_value1 > 0) AND (compress_hyper_4_111_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk - Output: 
(PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_112_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_112_chunk - Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value - Index Cond: ((compress_hyper_4_112_chunk.segment_by_value1 > 0) AND (compress_hyper_4_112_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk - Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_113_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_113_chunk - Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value - Index Cond: ((compress_hyper_4_113_chunk.segment_by_value1 > 0) AND (compress_hyper_4_113_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk - Output: (PARTIAL sum(_hyper_3_104_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_114_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_114_chunk - Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value - Index Cond: ((compress_hyper_4_114_chunk.segment_by_value1 > 0) AND (compress_hyper_4_114_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk - Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_115_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_115_chunk - Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value - Index Cond: ((compress_hyper_4_115_chunk.segment_by_value1 > 0) AND (compress_hyper_4_115_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk - Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_116_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_116_chunk - Output: 
compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value - Index Cond: ((compress_hyper_4_116_chunk.segment_by_value1 > 0) AND (compress_hyper_4_116_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk - Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_117_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_117_chunk - Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value - Index Cond: ((compress_hyper_4_117_chunk.segment_by_value1 > 0) AND (compress_hyper_4_117_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk - Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_118_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_118_chunk - Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value - Index Cond: ((compress_hyper_4_118_chunk.segment_by_value1 > 0) AND (compress_hyper_4_118_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk - Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_119_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_119_chunk - Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value - Index Cond: ((compress_hyper_4_119_chunk.segment_by_value1 > 0) AND (compress_hyper_4_119_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk - Output: (PARTIAL sum(_hyper_3_110_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_120_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_120_chunk - Output: compress_hyper_4_120_chunk._ts_meta_count, compress_hyper_4_120_chunk._ts_meta_sequence_num, compress_hyper_4_120_chunk.segment_by_value1, compress_hyper_4_120_chunk.segment_by_value2, compress_hyper_4_120_chunk._ts_meta_min_1, 
compress_hyper_4_120_chunk._ts_meta_max_1, compress_hyper_4_120_chunk."time", compress_hyper_4_120_chunk.int_value, compress_hyper_4_120_chunk.float_value - Index Cond: ((compress_hyper_4_120_chunk.segment_by_value1 > 0) AND (compress_hyper_4_120_chunk.segment_by_value2 > 0)) -(63 rows) + Output: sum(_hyper_3_101_chunk."time") + -> Gather + Output: (PARTIAL sum(_hyper_3_101_chunk."time")) + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_101_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk + Output: _hyper_3_101_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk + Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value + Filter: ((compress_hyper_4_111_chunk.segment_by_value1 > 0) AND (compress_hyper_4_111_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_102_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk + Output: _hyper_3_102_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk + Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value + Filter: ((compress_hyper_4_112_chunk.segment_by_value1 > 0) AND (compress_hyper_4_112_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_103_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk + Output: _hyper_3_103_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk + Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value + Filter: ((compress_hyper_4_113_chunk.segment_by_value1 > 0) AND (compress_hyper_4_113_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_104_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk + Output: _hyper_3_104_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk + Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value + Filter: ((compress_hyper_4_114_chunk.segment_by_value1 > 0) AND 
(compress_hyper_4_114_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_105_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk + Output: _hyper_3_105_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk + Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value + Filter: ((compress_hyper_4_115_chunk.segment_by_value1 > 0) AND (compress_hyper_4_115_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_106_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk + Output: _hyper_3_106_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk + Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value + Filter: ((compress_hyper_4_116_chunk.segment_by_value1 > 0) AND (compress_hyper_4_116_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_107_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk + Output: _hyper_3_107_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk + Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value + Filter: ((compress_hyper_4_117_chunk.segment_by_value1 > 0) AND (compress_hyper_4_117_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_108_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk + Output: _hyper_3_108_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk + Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value + Filter: ((compress_hyper_4_118_chunk.segment_by_value1 > 0) AND (compress_hyper_4_118_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_109_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk + Output: _hyper_3_109_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_119_chunk + Output: compress_hyper_4_119_chunk._ts_meta_count, 
compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value + Filter: ((compress_hyper_4_119_chunk.segment_by_value1 > 0) AND (compress_hyper_4_119_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_110_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk + Output: _hyper_3_110_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_120_chunk + Output: compress_hyper_4_120_chunk._ts_meta_count, compress_hyper_4_120_chunk._ts_meta_sequence_num, compress_hyper_4_120_chunk.segment_by_value1, compress_hyper_4_120_chunk.segment_by_value2, compress_hyper_4_120_chunk._ts_meta_min_1, compress_hyper_4_120_chunk._ts_meta_max_1, compress_hyper_4_120_chunk."time", compress_hyper_4_120_chunk.int_value, compress_hyper_4_120_chunk.float_value + Filter: ((compress_hyper_4_120_chunk.segment_by_value1 > 0) AND (compress_hyper_4_120_chunk.segment_by_value2 > 0)) +(76 rows) :EXPLAIN SELECT sum(segment_by_value1) FROM testtable2 WHERE segment_by_value1 > 0 AND segment_by_value2 > 0 AND 2>1; - QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + QUERY PLAN +---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_3_101_chunk.segment_by_value1) - -> Append - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk - Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_111_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_111_chunk - Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value - Index Cond: ((compress_hyper_4_111_chunk.segment_by_value1 > 0) AND (compress_hyper_4_111_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk - Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_112_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_112_chunk - Output: compress_hyper_4_112_chunk._ts_meta_count, 
compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value - Index Cond: ((compress_hyper_4_112_chunk.segment_by_value1 > 0) AND (compress_hyper_4_112_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk - Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_113_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_113_chunk - Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value - Index Cond: ((compress_hyper_4_113_chunk.segment_by_value1 > 0) AND (compress_hyper_4_113_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk - Output: (PARTIAL sum(_hyper_3_104_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_114_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_114_chunk - Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value - Index Cond: ((compress_hyper_4_114_chunk.segment_by_value1 > 0) AND (compress_hyper_4_114_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk - Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_115_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_115_chunk - Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value - Index Cond: ((compress_hyper_4_115_chunk.segment_by_value1 > 0) AND (compress_hyper_4_115_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk - Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_116_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_116_chunk - Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, 
compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value - Index Cond: ((compress_hyper_4_116_chunk.segment_by_value1 > 0) AND (compress_hyper_4_116_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk - Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_117_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_117_chunk - Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value - Index Cond: ((compress_hyper_4_117_chunk.segment_by_value1 > 0) AND (compress_hyper_4_117_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk - Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_118_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_118_chunk - Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value - Index Cond: ((compress_hyper_4_118_chunk.segment_by_value1 > 0) AND (compress_hyper_4_118_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk - Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_119_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_119_chunk - Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value - Index Cond: ((compress_hyper_4_119_chunk.segment_by_value1 > 0) AND (compress_hyper_4_119_chunk.segment_by_value2 > 0)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk - Output: (PARTIAL sum(_hyper_3_110_chunk.segment_by_value1)) - Vectorized Aggregation: true - -> Index Scan using compress_hyper_4_120_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_120_chunk - Output: compress_hyper_4_120_chunk._ts_meta_count, compress_hyper_4_120_chunk._ts_meta_sequence_num, compress_hyper_4_120_chunk.segment_by_value1, compress_hyper_4_120_chunk.segment_by_value2, compress_hyper_4_120_chunk._ts_meta_min_1, compress_hyper_4_120_chunk._ts_meta_max_1, compress_hyper_4_120_chunk."time", compress_hyper_4_120_chunk.int_value, compress_hyper_4_120_chunk.float_value - Index Cond: ((compress_hyper_4_120_chunk.segment_by_value1 > 0) AND (compress_hyper_4_120_chunk.segment_by_value2 > 0)) -(63 rows) + Output: 
sum(_hyper_3_101_chunk."time") + -> Gather + Output: (PARTIAL sum(_hyper_3_101_chunk."time")) + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_101_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk + Output: _hyper_3_101_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk + Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value + Filter: ((compress_hyper_4_111_chunk.segment_by_value1 > 0) AND (compress_hyper_4_111_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_102_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk + Output: _hyper_3_102_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk + Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value + Filter: ((compress_hyper_4_112_chunk.segment_by_value1 > 0) AND (compress_hyper_4_112_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_103_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk + Output: _hyper_3_103_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk + Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value + Filter: ((compress_hyper_4_113_chunk.segment_by_value1 > 0) AND (compress_hyper_4_113_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_104_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk + Output: _hyper_3_104_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk + Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value + Filter: ((compress_hyper_4_114_chunk.segment_by_value1 > 0) AND (compress_hyper_4_114_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_105_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk + Output: _hyper_3_105_chunk.segment_by_value1 + -> Parallel Seq Scan on 
_timescaledb_internal.compress_hyper_4_115_chunk + Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value + Filter: ((compress_hyper_4_115_chunk.segment_by_value1 > 0) AND (compress_hyper_4_115_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_106_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk + Output: _hyper_3_106_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk + Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value + Filter: ((compress_hyper_4_116_chunk.segment_by_value1 > 0) AND (compress_hyper_4_116_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_107_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk + Output: _hyper_3_107_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk + Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value + Filter: ((compress_hyper_4_117_chunk.segment_by_value1 > 0) AND (compress_hyper_4_117_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_108_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk + Output: _hyper_3_108_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk + Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value + Filter: ((compress_hyper_4_118_chunk.segment_by_value1 > 0) AND (compress_hyper_4_118_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_109_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk + Output: _hyper_3_109_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_119_chunk + Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", 
compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value + Filter: ((compress_hyper_4_119_chunk.segment_by_value1 > 0) AND (compress_hyper_4_119_chunk.segment_by_value2 > 0)) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_110_chunk."time")) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk + Output: _hyper_3_110_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_120_chunk + Output: compress_hyper_4_120_chunk._ts_meta_count, compress_hyper_4_120_chunk._ts_meta_sequence_num, compress_hyper_4_120_chunk.segment_by_value1, compress_hyper_4_120_chunk.segment_by_value2, compress_hyper_4_120_chunk._ts_meta_min_1, compress_hyper_4_120_chunk._ts_meta_max_1, compress_hyper_4_120_chunk."time", compress_hyper_4_120_chunk.int_value, compress_hyper_4_120_chunk.float_value + Filter: ((compress_hyper_4_120_chunk.segment_by_value1 > 0) AND (compress_hyper_4_120_chunk.segment_by_value2 > 0)) +(76 rows) -- Vectorization not possible filter on segment_by and compressed value -- Disable parallel worker to get deterministic query plans on i386 From be203fde33133cdc6263c84a463b349886fc9aff Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 28 Mar 2024 15:19:30 +0100 Subject: [PATCH 05/81] fix the build --- tsl/src/partialize_agg.c | 58 ---------------------------------------- 1 file changed, 58 deletions(-) diff --git a/tsl/src/partialize_agg.c b/tsl/src/partialize_agg.c index deab6d1555c..c72f725b058 100644 --- a/tsl/src/partialize_agg.c +++ b/tsl/src/partialize_agg.c @@ -30,64 +30,6 @@ #include "nodes/vector_agg/vector_agg.h" -/* - * Are we able to optimize the path by applying vectorized aggregation? - */ -static bool -is_vectorizable_agg_path(PlannerInfo *root, AggPath *agg_path, Path *path) -{ - Assert(agg_path->aggstrategy == AGG_SORTED || agg_path->aggstrategy == AGG_PLAIN || - agg_path->aggstrategy == AGG_HASHED); - - /* Having is not supported at the moment */ - if (root->hasHavingQual) - return false; - - /* Only vectorizing within the decompress node is supported so far */ - bool is_decompress_chunk = ts_is_decompress_chunk_path(path); - if (!is_decompress_chunk) - return false; - -#ifdef USE_ASSERT_CHECKING - DecompressChunkPath *decompress_path = (DecompressChunkPath *) path; - Assert(decompress_path->custom_path.custom_paths != NIL); - - /* Hypertable compression info is already fetched from the catalog */ - Assert(decompress_path->info != NULL); -#endif - - /* No filters on the compressed attributes are supported at the moment */ - if ((list_length(path->parent->baserestrictinfo) > 0 || path->parent->joininfo != NULL)) - return false; - - /* We currently handle only one agg function per node */ - if (list_length(agg_path->path.pathtarget->exprs) != 1) - return false; - - /* Only sum on int 4 is supported at the moment */ - Node *expr_node = linitial(agg_path->path.pathtarget->exprs); - if (!IsA(expr_node, Aggref)) - return false; - - Aggref *aggref = castNode(Aggref, expr_node); - - /* Filter expressions in the aggregate are not supported */ - if (aggref->aggfilter != NULL) - return false; - - if (aggref->aggfnoid != F_SUM_INT4) - return false; - - /* Can aggregate only a bare decompressed column, not an expression. 
*/ - TargetEntry *argument = castNode(TargetEntry, linitial(aggref->args)); - if (!IsA(argument->expr, Var)) - { - return false; - } - - return true; -} - /* * Check if we can perform the computation of the aggregate in a vectorized manner directly inside * of the decompress chunk node. If this is possible, the decompress chunk node will emit partial From ee8b1f4dfd89f32535786309dd66d2eca6b59a31 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 28 Mar 2024 15:58:42 +0100 Subject: [PATCH 06/81] remove the old planning approach --- src/cross_module_fn.c | 8 - src/cross_module_fn.h | 1 - src/planner/partialize.c | 18 +- tsl/src/CMakeLists.txt | 1 - tsl/src/init.c | 4 +- tsl/src/nodes/decompress_chunk/planner.c | 7 +- tsl/src/nodes/vector_agg/CMakeLists.txt | 3 +- tsl/src/nodes/vector_agg/exec.c | 72 +++++++ .../nodes/vector_agg/{vector_agg.h => exec.h} | 10 - .../vector_agg/plan.c} | 176 +++++++++++------- tsl/src/nodes/vector_agg/plan.h | 18 ++ tsl/src/nodes/vector_agg/vector_agg.c | 168 ----------------- tsl/src/partialize_agg.h | 11 -- tsl/src/planner.c | 19 ++ tsl/src/planner.h | 1 + 15 files changed, 222 insertions(+), 295 deletions(-) create mode 100644 tsl/src/nodes/vector_agg/exec.c rename tsl/src/nodes/vector_agg/{vector_agg.h => exec.h} (63%) rename tsl/src/{partialize_agg.c => nodes/vector_agg/plan.c} (63%) create mode 100644 tsl/src/nodes/vector_agg/plan.h delete mode 100644 tsl/src/nodes/vector_agg/vector_agg.c delete mode 100644 tsl/src/partialize_agg.h diff --git a/src/cross_module_fn.c b/src/cross_module_fn.c index 0e1f499d986..3d7e4c8319e 100644 --- a/src/cross_module_fn.c +++ b/src/cross_module_fn.c @@ -124,13 +124,6 @@ job_execute_default_fn(BgwJob *job) pg_unreachable(); } -static bool -push_down_aggregation(PlannerInfo *root, AggPath *aggregation_path, Path *subpath) -{ - /* Don't skip adding the agg node on top of the path */ - return false; -} - static void tsl_postprocess_plan_stub(PlannedStmt *stmt) { @@ -327,7 +320,6 @@ TSDLLEXPORT CrossModuleFunctions ts_cm_functions_default = { .policies_alter = error_no_default_fn_pg_community, .policies_show = error_no_default_fn_pg_community, - .push_down_aggregation = push_down_aggregation, .tsl_postprocess_plan = tsl_postprocess_plan_stub, .partialize_agg = error_no_default_fn_pg_community, diff --git a/src/cross_module_fn.h b/src/cross_module_fn.h index 722ca5a918c..010bc990322 100644 --- a/src/cross_module_fn.h +++ b/src/cross_module_fn.h @@ -90,7 +90,6 @@ typedef struct CrossModuleFunctions PGFunction move_chunk; /* Vectorized queries */ - bool (*push_down_aggregation)(PlannerInfo *root, AggPath *aggregation_path, Path *subpath); void (*tsl_postprocess_plan)(PlannedStmt *stmt); /* Continuous Aggregates */ diff --git a/src/planner/partialize.c b/src/planner/partialize.c index e22fc2af134..b254ab4a9d8 100644 --- a/src/planner/partialize.c +++ b/src/planner/partialize.c @@ -421,14 +421,7 @@ add_partially_aggregated_subpaths(PlannerInfo *root, Path *parent_path, AggPath *agg_path = create_sorted_partial_agg_path(root, subpath, chunktarget, d_num_groups, extra_data); - if (ts_cm_functions->push_down_aggregation(root, agg_path, subpath)) - { - *sorted_paths = lappend(*sorted_paths, subpath); - } - else - { - *sorted_paths = lappend(*sorted_paths, (Path *) agg_path); - } + *sorted_paths = lappend(*sorted_paths, (Path *) agg_path); } if (can_hash) @@ -436,14 +429,7 @@ add_partially_aggregated_subpaths(PlannerInfo *root, Path *parent_path, AggPath *agg_path = 
create_hashed_partial_agg_path(root, subpath, chunktarget, d_num_groups, extra_data); - if (ts_cm_functions->push_down_aggregation(root, agg_path, subpath)) - { - *hashed_paths = lappend(*hashed_paths, subpath); - } - else - { - *hashed_paths = lappend(*hashed_paths, (Path *) agg_path); - } + *hashed_paths = lappend(*hashed_paths, (Path *) agg_path); } } diff --git a/tsl/src/CMakeLists.txt b/tsl/src/CMakeLists.txt index d6fc2a76266..90cf8a638c2 100644 --- a/tsl/src/CMakeLists.txt +++ b/tsl/src/CMakeLists.txt @@ -2,7 +2,6 @@ set(SOURCES chunk_api.c chunk.c init.c - partialize_agg.c partialize_finalize.c planner.c process_utility.c diff --git a/tsl/src/init.c b/tsl/src/init.c index 49ff5fced73..5ef72970468 100644 --- a/tsl/src/init.c +++ b/tsl/src/init.c @@ -39,8 +39,7 @@ #include "nodes/decompress_chunk/planner.h" #include "nodes/skip_scan/skip_scan.h" #include "nodes/gapfill/gapfill_functions.h" -#include "nodes/vector_agg/vector_agg.h" -#include "partialize_agg.h" +#include "nodes/vector_agg/plan.h" #include "partialize_finalize.h" #include "planner.h" #include "process_utility.h" @@ -119,7 +118,6 @@ CrossModuleFunctions tsl_cm_functions = { .policies_show = policies_show, /* Vectorized queries */ - .push_down_aggregation = apply_vectorized_agg_optimization, .tsl_postprocess_plan = tsl_postprocess_plan, /* Continuous Aggregates */ diff --git a/tsl/src/nodes/decompress_chunk/planner.c b/tsl/src/nodes/decompress_chunk/planner.c index 1ecee8755cd..fd8763cd890 100644 --- a/tsl/src/nodes/decompress_chunk/planner.c +++ b/tsl/src/nodes/decompress_chunk/planner.c @@ -33,7 +33,7 @@ #include "nodes/decompress_chunk/exec.h" #include "nodes/decompress_chunk/planner.h" #include "nodes/chunk_append/transform.h" -#include "nodes/vector_agg/vector_agg.h" +#include "nodes/vector_agg/exec.h" #include "vector_predicates.h" #include "ts_catalog/array_utils.h" @@ -1092,10 +1092,5 @@ decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *pat // my_print(decompressed_tlist); } - // if (dcpath->perform_vectorized_aggregation) - // { - // return vector_agg_plan_create(&decompress_plan->scan.plan); - // } - return &decompress_plan->scan.plan; } diff --git a/tsl/src/nodes/vector_agg/CMakeLists.txt b/tsl/src/nodes/vector_agg/CMakeLists.txt index 55da7eed64d..22a6b93ef0b 100644 --- a/tsl/src/nodes/vector_agg/CMakeLists.txt +++ b/tsl/src/nodes/vector_agg/CMakeLists.txt @@ -1,2 +1,3 @@ -set(SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/vector_agg.c) +set(SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/exec.c + ${CMAKE_CURRENT_SOURCE_DIR}/plan.c) target_sources(${TSL_LIBRARY_NAME} PRIVATE ${SOURCES}) diff --git a/tsl/src/nodes/vector_agg/exec.c b/tsl/src/nodes/vector_agg/exec.c new file mode 100644 index 00000000000..358ed84bc88 --- /dev/null +++ b/tsl/src/nodes/vector_agg/exec.c @@ -0,0 +1,72 @@ +/* + * This file and its contents are licensed under the Timescale License. + * Please see the included NOTICE for copyright information and + * LICENSE-TIMESCALE for a copy of the license. 
+ */ + +#include + +#include +#include +#include +#include +#include + +#include "exec.h" + +#include "nodes/decompress_chunk/compressed_batch.h" +#include "nodes/decompress_chunk/exec.h" + +static void +vector_agg_begin(CustomScanState *node, EState *estate, int eflags) +{ + CustomScan *cscan = castNode(CustomScan, node->ss.ps.plan); + node->custom_ps = + lappend(node->custom_ps, ExecInitNode(linitial(cscan->custom_plans), estate, eflags)); +} + +static void +vector_agg_end(CustomScanState *node) +{ + ExecEndNode(linitial(node->custom_ps)); +} + +static void +vector_agg_rescan(CustomScanState *node) +{ + if (node->ss.ps.chgParam != NULL) + UpdateChangedParamSet(linitial(node->custom_ps), node->ss.ps.chgParam); + + ExecReScan(linitial(node->custom_ps)); +} + +static TupleTableSlot * +vector_agg_exec(CustomScanState *node) +{ + // return ExecProcNode(linitial(node->custom_ps)); + DecompressChunkState *ds = (DecompressChunkState *) linitial(node->custom_ps); + return decompress_chunk_exec_vector_agg_impl(node, ds); +} + +static void +vector_agg_explain(CustomScanState *node, List *ancestors, ExplainState *es) +{ + /* noop? */ +} + +static struct CustomExecMethods exec_methods = { + .CustomName = "VectorAgg", + .BeginCustomScan = vector_agg_begin, + .ExecCustomScan = vector_agg_exec, + .EndCustomScan = vector_agg_end, + .ReScanCustomScan = vector_agg_rescan, + .ExplainCustomScan = vector_agg_explain, +}; + +Node * +vector_agg_state_create(CustomScan *cscan) +{ + CustomScanState *state = makeNode(CustomScanState); + state->methods = &exec_methods; + return (Node *) state; +} diff --git a/tsl/src/nodes/vector_agg/vector_agg.h b/tsl/src/nodes/vector_agg/exec.h similarity index 63% rename from tsl/src/nodes/vector_agg/vector_agg.h rename to tsl/src/nodes/vector_agg/exec.h index 2a53a7e6468..61b0837b076 100644 --- a/tsl/src/nodes/vector_agg/vector_agg.h +++ b/tsl/src/nodes/vector_agg/exec.h @@ -6,7 +6,6 @@ #include -#include #include typedef struct VectorAggState @@ -14,13 +13,4 @@ typedef struct VectorAggState CustomScanState custom; } VectorAggState; -typedef struct VectorAggPlan -{ - CustomScan custom; -} VectorAggPlan; - -extern Plan *vector_agg_plan_create(Agg *agg, CustomScan *decompress_chunk); - extern Node *vector_agg_state_create(CustomScan *cscan); - -extern void _vector_agg_init(void); diff --git a/tsl/src/partialize_agg.c b/tsl/src/nodes/vector_agg/plan.c similarity index 63% rename from tsl/src/partialize_agg.c rename to tsl/src/nodes/vector_agg/plan.c index c72f725b058..56a0e47f7e0 100644 --- a/tsl/src/partialize_agg.c +++ b/tsl/src/nodes/vector_agg/plan.c @@ -1,3 +1,4 @@ + /* * This file and its contents are licensed under the Timescale License. 
* Please see the included NOTICE for copyright information and @@ -5,79 +6,129 @@ */ #include -#include -#include -#include + +#include +#include +#include #include #include -#include -#include -#include -#include -#include -#include -#include -#include #include -#include +#include "plan.h" -#include "compression/compression.h" -#include "nodes/decompress_chunk/decompress_chunk.h" -#include "partialize_agg.h" +#include "exec.h" #include "utils.h" -#include "debug_assert.h" -#include "nodes/vector_agg/vector_agg.h" +static struct CustomScanMethods scan_methods = { .CustomName = "VectorAgg", + .CreateCustomScanState = vector_agg_state_create }; + +void +_vector_agg_init(void) +{ + TryRegisterCustomScanMethods(&scan_methods); +} + +/* + * Build an output targetlist for a custom node that just references all the + * custom scan targetlist entries. + */ +static inline List * +build_trivial_custom_output_targetlist(List *scan_targetlist) +{ + List *result = NIL; + + ListCell *lc; + foreach (lc, scan_targetlist) + { + TargetEntry *scan_entry = (TargetEntry *) lfirst(lc); + + Var *var = makeVar(INDEX_VAR, + scan_entry->resno, + exprType((Node *) scan_entry->expr), + exprTypmod((Node *) scan_entry->expr), + exprCollation((Node *) scan_entry->expr), + /* varlevelsup = */ 0); + + TargetEntry *output_entry = makeTargetEntry((Expr *) var, + scan_entry->resno, + scan_entry->resname, + scan_entry->resjunk); + + result = lappend(result, output_entry); + } + + return result; +} + +static Node * +replace_outer_special_vars_mutator(Node *node, void *context) +{ + if (node == NULL) + { + return NULL; + } + + if (!IsA(node, Var)) + { + return expression_tree_mutator(node, replace_outer_special_vars_mutator, context); + } + + Var *var = castNode(Var, node); + if (var->varno != OUTER_VAR) + { + return node; + } + + var = copyObject(var); + var->varno = DatumGetInt32(PointerGetDatum(context)); + return (Node *) var; +} /* - * Check if we can perform the computation of the aggregate in a vectorized manner directly inside - * of the decompress chunk node. If this is possible, the decompress chunk node will emit partial - * aggregates directly, and there is no need for the PostgreSQL aggregation node on top. + * Replace the OUTER_VAR special variables, that are used in the output + * targetlists of aggregation nodes, with the given other varno. */ -bool -apply_vectorized_agg_optimization(PlannerInfo *root, AggPath *aggregation_path, Path *path) +static List * +replace_outer_special_vars(List *input, int target_varno) { - return false; -// -// if (!ts_guc_enable_vectorized_aggregation || !ts_guc_enable_bulk_decompression) -// return false; -// -// Assert(path != NULL); -// Assert(aggregation_path->aggsplit == AGGSPLIT_INITIAL_SERIAL); -// -// if (is_vectorizable_agg_path(root, aggregation_path, path)) -// { -// Assert(ts_is_decompress_chunk_path(path)); -// DecompressChunkPath *decompress_path = (DecompressChunkPath *) castNode(CustomPath, path); -// -// /* Change the output of the path and let the decompress chunk node emit partial aggregates -// * directly */ -// decompress_path->perform_vectorized_aggregation = true; -// -// decompress_path->custom_path.path.pathtarget = aggregation_path->path.pathtarget; -// -// /* The decompress chunk node can perform the aggregation directly. No need for a -//dedicated -// * agg node on top. */ -// return true; -// } -// -// /* PostgreSQL should handle the aggregation. Regular agg node on top is required. 
*/ -// return false; + return castNode(List, + replace_outer_special_vars_mutator((Node *) input, + DatumGetPointer( + Int32GetDatum(target_varno)))); } static Plan * -insert_vector_agg_node(Plan *plan) +vector_agg_plan_create(Agg *agg, CustomScan *decompress_chunk) +{ + CustomScan *custom = (CustomScan *) makeNode(CustomScan); + custom->custom_plans = list_make1(decompress_chunk); + custom->methods = &scan_methods; + + /* + * Note that this is being called from the post-planning hook, and therefore + * after set_plan_refs(). The meaning of output targetlists is different from + * the previous planning stages, and they contain special varnos referencing + * the scan targetlists. + */ + custom->custom_scan_tlist = + replace_outer_special_vars(agg->plan.targetlist, decompress_chunk->scan.scanrelid); + custom->scan.plan.targetlist = + build_trivial_custom_output_targetlist(custom->custom_scan_tlist); + + return (Plan *) custom; +} + +Plan * +try_insert_vector_agg_node(Plan *plan) { if (plan->lefttree) { - plan->lefttree = insert_vector_agg_node(plan->lefttree); + plan->lefttree = try_insert_vector_agg_node(plan->lefttree); } if (plan->righttree) { - plan->righttree = insert_vector_agg_node(plan->righttree); + plan->righttree = try_insert_vector_agg_node(plan->righttree); } if (IsA(plan, Append)) @@ -86,7 +137,7 @@ insert_vector_agg_node(Plan *plan) ListCell *lc; foreach (lc, plans) { - lfirst(lc) = insert_vector_agg_node(lfirst(lc)); + lfirst(lc) = try_insert_vector_agg_node(lfirst(lc)); } } @@ -197,8 +248,8 @@ insert_vector_agg_node(Plan *plan) /* * Check if this particular column is a segmentby or has bulk decompression * enabled. This hook is called after set_plan_refs, and at this stage the - * aggregation node uses OUTER_VAR references into the child scan targetlist, - * so first we have to translate this. + * output targetlist of the aggregation node uses OUTER_VAR references into + * the child scan targetlist, so first we have to translate this. */ Assert(aggregated_var->varno == OUTER_VAR); TargetEntry *decompressed_target_entry = @@ -266,18 +317,3 @@ insert_vector_agg_node(Plan *plan) return vector_agg_plan_create(agg, custom); } - -void -tsl_postprocess_plan(PlannedStmt *stmt) -{ - // mybt(); - // my_print(stmt); - - if (ts_guc_enable_vectorized_aggregation) - { - stmt->planTree = insert_vector_agg_node(stmt->planTree); - } - - // fprintf(stderr, "postprocessed:\n"); - // my_print(stmt->planTree); -} diff --git a/tsl/src/nodes/vector_agg/plan.h b/tsl/src/nodes/vector_agg/plan.h new file mode 100644 index 00000000000..653d9d1e1d0 --- /dev/null +++ b/tsl/src/nodes/vector_agg/plan.h @@ -0,0 +1,18 @@ +/* + * This file and its contents are licensed under the Timescale License. + * Please see the included NOTICE for copyright information and + * LICENSE-TIMESCALE for a copy of the license. + */ + +#include + +#include + +typedef struct VectorAggPlan +{ + CustomScan custom; +} VectorAggPlan; + +extern void _vector_agg_init(void); + +Plan *try_insert_vector_agg_node(Plan *plan); diff --git a/tsl/src/nodes/vector_agg/vector_agg.c b/tsl/src/nodes/vector_agg/vector_agg.c deleted file mode 100644 index b6f04705d7f..00000000000 --- a/tsl/src/nodes/vector_agg/vector_agg.c +++ /dev/null @@ -1,168 +0,0 @@ -/* - * This file and its contents are licensed under the Timescale License. - * Please see the included NOTICE for copyright information and - * LICENSE-TIMESCALE for a copy of the license. 
- */ - -#include - -#include -#include -#include -#include -#include - -#include "vector_agg.h" - -#include "nodes/decompress_chunk/compressed_batch.h" -#include "nodes/decompress_chunk/exec.h" - -static void -vector_agg_begin(CustomScanState *node, EState *estate, int eflags) -{ - CustomScan *cscan = castNode(CustomScan, node->ss.ps.plan); - node->custom_ps = - lappend(node->custom_ps, ExecInitNode(linitial(cscan->custom_plans), estate, eflags)); -} - -static void -vector_agg_end(CustomScanState *node) -{ - ExecEndNode(linitial(node->custom_ps)); -} - -static void -vector_agg_rescan(CustomScanState *node) -{ - if (node->ss.ps.chgParam != NULL) - UpdateChangedParamSet(linitial(node->custom_ps), node->ss.ps.chgParam); - - ExecReScan(linitial(node->custom_ps)); -} - -static TupleTableSlot * -vector_agg_exec(CustomScanState *node) -{ - // return ExecProcNode(linitial(node->custom_ps)); - DecompressChunkState *ds = (DecompressChunkState *) linitial(node->custom_ps); - return decompress_chunk_exec_vector_agg_impl(node, ds); -} - -static void -vector_agg_explain(CustomScanState *node, List *ancestors, ExplainState *es) -{ - /* noop? */ -} - -static struct CustomExecMethods exec_methods = { - .CustomName = "VectorAgg", - .BeginCustomScan = vector_agg_begin, - .ExecCustomScan = vector_agg_exec, - .EndCustomScan = vector_agg_end, - .ReScanCustomScan = vector_agg_rescan, - .ExplainCustomScan = vector_agg_explain, -}; - -static struct CustomScanMethods scan_methods = { .CustomName = "VectorAgg", - .CreateCustomScanState = vector_agg_state_create }; - -static inline List * -CustomBuildTargetList(List *tlist, Index varNo) -{ - List *result_tlist = NIL; - ListCell *lc; - - foreach (lc, tlist) - { - TargetEntry *tle = (TargetEntry *) lfirst(lc); - - Var *var = makeVar(varNo, - tle->resno, - exprType((Node *) tle->expr), - exprTypmod((Node *) tle->expr), - exprCollation((Node *) tle->expr), - 0); - - TargetEntry *newtle = makeTargetEntry((Expr *) var, tle->resno, tle->resname, tle->resjunk); - - result_tlist = lappend(result_tlist, newtle); - } - - return result_tlist; -} - -static Node * -replace_special_vars_mutator(Node *node, void *context) -{ - if (node == NULL) - { - return NULL; - } - - if (!IsA(node, Var)) - { - return expression_tree_mutator(node, replace_special_vars_mutator, context); - } - - Var *var = castNode(Var, node); - if (var->varno != OUTER_VAR) - { - return node; - } - - var = copyObject(var); - var->varno = DatumGetInt32(PointerGetDatum(context)); - return (Node *) var; -} - -static List * -replace_special_vars(List *input, int target_varno) -{ - return castNode(List, - replace_special_vars_mutator((Node *) input, - DatumGetPointer(Int32GetDatum(target_varno)))); -} - -Plan * -vector_agg_plan_create(Agg *agg, CustomScan *decompress_chunk) -{ - CustomScan *custom = (CustomScan *) makeNode(CustomScan); - custom->custom_plans = list_make1(decompress_chunk); - custom->methods = &scan_methods; - custom->scan.plan.targetlist = CustomBuildTargetList(agg->plan.targetlist, INDEX_VAR); - // custom->scan.plan.targetlist = replace_special_vars(agg->plan.targetlist); - // custom->scan.plan.targetlist = agg->plan.targetlist; - // fprintf(stderr, "source agg tagetlist:\n"); - // my_print(agg->plan.targetlist); - // fprintf(stderr, "build targetlist:\n"); - // my_print(custom->scan.plan.targetlist); - // custom->custom_scan_tlist = CustomBuildTargetList(decompress_chunk->scan.plan.targetlist, - // INDEX_VAR); - // custom->custom_scan_tlist = decompress_chunk->scan.plan.targetlist; - // 
custom->custom_scan_tlist = custom->scan.plan.targetlist; - custom->custom_scan_tlist = - replace_special_vars(agg->plan.targetlist, decompress_chunk->scan.scanrelid); - - // custom->scan.plan.lefttree = agg->plan.lefttree; - - // fprintf(stderr, "created:\n"); - // my_print(custom); - - (void) CustomBuildTargetList; - - return (Plan *) custom; -} - -Node * -vector_agg_state_create(CustomScan *cscan) -{ - CustomScanState *state = makeNode(CustomScanState); - state->methods = &exec_methods; - return (Node *) state; -} - -void -_vector_agg_init(void) -{ - TryRegisterCustomScanMethods(&scan_methods); -} diff --git a/tsl/src/partialize_agg.h b/tsl/src/partialize_agg.h deleted file mode 100644 index 925e614ccf1..00000000000 --- a/tsl/src/partialize_agg.h +++ /dev/null @@ -1,11 +0,0 @@ -/* - * This file and its contents are licensed under the Timescale License. - * Please see the included NOTICE for copyright information and - * LICENSE-TIMESCALE for a copy of the license. - */ -#pragma once - -extern bool apply_vectorized_agg_optimization(PlannerInfo *root, AggPath *aggregation_path, - Path *subpath); - -extern void tsl_postprocess_plan(PlannedStmt *stmt); diff --git a/tsl/src/planner.c b/tsl/src/planner.c index e90c4651016..d89d70ec650 100644 --- a/tsl/src/planner.c +++ b/tsl/src/planner.c @@ -22,6 +22,7 @@ #include "nodes/frozen_chunk_dml/frozen_chunk_dml.h" #include "nodes/decompress_chunk/decompress_chunk.h" #include "nodes/gapfill/gapfill.h" +#include "nodes/vector_agg/plan.h" #include "planner.h" #include @@ -202,3 +203,21 @@ tsl_preprocess_query(Query *parse) constify_cagg_watermark(parse); } } + +/* + * Run plan postprocessing optimizations. + */ +void +tsl_postprocess_plan(PlannedStmt *stmt) +{ + // mybt(); + // my_print(stmt); + + if (ts_guc_enable_vectorized_aggregation) + { + stmt->planTree = try_insert_vector_agg_node(stmt->planTree); + } + + // fprintf(stderr, "postprocessed:\n"); + // my_print(stmt->planTree); +} diff --git a/tsl/src/planner.h b/tsl/src/planner.h index d37c32c3bf2..1a53468e872 100644 --- a/tsl/src/planner.h +++ b/tsl/src/planner.h @@ -17,3 +17,4 @@ void tsl_set_rel_pathlist_query(PlannerInfo *, RelOptInfo *, Index, RangeTblEntr void tsl_set_rel_pathlist_dml(PlannerInfo *, RelOptInfo *, Index, RangeTblEntry *, Hypertable *); void tsl_set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeTblEntry *rte); void tsl_preprocess_query(Query *parse); +void tsl_postprocess_plan(PlannedStmt *stmt); From e146937a265bba0d009a47456a8ba2d3bb6661bd Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 28 Mar 2024 16:07:17 +0100 Subject: [PATCH 07/81] remove more of old planning --- .../nodes/decompress_chunk/decompress_chunk.h | 17 ----- tsl/src/nodes/decompress_chunk/exec.c | 62 ++----------------- tsl/src/nodes/decompress_chunk/exec.h | 5 -- tsl/src/nodes/decompress_chunk/planner.c | 32 +--------- tsl/src/nodes/vector_agg/plan.c | 7 --- 5 files changed, 8 insertions(+), 115 deletions(-) diff --git a/tsl/src/nodes/decompress_chunk/decompress_chunk.h b/tsl/src/nodes/decompress_chunk/decompress_chunk.h index a320fc23d11..065c284e6b8 100644 --- a/tsl/src/nodes/decompress_chunk/decompress_chunk.h +++ b/tsl/src/nodes/decompress_chunk/decompress_chunk.h @@ -91,23 +91,6 @@ typedef struct DecompressChunkPath */ DecompressChunkColumnCompression *uncompressed_chunk_attno_to_compression_info; - /* - * Are we able to execute a vectorized aggregation - */ - bool perform_vectorized_aggregation; - - /* - * Columns that are used 
for vectorized aggregates. The list contains for each attribute -1 if - * this is not an vectorized aggregate column or the Oid of the data type of the attribute. - * - * When creating vectorized aggregates, the decompression logic is not able to determine the - * type of the compressed column based on the output column since we emit partial aggregates - * for this attribute and the raw attribute is not found in the targetlist. So, build a map - * with the used data types here, which is used later to create the compression info - * properly. - */ - List *aggregated_column_type; - List *required_compressed_pathkeys; bool needs_sequence_num; bool reverse; diff --git a/tsl/src/nodes/decompress_chunk/exec.c b/tsl/src/nodes/decompress_chunk/exec.c index ec03e315c85..d5a6f9174be 100644 --- a/tsl/src/nodes/decompress_chunk/exec.c +++ b/tsl/src/nodes/decompress_chunk/exec.c @@ -66,7 +66,6 @@ decompress_chunk_state_create(CustomScan *cscan) chunk_state->decompression_map = lsecond(cscan->custom_private); chunk_state->is_segmentby_column = lthird(cscan->custom_private); chunk_state->bulk_decompression_column = lfourth(cscan->custom_private); - chunk_state->aggregated_column_type = lfifth(cscan->custom_private); chunk_state->sortinfo = lsixth(cscan->custom_private); chunk_state->custom_scan_tlist = cscan->custom_scan_tlist; @@ -77,8 +76,6 @@ decompress_chunk_state_create(CustomScan *cscan) chunk_state->decompress_context.reverse = lthird_int(settings); chunk_state->decompress_context.batch_sorted_merge = lfourth_int(settings); chunk_state->decompress_context.enable_bulk_decompression = lfifth_int(settings); - // chunk_state->perform_vectorized_aggregation = false; //lsixth_int(settings); - chunk_state->perform_vectorized_aggregation = lsixth_int(settings); Assert(IsA(cscan->custom_exprs, List)); Assert(list_length(cscan->custom_exprs) == 1); @@ -86,14 +83,6 @@ decompress_chunk_state_create(CustomScan *cscan) Assert(list_length(chunk_state->decompression_map) == list_length(chunk_state->is_segmentby_column)); -#ifdef USE_ASSERT_CHECKING - if (chunk_state->perform_vectorized_aggregation) - { - Assert(list_length(chunk_state->decompression_map) == - list_length(chunk_state->aggregated_column_type)); - } -#endif - return (Node *) chunk_state; } @@ -174,8 +163,6 @@ decompress_chunk_exec_heap(CustomScanState *node) return decompress_chunk_exec_impl(chunk_state, &BatchQueueFunctionsHeap); } -static TupleTableSlot *decompress_chunk_exec_vector_agg(CustomScanState *node); - /* * Complete initialization of the supplied CustomScanState. 
* @@ -294,22 +281,12 @@ decompress_chunk_begin(CustomScanState *node, EState *estate, int eflags) if (column.output_attno > 0) { - if (chunk_state->perform_vectorized_aggregation && - lfirst_int(list_nth_cell(chunk_state->aggregated_column_type, compressed_index)) != - -1) - { - column.typid = lfirst_int( - list_nth_cell(chunk_state->aggregated_column_type, compressed_index)); - } - else - { - /* normal column that is also present in decompressed chunk */ - Form_pg_attribute attribute = - TupleDescAttr(desc, AttrNumberGetAttrOffset(column.output_attno)); + /* normal column that is also present in decompressed chunk */ + Form_pg_attribute attribute = + TupleDescAttr(desc, AttrNumberGetAttrOffset(column.output_attno)); - column.typid = attribute->atttypid; - column.value_bytes = get_typlen(column.typid); - } + column.typid = attribute->atttypid; + column.value_bytes = get_typlen(column.typid); if (list_nth_int(chunk_state->is_segmentby_column, compressed_index)) column.type = SEGMENTBY_COLUMN; @@ -352,13 +329,7 @@ decompress_chunk_begin(CustomScanState *node, EState *estate, int eflags) * Choose which batch queue we are going to use: heap for batch sorted * merge, and one-element FIFO for normal decompression. */ - if (chunk_state->perform_vectorized_aggregation) - { - chunk_state->batch_queue = - batch_queue_fifo_create(num_compressed, &BatchQueueFunctionsFifo); - chunk_state->exec_methods.ExecCustomScan = decompress_chunk_exec_vector_agg; - } - else if (dcontext->batch_sorted_merge) + if (dcontext->batch_sorted_merge) { chunk_state->batch_queue = batch_queue_heap_create(num_compressed, @@ -684,18 +655,6 @@ decompress_chunk_exec_vector_agg_impl(CustomScanState *vector_agg_state, } } -static TupleTableSlot * -decompress_chunk_exec_vector_agg(CustomScanState *node) -{ - DecompressChunkState *chunk_state = (DecompressChunkState *) node; - Assert(!chunk_state->decompress_context.batch_sorted_merge); - Assert(chunk_state->perform_vectorized_aggregation); - - CustomScanState *vector_agg_state = node; - - return decompress_chunk_exec_vector_agg_impl(vector_agg_state, chunk_state); -} - /* * The exec function for the DecompressChunk node. 
It takes the explicit queue * functions pointer as an optimization, to allow these functions to be @@ -710,8 +669,6 @@ decompress_chunk_exec_impl(DecompressChunkState *chunk_state, const BatchQueueFu Assert(bq->funcs == bqfuncs); - Assert(!chunk_state->perform_vectorized_aggregation); - bqfuncs->pop(bq, dcontext); while (bqfuncs->needs_next_batch(bq)) @@ -816,12 +773,5 @@ decompress_chunk_explain(CustomScanState *node, List *ancestors, ExplainState *e chunk_state->decompress_context.enable_bulk_decompression, es); } - - if (chunk_state->perform_vectorized_aggregation) - { - ExplainPropertyBool("Vectorized Aggregation", - chunk_state->perform_vectorized_aggregation, - es); - } } } diff --git a/tsl/src/nodes/decompress_chunk/exec.h b/tsl/src/nodes/decompress_chunk/exec.h index 6da4d9e6637..172672bedcc 100644 --- a/tsl/src/nodes/decompress_chunk/exec.h +++ b/tsl/src/nodes/decompress_chunk/exec.h @@ -20,7 +20,6 @@ typedef struct DecompressChunkState List *decompression_map; List *is_segmentby_column; List *bulk_decompression_column; - List *aggregated_column_type; List *custom_scan_tlist; DecompressContext decompress_context; @@ -33,10 +32,6 @@ typedef struct DecompressChunkState List *sortinfo; - /* Perform calculation of the aggregate directly in the decompress chunk node and emit partials - */ - bool perform_vectorized_aggregation; - /* * For some predicates, we have more efficient implementation that work on * the entire compressed batch in one go. They go to this list, and the rest diff --git a/tsl/src/nodes/decompress_chunk/planner.c b/tsl/src/nodes/decompress_chunk/planner.c index fd8763cd890..48298b34d15 100644 --- a/tsl/src/nodes/decompress_chunk/planner.c +++ b/tsl/src/nodes/decompress_chunk/planner.c @@ -238,18 +238,6 @@ build_decompression_map(PlannerInfo *root, DecompressChunkPath *path, List *scan (DecompressChunkColumnCompression){ .bulk_decompression_possible = bulk_decompression_possible }; } - - if (path->perform_vectorized_aggregation) - { - Assert(list_length(path->custom_path.path.parent->reltarget->exprs) == 1); - Var *var = linitial(path->custom_path.path.parent->reltarget->exprs); - Assert((Index) var->varno == path->custom_path.path.parent->relid); - if (var->varattno == destination_attno_in_uncompressed_chunk) - path->aggregated_column_type = - lappend_int(path->aggregated_column_type, var->vartype); - else - path->aggregated_column_type = lappend_int(path->aggregated_column_type, -1); - } } /* @@ -1057,7 +1045,7 @@ decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *pat dcpath->reverse, dcpath->batch_sorted_merge, enable_bulk_decompression, - dcpath->perform_vectorized_aggregation); + false /* FIXME */); /* * Vectorized quals must go into custom_exprs, because Postgres has to see @@ -1070,27 +1058,11 @@ decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *pat dcpath->decompression_map, dcpath->is_segmentby_column, dcpath->bulk_decompression_column, - dcpath->aggregated_column_type, + NIL /* FIXME */, sort_options); /* input target list */ decompress_plan->custom_scan_tlist = NIL; - /* Make PostgreSQL aware that we emit partials. In apply_vectorized_agg_optimization the - * pathtarget of the node is changed; the decompress chunk node now emits prtials directly. - * - * We have to set a custom_scan_tlist to make sure tlist_matches_tupdesc is true to prevent the - * call of ExecAssignProjectionInfo in ExecConditionalAssignProjectionInfo. 
Otherwise, - * PostgreSQL will error out since scan nodes are not intended to emit partial aggregates. - */ - if (dcpath->perform_vectorized_aggregation) - { - decompress_plan->custom_scan_tlist = decompressed_tlist; - decompress_plan->scan.plan.targetlist = decompressed_tlist; - - // fprintf(stderr, "when assigned, the decompressed tlist was:\n"); - // my_print(decompressed_tlist); - } - return &decompress_plan->scan.plan; } diff --git a/tsl/src/nodes/vector_agg/plan.c b/tsl/src/nodes/vector_agg/plan.c index 56a0e47f7e0..7074c3ecfad 100644 --- a/tsl/src/nodes/vector_agg/plan.c +++ b/tsl/src/nodes/vector_agg/plan.c @@ -304,13 +304,6 @@ try_insert_vector_agg_node(Plan *plan) return plan; } - // bool perform_vectorized_aggregation = list_nth_int(linitial(custom->custom_private), 5); - // if (!perform_vectorized_aggregation) - // { - // fprintf(stderr, "no vectorized aggregation\n"); - // return plan; - // } - fprintf(stderr, "found!!!\n"); // my_print(plan); // mybt(); From 753bf0de70aacb81d57d0cef878229e20cf318da Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 28 Mar 2024 16:08:32 +0100 Subject: [PATCH 08/81] typos --- tsl/src/nodes/vector_agg/plan.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tsl/src/nodes/vector_agg/plan.c b/tsl/src/nodes/vector_agg/plan.c index 7074c3ecfad..b1f890a4491 100644 --- a/tsl/src/nodes/vector_agg/plan.c +++ b/tsl/src/nodes/vector_agg/plan.c @@ -205,7 +205,7 @@ try_insert_vector_agg_node(Plan *plan) /* * No HAVING support. Probably we can't have it in this node in any case, * because we only replace the partial aggregation nodes which can't - * chech HAVING. + * check the HAVING clause. */ return plan; } @@ -293,7 +293,7 @@ try_insert_vector_agg_node(Plan *plan) /* * We support vectorized aggregation either for segmentby columns or for - * columns wiht bulk decompression enabled. + * columns with bulk decompression enabled. */ if (!list_nth_int(is_segmentby_column, compressed_column_index) && !(bulk_decompression_enabled_for_column && bulk_decompression_enabled_globally)) From 4a4f20bce4aaffb5d5a1070604eb319b39af3a12 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 28 Mar 2024 16:35:17 +0100 Subject: [PATCH 09/81] use enum indexes for settings --- src/import/CMakeLists.txt | 1 + src/import/list.c | 88 ++++++++++++++++++++++++ src/import/list.h | 17 +++++ tsl/src/nodes/decompress_chunk/exec.c | 28 ++++---- tsl/src/nodes/decompress_chunk/planner.c | 28 ++++---- tsl/src/nodes/decompress_chunk/planner.h | 20 ++++++ tsl/src/nodes/vector_agg/plan.c | 10 +-- 7 files changed, 164 insertions(+), 28 deletions(-) create mode 100644 src/import/list.c create mode 100644 src/import/list.h diff --git a/src/import/CMakeLists.txt b/src/import/CMakeLists.txt index f6e08a0c6a4..9d837feb045 100644 --- a/src/import/CMakeLists.txt +++ b/src/import/CMakeLists.txt @@ -1,6 +1,7 @@ set(SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/allpaths.c ${CMAKE_CURRENT_SOURCE_DIR}/ht_hypertable_modify.c + ${CMAKE_CURRENT_SOURCE_DIR}/list.c ${CMAKE_CURRENT_SOURCE_DIR}/planner.c ${CMAKE_CURRENT_SOURCE_DIR}/ts_explain.c) diff --git a/src/import/list.c b/src/import/list.c new file mode 100644 index 00000000000..2c8e3f5de4e --- /dev/null +++ b/src/import/list.c @@ -0,0 +1,88 @@ +/* + * This file and its contents are licensed under the Apache License 2.0. + * Please see the included NOTICE for copyright information and + * LICENSE-APACHE for a copy of the license. 
+ */ + +#include + +#include +#include + +#include "import/list.h" + +/* + * This file contains source code that was copied and/or modified from + * the PostgreSQL database, which is licensed under the open-source + * PostgreSQL License. Please see the NOTICE at the top level + * directory for a copy of the PostgreSQL License. + * + * Copied from PostgreSQL 15.0 (2a7ce2e2ce474504a707ec03e128fde66cfb8b48) + * without modifications. + */ + +/* Overhead for the fixed part of a List header, measured in ListCells */ +#define LIST_HEADER_OVERHEAD ((int) ((offsetof(List, initial_elements) - 1) / sizeof(ListCell) + 1)) + +/* + * Return a freshly allocated List with room for at least min_size cells. + * + * Since empty non-NIL lists are invalid, new_list() sets the initial length + * to min_size, effectively marking that number of cells as valid; the caller + * is responsible for filling in their data. + */ +List * +pg_new_list(NodeTag type, int min_size) +{ + List *newlist; + int max_size; + + Assert(min_size > 0); + + /* + * We allocate all the requested cells, and possibly some more, as part of + * the same palloc request as the List header. This is a big win for the + * typical case of short fixed-length lists. It can lose if we allocate a + * moderately long list and then it gets extended; we'll be wasting more + * initial_elements[] space than if we'd made the header small. However, + * rounding up the request as we do in the normal code path provides some + * defense against small extensions. + */ + +#ifndef DEBUG_LIST_MEMORY_USAGE + + /* + * Normally, we set up a list with some extra cells, to allow it to grow + * without a repalloc. Prefer cell counts chosen to make the total + * allocation a power-of-2, since palloc would round it up to that anyway. + * (That stops being true for very large allocations, but very long lists + * are infrequent, so it doesn't seem worth special logic for such cases.) + * + * The minimum allocation is 8 ListCell units, providing either 4 or 5 + * available ListCells depending on the machine's word width. Counting + * palloc's overhead, this uses the same amount of space as a one-cell + * list did in the old implementation, and less space for any longer list. + * + * We needn't worry about integer overflow; no caller passes min_size + * that's more than twice the size of an existing list, so the size limits + * within palloc will ensure that we don't overflow here. + */ + max_size = pg_nextpower2_32(Max(8, min_size + LIST_HEADER_OVERHEAD)); + max_size -= LIST_HEADER_OVERHEAD; +#else + + /* + * For debugging, don't allow any extra space. This forces any cell + * addition to go through enlarge_list() and thus move the existing data. + */ + max_size = min_size; +#endif + + newlist = (List *) palloc(offsetof(List, initial_elements) + max_size * sizeof(ListCell)); + newlist->type = type; + newlist->length = min_size; + newlist->max_length = max_size; + newlist->elements = newlist->initial_elements; + + return newlist; +} diff --git a/src/import/list.h b/src/import/list.h new file mode 100644 index 00000000000..3506b78b4fa --- /dev/null +++ b/src/import/list.h @@ -0,0 +1,17 @@ +/* + * This file and its contents are licensed under the Apache License 2.0. + * Please see the included NOTICE for copyright information and + * LICENSE-APACHE for a copy of the license. 
+ */ +#pragma once + +#include "export.h" + +/* + * This file contains source code that was copied and/or modified from + * the PostgreSQL database, which is licensed under the open-source + * PostgreSQL License. Please see the NOTICE at the top level + * directory for a copy of the PostgreSQL License. + */ + +extern TSDLLEXPORT List *pg_new_list(NodeTag type, int min_size); diff --git a/tsl/src/nodes/decompress_chunk/exec.c b/tsl/src/nodes/decompress_chunk/exec.c index d5a6f9174be..10990dc1662 100644 --- a/tsl/src/nodes/decompress_chunk/exec.c +++ b/tsl/src/nodes/decompress_chunk/exec.c @@ -61,21 +61,25 @@ decompress_chunk_state_create(CustomScan *cscan) chunk_state->csstate.methods = &chunk_state->exec_methods; Assert(IsA(cscan->custom_private, List)); - Assert(list_length(cscan->custom_private) == 6); - List *settings = linitial(cscan->custom_private); - chunk_state->decompression_map = lsecond(cscan->custom_private); - chunk_state->is_segmentby_column = lthird(cscan->custom_private); - chunk_state->bulk_decompression_column = lfourth(cscan->custom_private); - chunk_state->sortinfo = lsixth(cscan->custom_private); + Assert(list_length(cscan->custom_private) == DCP_Count); + List *settings = list_nth(cscan->custom_private, DCP_Settings); + chunk_state->decompression_map = list_nth(cscan->custom_private, DCP_DecompressionMap); + chunk_state->is_segmentby_column = list_nth(cscan->custom_private, DCP_IsSegmentbyColumn); + chunk_state->bulk_decompression_column = + list_nth(cscan->custom_private, DCP_BulkDecompressionColumn); + chunk_state->sortinfo = list_nth(cscan->custom_private, DCP_SortInfo); + chunk_state->custom_scan_tlist = cscan->custom_scan_tlist; Assert(IsA(settings, IntList)); - Assert(list_length(settings) == 6); - chunk_state->hypertable_id = linitial_int(settings); - chunk_state->chunk_relid = lsecond_int(settings); - chunk_state->decompress_context.reverse = lthird_int(settings); - chunk_state->decompress_context.batch_sorted_merge = lfourth_int(settings); - chunk_state->decompress_context.enable_bulk_decompression = lfifth_int(settings); + Assert(list_length(settings) == DCS_Count); + chunk_state->hypertable_id = list_nth_int(settings, DCS_HypertableId); + chunk_state->chunk_relid = list_nth_int(settings, DCS_ChunkRelid); + chunk_state->decompress_context.reverse = list_nth_int(settings, DCS_Reverse); + chunk_state->decompress_context.batch_sorted_merge = + list_nth_int(settings, DCS_BatchSortedMerge); + chunk_state->decompress_context.enable_bulk_decompression = + list_nth_int(settings, DCS_EnableBulkDecompression); Assert(IsA(cscan->custom_exprs, List)); Assert(list_length(cscan->custom_exprs) == 1); diff --git a/tsl/src/nodes/decompress_chunk/planner.c b/tsl/src/nodes/decompress_chunk/planner.c index 48298b34d15..4972107a2be 100644 --- a/tsl/src/nodes/decompress_chunk/planner.c +++ b/tsl/src/nodes/decompress_chunk/planner.c @@ -29,6 +29,7 @@ #include "custom_type_cache.h" #include "guc.h" #include "import/planner.h" +#include "import/list.h" #include "nodes/decompress_chunk/decompress_chunk.h" #include "nodes/decompress_chunk/exec.h" #include "nodes/decompress_chunk/planner.h" @@ -1040,12 +1041,12 @@ decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *pat } #endif - settings = list_make6_int(dcpath->info->hypertable_id, - dcpath->info->chunk_rte->relid, - dcpath->reverse, - dcpath->batch_sorted_merge, - enable_bulk_decompression, - false /* FIXME */); + settings = pg_new_list(T_IntList, DCS_Count); + lfirst_int(list_nth_cell(settings, 
DCS_HypertableId)) = dcpath->info->hypertable_id; + lfirst_int(list_nth_cell(settings, DCS_ChunkRelid)) = dcpath->info->chunk_rte->relid; + lfirst_int(list_nth_cell(settings, DCS_Reverse)) = dcpath->reverse; + lfirst_int(list_nth_cell(settings, DCS_BatchSortedMerge)) = dcpath->batch_sorted_merge; + lfirst_int(list_nth_cell(settings, DCS_EnableBulkDecompression)) = enable_bulk_decompression; /* * Vectorized quals must go into custom_exprs, because Postgres has to see @@ -1054,12 +1055,15 @@ decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *pat */ decompress_plan->custom_exprs = list_make1(vectorized_quals); - decompress_plan->custom_private = list_make6(settings, - dcpath->decompression_map, - dcpath->is_segmentby_column, - dcpath->bulk_decompression_column, - NIL /* FIXME */, - sort_options); + decompress_plan->custom_private = pg_new_list(T_List, DCP_Count); + lfirst(list_nth_cell(decompress_plan->custom_private, DCP_Settings)) = settings; + lfirst(list_nth_cell(decompress_plan->custom_private, DCP_DecompressionMap)) = + dcpath->decompression_map; + lfirst(list_nth_cell(decompress_plan->custom_private, DCP_IsSegmentbyColumn)) = + dcpath->is_segmentby_column; + lfirst(list_nth_cell(decompress_plan->custom_private, DCP_BulkDecompressionColumn)) = + dcpath->bulk_decompression_column; + lfirst(list_nth_cell(decompress_plan->custom_private, DCP_SortInfo)) = sort_options; /* input target list */ decompress_plan->custom_scan_tlist = NIL; diff --git a/tsl/src/nodes/decompress_chunk/planner.h b/tsl/src/nodes/decompress_chunk/planner.h index 95098213bdb..a090af30d15 100644 --- a/tsl/src/nodes/decompress_chunk/planner.h +++ b/tsl/src/nodes/decompress_chunk/planner.h @@ -7,6 +7,26 @@ #include +typedef enum +{ + DCS_HypertableId = 0, + DCS_ChunkRelid = 1, + DCS_Reverse = 2, + DCS_BatchSortedMerge = 3, + DCS_EnableBulkDecompression = 4, + DCS_Count +} DecompressChunkSettingsIndex; + +typedef enum +{ + DCP_Settings = 0, + DCP_DecompressionMap = 1, + DCP_IsSegmentbyColumn = 2, + DCP_BulkDecompressionColumn = 3, + DCP_SortInfo = 4, + DCP_Count +} DecompressChunkPrivateIndex; + extern Plan *decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *path, List *tlist, List *clauses, List *custom_plans); diff --git a/tsl/src/nodes/vector_agg/plan.c b/tsl/src/nodes/vector_agg/plan.c index b1f890a4491..720e50725d9 100644 --- a/tsl/src/nodes/vector_agg/plan.c +++ b/tsl/src/nodes/vector_agg/plan.c @@ -18,6 +18,7 @@ #include "exec.h" #include "utils.h" +#include "nodes/decompress_chunk/planner.h" static struct CustomScanMethods scan_methods = { .CustomName = "VectorAgg", .CreateCustomScanState = vector_agg_state_create }; @@ -271,9 +272,9 @@ try_insert_vector_agg_node(Plan *plan) * Now, we have to translate the decompressed varno into the compressed * column index, to check if the column supports bulk decompression. 
*/ - List *decompression_map = list_nth(custom->custom_private, 1); - List *is_segmentby_column = list_nth(custom->custom_private, 2); - List *bulk_decompression_column = list_nth(custom->custom_private, 3); + List *decompression_map = list_nth(custom->custom_private, DCP_DecompressionMap); + List *is_segmentby_column = list_nth(custom->custom_private, DCP_IsSegmentbyColumn); + List *bulk_decompression_column = list_nth(custom->custom_private, DCP_BulkDecompressionColumn); int compressed_column_index = 0; for (; compressed_column_index < list_length(decompression_map); compressed_column_index++) { @@ -289,7 +290,8 @@ try_insert_vector_agg_node(Plan *plan) /* Bulk decompression can also be disabled globally. */ List *settings = linitial(custom->custom_private); - const bool bulk_decompression_enabled_globally = list_nth_int(settings, 4); + const bool bulk_decompression_enabled_globally = + list_nth_int(settings, DCS_EnableBulkDecompression); /* * We support vectorized aggregation either for segmentby columns or for From beba737473ed70702fa46d13e27950ffd156c9a2 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 28 Mar 2024 16:47:00 +0100 Subject: [PATCH 10/81] cleanup --- tsl/src/nodes/decompress_chunk/exec.c | 5 ---- tsl/src/nodes/vector_agg/plan.c | 39 ++++++++++++++++----------- tsl/src/planner.c | 6 ----- 3 files changed, 23 insertions(+), 27 deletions(-) diff --git a/tsl/src/nodes/decompress_chunk/exec.c b/tsl/src/nodes/decompress_chunk/exec.c index 10990dc1662..acbaadd3e37 100644 --- a/tsl/src/nodes/decompress_chunk/exec.c +++ b/tsl/src/nodes/decompress_chunk/exec.c @@ -634,11 +634,6 @@ decompress_chunk_exec_vector_agg_impl(CustomScanState *vector_agg_state, } /* Determine which kind of vectorized aggregation we should perform */ - // fprintf(stderr, "output tlist:\n"); - // my_print(decompress_state->csstate.ss.ps.plan->targetlist); - // fprintf(stderr, "custom scan tlist:\n"); - // my_print(decompress_state->custom_scan_tlist); - TargetEntry *tlentry = (TargetEntry *) linitial(aggregated_tlist); Assert(IsA(tlentry->expr, Aggref)); Aggref *aggref = castNode(Aggref, tlentry->expr); diff --git a/tsl/src/nodes/vector_agg/plan.c b/tsl/src/nodes/vector_agg/plan.c index 720e50725d9..c700e75bb85 100644 --- a/tsl/src/nodes/vector_agg/plan.c +++ b/tsl/src/nodes/vector_agg/plan.c @@ -98,6 +98,10 @@ replace_outer_special_vars(List *input, int target_varno) Int32GetDatum(target_varno)))); } +/* + * Create a vectorized aggregation node to replace the given partial aggregation + * node. + */ static Plan * vector_agg_plan_create(Agg *agg, CustomScan *decompress_chunk) { @@ -119,6 +123,10 @@ vector_agg_plan_create(Agg *agg, CustomScan *decompress_chunk) return (Plan *) custom; } +/* + * Where possible, replace the partial aggregation plan nodes with our own + * vectorized aggregation node. The replacement is done in-place. + */ Plan * try_insert_vector_agg_node(Plan *plan) { @@ -147,33 +155,37 @@ try_insert_vector_agg_node(Plan *plan) return plan; } - fprintf(stderr, "found agg!\n"); - Agg *agg = castNode(Agg, plan); if (agg->aggsplit != AGGSPLIT_INITIAL_SERIAL) { - fprintf(stderr, "wrong split %d\n", agg->aggsplit); + /* Can only vectorize partial aggregation node. */ return plan; } if (agg->plan.lefttree == NULL) { - fprintf(stderr, "no leftnode?\n"); + /* + * Not sure what this would mean, but check for it just to be on the + * safe side because we can effectively see any possible plan here. 
+ */ return plan; } if (!IsA(agg->plan.lefttree, CustomScan)) { - fprintf(stderr, "not custom\n"); - // my_print(agg->plan.lefttree); + /* + * Should have a Custom Scan under aggregation. + */ return plan; } CustomScan *custom = castNode(CustomScan, agg->plan.lefttree); if (strcmp(custom->methods->CustomName, "DecompressChunk") != 0) { - fprintf(stderr, "not decompress chunk\n"); + /* + * It should be our DecompressChunk node. + */ return plan; } @@ -244,7 +256,6 @@ try_insert_vector_agg_node(Plan *plan) return plan; } Var *aggregated_var = castNode(Var, argument->expr); - // my_print(aggregated_var); /* * Check if this particular column is a segmentby or has bulk decompression @@ -255,7 +266,6 @@ try_insert_vector_agg_node(Plan *plan) Assert(aggregated_var->varno == OUTER_VAR); TargetEntry *decompressed_target_entry = list_nth(custom->scan.plan.targetlist, AttrNumberGetAttrOffset(aggregated_var->varattno)); - // my_print(decompressed_target_entry); if (!IsA(decompressed_target_entry->expr, Var)) { @@ -266,7 +276,6 @@ try_insert_vector_agg_node(Plan *plan) return plan; } Var *decompressed_var = castNode(Var, decompressed_target_entry->expr); - // my_print(decompressed_var); /* * Now, we have to translate the decompressed varno into the compressed @@ -301,14 +310,12 @@ try_insert_vector_agg_node(Plan *plan) !(bulk_decompression_enabled_for_column && bulk_decompression_enabled_globally)) { /* Vectorized aggregation not possible for this particular column. */ - fprintf(stderr, "compressed column index %d\n", compressed_column_index); - // my_print(bulk_decompression_column); return plan; } - fprintf(stderr, "found!!!\n"); - // my_print(plan); - // mybt(); - + /* + * Finally, all requirements are satisfied and we can vectorize this partial + * aggregation node. + */ return vector_agg_plan_create(agg, custom); } diff --git a/tsl/src/planner.c b/tsl/src/planner.c index d89d70ec650..c92084923ee 100644 --- a/tsl/src/planner.c +++ b/tsl/src/planner.c @@ -210,14 +210,8 @@ tsl_preprocess_query(Query *parse) void tsl_postprocess_plan(PlannedStmt *stmt) { - // mybt(); - // my_print(stmt); - if (ts_guc_enable_vectorized_aggregation) { stmt->planTree = try_insert_vector_agg_node(stmt->planTree); } - - // fprintf(stderr, "postprocessed:\n"); - // my_print(stmt->planTree); } From 2dbda15b9365d9312b9b90c48ea662b752abbe0f Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 28 Mar 2024 16:47:13 +0100 Subject: [PATCH 11/81] benchmark separate vectorized agg (2024-03-28 no. 1) From 175cbf214d7af79f409d44f8c872174ee684f65d Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 28 Mar 2024 17:08:29 +0100 Subject: [PATCH 12/81] split out common code --- tsl/src/nodes/decompress_chunk/exec.c | 55 +++++++++++---------------- 1 file changed, 22 insertions(+), 33 deletions(-) diff --git a/tsl/src/nodes/decompress_chunk/exec.c b/tsl/src/nodes/decompress_chunk/exec.c index acbaadd3e37..c5f98c2e270 100644 --- a/tsl/src/nodes/decompress_chunk/exec.c +++ b/tsl/src/nodes/decompress_chunk/exec.c @@ -438,22 +438,23 @@ perform_vectorized_sum_int4(CustomScanState *vector_agg_state, Aggref *aggref, int64 result_sum = 0; - if (value_column_description->type == SEGMENTBY_COLUMN) + while (true) { - /* - * To calculate the sum for a segment by value, we need to multiply the value of the segment - * by column with the number of compressed tuples in this batch. 
- */ - while (true) + TupleTableSlot *compressed_slot = + ExecProcNode(linitial(decompress_state->csstate.custom_ps)); + + if (TupIsNull(compressed_slot)) { - TupleTableSlot *compressed_slot = - ExecProcNode(linitial(decompress_state->csstate.custom_ps)); + /* All values are processed. */ + break; + } - if (TupIsNull(compressed_slot)) - { - /* All segment by values are processed. */ - break; - } + if (value_column_description->type == SEGMENTBY_COLUMN) + { + /* + * To calculate the sum for a segment by value, we need to multiply the value of the + * segment by column with the number of compressed tuples in this batch. + */ MemoryContext old_mctx = MemoryContextSwitchTo(batch_state->per_batch_context); MemoryContextReset(batch_state->per_batch_context); @@ -496,22 +497,11 @@ perform_vectorized_sum_int4(CustomScanState *vector_agg_state, Aggref *aggref, } MemoryContextSwitchTo(old_mctx); } - } - else if (value_column_description->type == COMPRESSED_COLUMN) - { - Assert(dcontext->enable_bulk_decompression); - Assert(value_column_description->bulk_decompression_supported); - Assert(list_length(aggref->args) == 1); - - while (true) + else if (value_column_description->type == COMPRESSED_COLUMN) { - TupleTableSlot *compressed_slot = - ExecProcNode(linitial(decompress_state->csstate.custom_ps)); - if (TupIsNull(compressed_slot)) - { - /* All compressed batches are processed. */ - break; - } + Assert(dcontext->enable_bulk_decompression); + Assert(value_column_description->bulk_decompression_supported); + Assert(list_length(aggref->args) == 1); MemoryContext old_mctx = MemoryContextSwitchTo(batch_state->per_batch_context); MemoryContextReset(batch_state->per_batch_context); @@ -581,12 +571,11 @@ perform_vectorized_sum_int4(CustomScanState *vector_agg_state, Aggref *aggref, errmsg("bigint out of range"))); MemoryContextSwitchTo(old_mctx); } + else + { + elog(ERROR, "unsupported column type"); + } } - else - { - elog(ERROR, "unsupported column type"); - } - /* Use Int64GetDatum to store the result since a 64-bit value is not pass-by-value on 32-bit * systems */ aggregated_slot->tts_values[0] = Int64GetDatum(result_sum); From 21faf6e19bbf15f587e79892ee9fc58003cd94ce Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 28 Mar 2024 17:42:59 +0100 Subject: [PATCH 13/81] show costs in explain --- tsl/src/nodes/vector_agg/plan.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tsl/src/nodes/vector_agg/plan.c b/tsl/src/nodes/vector_agg/plan.c index c700e75bb85..3663ca38b2e 100644 --- a/tsl/src/nodes/vector_agg/plan.c +++ b/tsl/src/nodes/vector_agg/plan.c @@ -120,6 +120,16 @@ vector_agg_plan_create(Agg *agg, CustomScan *decompress_chunk) custom->scan.plan.targetlist = build_trivial_custom_output_targetlist(custom->custom_scan_tlist); + /* + * Copy the costs from the normal aggregation node, so that they show up in + * the EXPLAIN output. They are not used for any other purposes, because + * this hook is called after the planning is finished. 
+ */ + custom->scan.plan.plan_rows = agg->plan.plan_rows; + custom->scan.plan.plan_width = agg->plan.plan_width; + custom->scan.plan.startup_cost = agg->plan.startup_cost; + custom->scan.plan.total_cost = agg->plan.total_cost; + return (Plan *) custom; } From fa2fb4d566cd368293cb43fac824db5028ab2017 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 28 Mar 2024 17:52:28 +0100 Subject: [PATCH 14/81] wrong prefix --- src/import/list.c | 2 +- src/import/list.h | 2 +- tsl/src/nodes/decompress_chunk/planner.c | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/import/list.c b/src/import/list.c index 2c8e3f5de4e..fb420d89a46 100644 --- a/src/import/list.c +++ b/src/import/list.c @@ -32,7 +32,7 @@ * is responsible for filling in their data. */ List * -pg_new_list(NodeTag type, int min_size) +ts_new_list(NodeTag type, int min_size) { List *newlist; int max_size; diff --git a/src/import/list.h b/src/import/list.h index 3506b78b4fa..28b45d67be2 100644 --- a/src/import/list.h +++ b/src/import/list.h @@ -14,4 +14,4 @@ * directory for a copy of the PostgreSQL License. */ -extern TSDLLEXPORT List *pg_new_list(NodeTag type, int min_size); +extern TSDLLEXPORT List *ts_new_list(NodeTag type, int min_size); diff --git a/tsl/src/nodes/decompress_chunk/planner.c b/tsl/src/nodes/decompress_chunk/planner.c index 4972107a2be..a6b0412a5e4 100644 --- a/tsl/src/nodes/decompress_chunk/planner.c +++ b/tsl/src/nodes/decompress_chunk/planner.c @@ -1041,7 +1041,7 @@ decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *pat } #endif - settings = pg_new_list(T_IntList, DCS_Count); + settings = ts_new_list(T_IntList, DCS_Count); lfirst_int(list_nth_cell(settings, DCS_HypertableId)) = dcpath->info->hypertable_id; lfirst_int(list_nth_cell(settings, DCS_ChunkRelid)) = dcpath->info->chunk_rte->relid; lfirst_int(list_nth_cell(settings, DCS_Reverse)) = dcpath->reverse; @@ -1055,7 +1055,7 @@ decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *pat */ decompress_plan->custom_exprs = list_make1(vectorized_quals); - decompress_plan->custom_private = pg_new_list(T_List, DCP_Count); + decompress_plan->custom_private = ts_new_list(T_List, DCP_Count); lfirst(list_nth_cell(decompress_plan->custom_private, DCP_Settings)) = settings; lfirst(list_nth_cell(decompress_plan->custom_private, DCP_DecompressionMap)) = dcpath->decompression_map; From 30a60694a93eac759e7a85eee6dab0042fb5fcfe Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Fri, 29 Mar 2024 11:47:59 +0100 Subject: [PATCH 15/81] Remove temporary data from DecompressChunkPath It's a little confusing because they only live during the creation of decompression plan. Put them into a separate struct instead. --- .../nodes/decompress_chunk/decompress_chunk.h | 35 ---- tsl/src/nodes/decompress_chunk/planner.c | 171 ++++++++++++------ tsl/src/nodes/decompress_chunk/planner.h | 3 +- 3 files changed, 114 insertions(+), 95 deletions(-) diff --git a/tsl/src/nodes/decompress_chunk/decompress_chunk.h b/tsl/src/nodes/decompress_chunk/decompress_chunk.h index 065c284e6b8..96d335a84bd 100644 --- a/tsl/src/nodes/decompress_chunk/decompress_chunk.h +++ b/tsl/src/nodes/decompress_chunk/decompress_chunk.h @@ -55,41 +55,6 @@ typedef struct DecompressChunkPath { CustomPath custom_path; CompressionInfo *info; - /* - * decompression_map maps targetlist entries of the compressed scan to tuple - * attribute number of the uncompressed chunk. 
Negative values are special - * columns in the compressed scan that do not have a representation in the - * uncompressed chunk, but are still used for decompression. - */ - List *decompression_map; - - /* - * This Int list is parallel to the compressed scan targetlist, just like - * the above one. The value is true if a given targetlist entry is a - * segmentby column, false otherwise. Has the same length as the above list. - * We have to use the parallel lists and not a list of structs, because the - * Plans have to be copyable by the Postgres _copy functions, and we can't - * do that for a custom struct. - */ - List *is_segmentby_column; - - /* - * Same structure as above, says whether we support bulk decompression for this - * column. - */ - List *bulk_decompression_column; - - /* - * If we produce at least some columns that support bulk decompression. - */ - bool have_bulk_decompression_columns; - - /* - * Maps the uncompressed chunk attno to the respective column compression - * info. This lives only during planning so that we can understand on which - * columns we can apply vectorized quals. - */ - DecompressChunkColumnCompression *uncompressed_chunk_attno_to_compression_info; List *required_compressed_pathkeys; bool needs_sequence_num; diff --git a/tsl/src/nodes/decompress_chunk/planner.c b/tsl/src/nodes/decompress_chunk/planner.c index a6b0412a5e4..071985a1c79 100644 --- a/tsl/src/nodes/decompress_chunk/planner.c +++ b/tsl/src/nodes/decompress_chunk/planner.c @@ -64,18 +64,61 @@ check_for_system_columns(Bitmapset *attrs_used) } } +/* + * Scratch space for mapping out the decompressed columns. + */ +typedef struct +{ + /* + * decompression_map maps targetlist entries of the compressed scan to tuple + * attribute number of the uncompressed chunk. Negative values are special + * columns in the compressed scan that do not have a representation in the + * uncompressed chunk, but are still used for decompression. + */ + List *decompression_map; + + /* + * This Int list is parallel to the compressed scan targetlist, just like + * the above one. The value is true if a given targetlist entry is a + * segmentby column, false otherwise. Has the same length as the above list. + * We have to use the parallel lists and not a list of structs, because the + * Plans have to be copyable by the Postgres _copy functions, and we can't + * do that for a custom struct. + */ + List *is_segmentby_column; + + /* + * Same structure as above, says whether we support bulk decompression for this + * column. + */ + List *bulk_decompression_column; + + /* + * If we produce at least some columns that support bulk decompression. + */ + bool have_bulk_decompression_columns; + + /* + * Maps the uncompressed chunk attno to the respective column compression + * info. This lives only during planning so that we can understand on which + * columns we can apply vectorized quals. + */ + DecompressChunkColumnCompression *uncompressed_chunk_attno_to_compression_info; +} DecompressionMapContext; + /* * Given the scan targetlist and the bitmapset of the needed columns, determine * which scan columns become which decompressed columns (fill decompression_map). * - * Note that the chunk_attrs_needed bitmap is offset by the + * Note that the uncompressed_attrs_needed bitmap is offset by the * FirstLowInvalidHeapAttributeNumber, similar to RelOptInfo.attr_needed. This * allows to encode the requirement for system columns, which have negative * attnos. 
*/ static void -build_decompression_map(PlannerInfo *root, DecompressChunkPath *path, List *scan_tlist, - Bitmapset *chunk_attrs_needed) +build_decompression_map(PlannerInfo *root, DecompressionMapContext *context, + DecompressChunkPath *path, List *scan_tlist, + Bitmapset *uncompressed_attrs_needed) { /* * Track which normal and metadata columns we were able to find in the @@ -83,7 +126,8 @@ build_decompression_map(PlannerInfo *root, DecompressChunkPath *path, List *scan */ bool missing_count = true; bool missing_sequence = path->needs_sequence_num; - Bitmapset *chunk_attrs_found = NULL, *selectedCols = NULL; + Bitmapset *uncompressed_attrs_found = NULL; + Bitmapset *selectedCols = NULL; #if PG16_LT selectedCols = path->info->ht_rte->selectedCols; @@ -108,25 +152,25 @@ build_decompression_map(PlannerInfo *root, DecompressChunkPath *path, List *scan * be added at decompression time. Always mark it as found. */ if (bms_is_member(TableOidAttributeNumber - FirstLowInvalidHeapAttributeNumber, - chunk_attrs_needed)) + uncompressed_attrs_needed)) { - chunk_attrs_found = - bms_add_member(chunk_attrs_found, + uncompressed_attrs_found = + bms_add_member(uncompressed_attrs_found, TableOidAttributeNumber - FirstLowInvalidHeapAttributeNumber); } ListCell *lc; - path->uncompressed_chunk_attno_to_compression_info = - palloc0(sizeof(*path->uncompressed_chunk_attno_to_compression_info) * + context->uncompressed_chunk_attno_to_compression_info = + palloc0(sizeof(*context->uncompressed_chunk_attno_to_compression_info) * (path->info->chunk_rel->max_attr + 1)); /* * Go over the scan targetlist and determine to which output column each * scan column goes, saving other additional info as we do that. */ - path->have_bulk_decompression_columns = false; - path->decompression_map = NIL; + context->have_bulk_decompression_columns = false; + context->decompression_map = NIL; foreach (lc, scan_tlist) { TargetEntry *target = (TargetEntry *) lfirst(lc); @@ -153,33 +197,33 @@ build_decompression_map(PlannerInfo *root, DecompressChunkPath *path, List *scan const char *column_name = get_attname(path->info->compressed_rte->relid, compressed_attno, /* missing_ok = */ false); - AttrNumber chunk_attno = get_attnum(path->info->chunk_rte->relid, column_name); + AttrNumber uncompressed_attno = get_attnum(path->info->chunk_rte->relid, column_name); - AttrNumber destination_attno_in_uncompressed_chunk = 0; - if (chunk_attno != InvalidAttrNumber) + AttrNumber destination_attno = 0; + if (uncompressed_attno != InvalidAttrNumber) { /* * Normal column, not a metadata column. */ - Assert(chunk_attno != InvalidAttrNumber); + Assert(uncompressed_attno != InvalidAttrNumber); - if (bms_is_member(0 - FirstLowInvalidHeapAttributeNumber, chunk_attrs_needed)) + if (bms_is_member(0 - FirstLowInvalidHeapAttributeNumber, uncompressed_attrs_needed)) { /* * attno = 0 means whole-row var. Output all the columns. 
*/ - destination_attno_in_uncompressed_chunk = chunk_attno; - chunk_attrs_found = - bms_add_member(chunk_attrs_found, - chunk_attno - FirstLowInvalidHeapAttributeNumber); + destination_attno = uncompressed_attno; + uncompressed_attrs_found = + bms_add_member(uncompressed_attrs_found, + uncompressed_attno - FirstLowInvalidHeapAttributeNumber); } - else if (bms_is_member(chunk_attno - FirstLowInvalidHeapAttributeNumber, - chunk_attrs_needed)) + else if (bms_is_member(uncompressed_attno - FirstLowInvalidHeapAttributeNumber, + uncompressed_attrs_needed)) { - destination_attno_in_uncompressed_chunk = chunk_attno; - chunk_attrs_found = - bms_add_member(chunk_attrs_found, - chunk_attno - FirstLowInvalidHeapAttributeNumber); + destination_attno = uncompressed_attno; + uncompressed_attrs_found = + bms_add_member(uncompressed_attrs_found, + uncompressed_attno - FirstLowInvalidHeapAttributeNumber); } } else @@ -199,43 +243,41 @@ build_decompression_map(PlannerInfo *root, DecompressChunkPath *path, List *scan if (strcmp(column_name, COMPRESSION_COLUMN_METADATA_COUNT_NAME) == 0) { - destination_attno_in_uncompressed_chunk = DECOMPRESS_CHUNK_COUNT_ID; + destination_attno = DECOMPRESS_CHUNK_COUNT_ID; missing_count = false; } else if (path->needs_sequence_num && strcmp(column_name, COMPRESSION_COLUMN_METADATA_SEQUENCE_NUM_NAME) == 0) { - destination_attno_in_uncompressed_chunk = DECOMPRESS_CHUNK_SEQUENCE_NUM_ID; + destination_attno = DECOMPRESS_CHUNK_SEQUENCE_NUM_ID; missing_sequence = false; } } bool is_segment = ts_array_is_member(path->info->settings->fd.segmentby, column_name); - path->decompression_map = - lappend_int(path->decompression_map, destination_attno_in_uncompressed_chunk); - path->is_segmentby_column = lappend_int(path->is_segmentby_column, is_segment); + context->decompression_map = lappend_int(context->decompression_map, destination_attno); + context->is_segmentby_column = lappend_int(context->is_segmentby_column, is_segment); /* * Determine if we can use bulk decompression for this column. */ - Oid typoid = get_atttype(path->info->chunk_rte->relid, chunk_attno); + Oid typoid = get_atttype(path->info->chunk_rte->relid, uncompressed_attno); const bool bulk_decompression_possible = - !is_segment && destination_attno_in_uncompressed_chunk > 0 && + !is_segment && destination_attno > 0 && tsl_get_decompress_all_function(compression_get_default_algorithm(typoid), typoid) != NULL; - path->have_bulk_decompression_columns |= bulk_decompression_possible; - path->bulk_decompression_column = - lappend_int(path->bulk_decompression_column, bulk_decompression_possible); + context->have_bulk_decompression_columns |= bulk_decompression_possible; + context->bulk_decompression_column = + lappend_int(context->bulk_decompression_column, bulk_decompression_possible); /* * Save information about decompressed columns in uncompressed chunk * for planning of vectorized filters. */ - if (destination_attno_in_uncompressed_chunk > 0) + if (destination_attno > 0) { - path->uncompressed_chunk_attno_to_compression_info - [destination_attno_in_uncompressed_chunk] = + context->uncompressed_chunk_attno_to_compression_info[destination_attno] = (DecompressChunkColumnCompression){ .bulk_decompression_possible = bulk_decompression_possible }; } @@ -246,7 +288,8 @@ build_decompression_map(PlannerInfo *root, DecompressChunkPath *path, List *scan * We can't conveniently check that we have all columns for all-row vars, so * skip attno 0 in this check. 
*/ - Bitmapset *attrs_not_found = bms_difference(chunk_attrs_needed, chunk_attrs_found); + Bitmapset *attrs_not_found = + bms_difference(uncompressed_attrs_needed, uncompressed_attrs_found); int bit = bms_next_member(attrs_not_found, 0 - FirstLowInvalidHeapAttributeNumber); if (bit >= 0) { @@ -418,7 +461,7 @@ is_not_runtime_constant(Node *node) * commuted copy. If not, return NULL. */ static Node * -make_vectorized_qual(DecompressChunkPath *path, Node *qual) +make_vectorized_qual(DecompressionMapContext *context, DecompressChunkPath *path, Node *qual) { /* * We can vectorize BoolExpr (AND/OR/NOT). @@ -442,7 +485,7 @@ make_vectorized_qual(DecompressChunkPath *path, Node *qual) foreach (lc, boolexpr->args) { Node *arg = lfirst(lc); - Node *vectorized_arg = make_vectorized_qual(path, arg); + Node *vectorized_arg = make_vectorized_qual(context, path, arg); if (vectorized_arg == NULL) { return NULL; @@ -559,7 +602,7 @@ make_vectorized_qual(DecompressChunkPath *path, Node *qual) * ExecQual is performed before ExecProject and operates on the decompressed * scan slot, so the qual attnos are the uncompressed chunk attnos. */ - if (!path->uncompressed_chunk_attno_to_compression_info[var->varattno] + if (!context->uncompressed_chunk_attno_to_compression_info[var->varattno] .bulk_decompression_possible) { /* This column doesn't support bulk decompression. */ @@ -623,8 +666,8 @@ make_vectorized_qual(DecompressChunkPath *path, Node *qual) * list. */ static void -find_vectorized_quals(DecompressChunkPath *path, List *qual_list, List **vectorized, - List **nonvectorized) +find_vectorized_quals(DecompressionMapContext *context, DecompressChunkPath *path, List *qual_list, + List **vectorized, List **nonvectorized) { ListCell *lc; foreach (lc, qual_list) @@ -640,7 +683,7 @@ find_vectorized_quals(DecompressChunkPath *path, List *qual_list, List **vectori Node *transformed_comparison = (Node *) ts_transform_cross_datatype_comparison((Expr *) source_qual); - Node *vectorized_qual = make_vectorized_qual(path, transformed_comparison); + Node *vectorized_qual = make_vectorized_qual(context, path, transformed_comparison); if (vectorized_qual) { *vectorized = lappend(*vectorized, vectorized_qual); @@ -694,7 +737,7 @@ ts_label_sort_with_costsize(PlannerInfo *root, Sort *plan, double limit_tuples) Plan * decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *path, - List *decompressed_tlist, List *clauses, List *custom_plans) + List *output_targetlist, List *clauses, List *custom_plans) { DecompressChunkPath *dcpath = (DecompressChunkPath *) path; CustomScan *decompress_plan = makeNode(CustomScan); @@ -711,7 +754,7 @@ decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *pat decompress_plan->scan.scanrelid = dcpath->info->chunk_rel->relid; /* output target list */ - decompress_plan->scan.plan.targetlist = decompressed_tlist; + decompress_plan->scan.plan.targetlist = output_targetlist; if (IsA(compressed_path, IndexPath)) { @@ -793,25 +836,30 @@ decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *pat /* * Determine which columns we have to decompress. - * decompressed_tlist is sometimes empty, e.g. for a direct select from + * output_targetlist is sometimes empty, e.g. for a direct select from * chunk. We have a ProjectionPath above DecompressChunk in this case, and * the targetlist for this path is not built by the planner * (CP_IGNORE_TLIST). This is why we have to examine rel pathtarget. 
* Looking at the targetlist is not enough, we also have to decompress the * columns participating in quals and in pathkeys. */ - Bitmapset *chunk_attrs_needed = NULL; + Bitmapset *uncompressed_attrs_needed = NULL; pull_varattnos((Node *) decompress_plan->scan.plan.qual, dcpath->info->chunk_rel->relid, - &chunk_attrs_needed); + &uncompressed_attrs_needed); pull_varattnos((Node *) dcpath->custom_path.path.pathtarget->exprs, dcpath->info->chunk_rel->relid, - &chunk_attrs_needed); + &uncompressed_attrs_needed); /* * Determine which compressed column goes to which output column. */ - build_decompression_map(root, dcpath, compressed_scan->plan.targetlist, chunk_attrs_needed); + DecompressionMapContext context = { 0 }; + build_decompression_map(root, + &context, + dcpath, + compressed_scan->plan.targetlist, + uncompressed_attrs_needed); /* Build heap sort info for sorted_merge_append */ List *sort_options = NIL; @@ -1004,7 +1052,7 @@ decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *pat const bool enable_bulk_decompression = !dcpath->batch_sorted_merge && ts_guc_enable_bulk_decompression && - dcpath->have_bulk_decompression_columns; + context.have_bulk_decompression_columns; /* * For some predicates, we have more efficient implementation that work on @@ -1015,7 +1063,8 @@ decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *pat if (enable_bulk_decompression) { List *nonvectorized_quals = NIL; - find_vectorized_quals(dcpath, + find_vectorized_quals(&context, + dcpath, decompress_plan->scan.plan.qual, &vectorized_quals, &nonvectorized_quals); @@ -1058,14 +1107,18 @@ decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *pat decompress_plan->custom_private = ts_new_list(T_List, DCP_Count); lfirst(list_nth_cell(decompress_plan->custom_private, DCP_Settings)) = settings; lfirst(list_nth_cell(decompress_plan->custom_private, DCP_DecompressionMap)) = - dcpath->decompression_map; + context.decompression_map; lfirst(list_nth_cell(decompress_plan->custom_private, DCP_IsSegmentbyColumn)) = - dcpath->is_segmentby_column; + context.is_segmentby_column; lfirst(list_nth_cell(decompress_plan->custom_private, DCP_BulkDecompressionColumn)) = - dcpath->bulk_decompression_column; + context.bulk_decompression_column; lfirst(list_nth_cell(decompress_plan->custom_private, DCP_SortInfo)) = sort_options; - /* input target list */ + /* + * Note that our scan tuple type is uncompressed chunk tuple. This is the + * assumption of decompression map and generally of all decompression + * functions. 
+ */ decompress_plan->custom_scan_tlist = NIL; return &decompress_plan->scan.plan; diff --git a/tsl/src/nodes/decompress_chunk/planner.h b/tsl/src/nodes/decompress_chunk/planner.h index a090af30d15..11231c873b2 100644 --- a/tsl/src/nodes/decompress_chunk/planner.h +++ b/tsl/src/nodes/decompress_chunk/planner.h @@ -28,6 +28,7 @@ typedef enum } DecompressChunkPrivateIndex; extern Plan *decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *path, - List *tlist, List *clauses, List *custom_plans); + List *output_targetlist, List *clauses, + List *custom_plans); extern void _decompress_chunk_init(void); From e25267db624216764541f310a6dedceb0f1f91ca Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Fri, 29 Mar 2024 12:38:44 +0100 Subject: [PATCH 16/81] rename --- tsl/src/nodes/decompress_chunk/planner.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tsl/src/nodes/decompress_chunk/planner.c b/tsl/src/nodes/decompress_chunk/planner.c index 071985a1c79..bf459a1082f 100644 --- a/tsl/src/nodes/decompress_chunk/planner.c +++ b/tsl/src/nodes/decompress_chunk/planner.c @@ -104,6 +104,8 @@ typedef struct * columns we can apply vectorized quals. */ DecompressChunkColumnCompression *uncompressed_chunk_attno_to_compression_info; + + List * } DecompressionMapContext; /* @@ -117,7 +119,7 @@ typedef struct */ static void build_decompression_map(PlannerInfo *root, DecompressionMapContext *context, - DecompressChunkPath *path, List *scan_tlist, + DecompressChunkPath *path, List *compressed_scan_tlist, Bitmapset *uncompressed_attrs_needed) { /* @@ -171,7 +173,7 @@ build_decompression_map(PlannerInfo *root, DecompressionMapContext *context, */ context->have_bulk_decompression_columns = false; context->decompression_map = NIL; - foreach (lc, scan_tlist) + foreach (lc, compressed_scan_tlist) { TargetEntry *target = (TargetEntry *) lfirst(lc); if (!IsA(target->expr, Var)) From 5e6221d043cc76f2ac82be623e468d64452225f2 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Fri, 29 Mar 2024 13:19:23 +0100 Subject: [PATCH 17/81] typo --- tsl/src/nodes/decompress_chunk/planner.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/tsl/src/nodes/decompress_chunk/planner.c b/tsl/src/nodes/decompress_chunk/planner.c index bf459a1082f..8e8a8208857 100644 --- a/tsl/src/nodes/decompress_chunk/planner.c +++ b/tsl/src/nodes/decompress_chunk/planner.c @@ -104,8 +104,6 @@ typedef struct * columns we can apply vectorized quals. */ DecompressChunkColumnCompression *uncompressed_chunk_attno_to_compression_info; - - List * } DecompressionMapContext; /* From 5c4af48ab6acd942e302947a5a5dcdf08b0a77dc Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Fri, 29 Mar 2024 16:10:55 +0100 Subject: [PATCH 18/81] benchmark separate vectorized agg (2024-03-29 no. 
2) From 4130683d51b57d169eb806500dbdd86ce8fead08 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Fri, 29 Mar 2024 16:11:44 +0100 Subject: [PATCH 19/81] produce partials for each batch also unroll the loop --- tsl/src/compression/array.c | 25 +-- tsl/src/compression/deltadelta_impl.c | 33 ++-- tsl/src/compression/dictionary.c | 25 +-- tsl/src/compression/gorilla_impl.c | 32 ++-- tsl/src/nodes/decompress_chunk/exec.c | 247 ++++++++++---------------- 5 files changed, 141 insertions(+), 221 deletions(-) diff --git a/tsl/src/compression/array.c b/tsl/src/compression/array.c index 9cbce7a91b2..51357bbff69 100644 --- a/tsl/src/compression/array.c +++ b/tsl/src/compression/array.c @@ -570,7 +570,20 @@ text_array_decompress_all_serialized_no_header(StringInfo si, bool has_nulls, const int validity_bitmap_bytes = sizeof(uint64) * (pad_to_multiple(64, n_total) / 64); uint64 *restrict validity_bitmap = MemoryContextAlloc(dest_mctx, validity_bitmap_bytes); + + /* + * First, mark all data as valid, we will fill the nulls later if needed. + * Note that the validity bitmap size is a multiple of 64 bits. We have to + * fill the tail bits with zeros, because the corresponding elements are not + * valid. + * + */ memset(validity_bitmap, 0xFF, validity_bitmap_bytes); + if (n_total % 64) + { + const uint64 tail_mask = -1ULL >> (64 - n_total % 64); + validity_bitmap[n_total / 64] &= tail_mask; + } if (has_nulls) { @@ -612,18 +625,6 @@ text_array_decompress_all_serialized_no_header(StringInfo si, bool has_nulls, Assert(current_notnull_element == -1); } - else - { - /* - * The validity bitmap size is a multiple of 64 bits. Fill the tail bits - * with zeros, because the corresponding elements are not valid. - */ - if (n_total % 64) - { - const uint64 tail_mask = -1ULL >> (64 - n_total % 64); - validity_bitmap[n_total / 64] &= tail_mask; - } - } ArrowArray *result = MemoryContextAllocZero(dest_mctx, sizeof(ArrowArray) + sizeof(void *) * 3); const void **buffers = (const void **) &result[1]; diff --git a/tsl/src/compression/deltadelta_impl.c b/tsl/src/compression/deltadelta_impl.c index b2233977470..82e7bcd31b6 100644 --- a/tsl/src/compression/deltadelta_impl.c +++ b/tsl/src/compression/deltadelta_impl.c @@ -92,8 +92,19 @@ FUNCTION_NAME(delta_delta_decompress_all, ELEMENT_TYPE)(Datum compressed, Memory } #undef INNER_LOOP_SIZE - /* All data valid by default, we will fill in the nulls later. */ + /* + * First, mark all data as valid, we will fill the nulls later if needed. + * Note that the validity bitmap size is a multiple of 64 bits. We have to + * fill the tail bits with zeros, because the corresponding elements are not + * valid. + * + */ memset(validity_bitmap, 0xFF, validity_bitmap_bytes); + if (n_total % 64) + { + const uint64 tail_mask = -1ULL >> (64 - n_total % 64); + validity_bitmap[n_total / 64] &= tail_mask; + } /* Now move the data to account for nulls, and fill the validity bitmap. */ if (has_nulls) @@ -123,26 +134,6 @@ FUNCTION_NAME(delta_delta_decompress_all, ELEMENT_TYPE)(Datum compressed, Memory Assert(current_notnull_element == -1); } - else - { - /* - * The validity bitmap size is a multiple of 64 bits. Fill the tail bits - * with zeros, because the corresponding elements are not valid. 
- */ - if (n_total % 64) - { - const uint64 tail_mask = -1ULL >> (64 - n_total % 64); - validity_bitmap[n_total / 64] &= tail_mask; - -#ifdef USE_ASSERT_CHECKING - for (uint32 i = 0; i < 64; i++) - { - Assert(arrow_row_is_valid(validity_bitmap, (n_total / 64) * 64 + i) == - (i < n_total % 64)); - } -#endif - } - } /* Return the result. */ ArrowArray *result = MemoryContextAllocZero(dest_mctx, sizeof(ArrowArray) + sizeof(void *) * 2); diff --git a/tsl/src/compression/dictionary.c b/tsl/src/compression/dictionary.c index b5428818fe4..12997778fd7 100644 --- a/tsl/src/compression/dictionary.c +++ b/tsl/src/compression/dictionary.c @@ -454,7 +454,20 @@ tsl_text_dictionary_decompress_all(Datum compressed, Oid element_type, MemoryCon /* Fill validity and indices of the array elements, reshuffling for nulls if needed. */ const int validity_bitmap_bytes = sizeof(uint64) * pad_to_multiple(64, n_total) / 64; uint64 *restrict validity_bitmap = MemoryContextAlloc(dest_mctx, validity_bitmap_bytes); + + /* + * First, mark all data as valid, we will fill the nulls later if needed. + * Note that the validity bitmap size is a multiple of 64 bits. We have to + * fill the tail bits with zeros, because the corresponding elements are not + * valid. + * + */ memset(validity_bitmap, 0xFF, validity_bitmap_bytes); + if (n_total % 64) + { + const uint64 tail_mask = -1ULL >> (64 - n_total % 64); + validity_bitmap[n_total / 64] &= tail_mask; + } if (header->has_nulls) { @@ -485,18 +498,6 @@ tsl_text_dictionary_decompress_all(Datum compressed, Oid element_type, MemoryCon Assert(current_notnull_element == -1); } - else - { - /* - * The validity bitmap size is a multiple of 64 bits. Fill the tail bits - * with zeros, because the corresponding elements are not valid. - */ - if (n_total % 64) - { - const uint64 tail_mask = -1ULL >> (64 - n_total % 64); - validity_bitmap[n_total / 64] &= tail_mask; - } - } ArrowArray *result = MemoryContextAllocZero(dest_mctx, sizeof(ArrowArray) + sizeof(void *) * 2); const void **buffers = (const void **) &result[1]; diff --git a/tsl/src/compression/gorilla_impl.c b/tsl/src/compression/gorilla_impl.c index d56609dddc9..ba5e97844b2 100644 --- a/tsl/src/compression/gorilla_impl.c +++ b/tsl/src/compression/gorilla_impl.c @@ -136,10 +136,18 @@ FUNCTION_NAME(gorilla_decompress_all, ELEMENT_TYPE)(CompressedGorillaData *goril uint64 *restrict validity_bitmap = MemoryContextAlloc(dest_mctx, validity_bitmap_bytes); /* - * For starters, set the validity bitmap to all ones. We probably have less - * nulls than values, so this is faster. + * First, mark all data as valid, we will fill the nulls later if needed. + * Note that the validity bitmap size is a multiple of 64 bits. We have to + * fill the tail bits with zeros, because the corresponding elements are not + * valid. + * */ memset(validity_bitmap, 0xFF, validity_bitmap_bytes); + if (n_total % 64) + { + const uint64 tail_mask = -1ULL >> (64 - n_total % 64); + validity_bitmap[n_total / 64] &= tail_mask; + } if (has_nulls) { @@ -169,26 +177,6 @@ FUNCTION_NAME(gorilla_decompress_all, ELEMENT_TYPE)(CompressedGorillaData *goril Assert(current_notnull_element == -1); } - else - { - /* - * The validity bitmap size is a multiple of 64 bits. Fill the tail bits - * with zeros, because the corresponding elements are not valid. 
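A note on the tail-masking pattern that this series consolidates across the decompression functions: the validity bitmap is allocated in whole 64-bit words, every bit is first set, and the bits at positions n_total and above in the last word are then cleared with -1ULL >> (64 - n_total % 64). The following is a minimal standalone sketch of that pattern (plain C, illustrative names, not code from this patch):

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Round n up to the next multiple of 64, as pad_to_multiple(64, n) does. */
#define PAD64(n) (((n) + 63) / 64 * 64)
#define N_TOTAL 70 /* example: 70 logical rows, bitmap padded to 128 bits */

int
main(void)
{
	uint64_t validity_bitmap[PAD64(N_TOTAL) / 64];

	/* Mark everything valid first... */
	memset(validity_bitmap, 0xFF, sizeof(validity_bitmap));

	/* ...then clear the bits at positions N_TOTAL and above in the last word. */
	if (N_TOTAL % 64)
	{
		const uint64_t tail_mask = -1ULL >> (64 - N_TOTAL % 64);
		validity_bitmap[N_TOTAL / 64] &= tail_mask;
	}

	/* LSB-first bit order, as in the Arrow validity bitmap format. */
	for (int i = 0; i < PAD64(N_TOTAL); i++)
	{
		const int valid = (validity_bitmap[i / 64] >> (i % 64)) & 1;
		assert(valid == (i < N_TOTAL));
	}
	return 0;
}

The branchless aggregation added later in this series relies on exactly this invariant: padding rows read past the logical length must have their validity bit cleared so they contribute nothing to the sum.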
- */ - if (n_total % 64) - { - const uint64 tail_mask = -1ULL >> (64 - n_total % 64); - validity_bitmap[n_total / 64] &= tail_mask; - -#ifdef USE_ASSERT_CHECKING - for (uint32 i = 0; i < 64; i++) - { - Assert(arrow_row_is_valid(validity_bitmap, (n_total / 64) * 64 + i) == - (i < n_total % 64)); - } -#endif - } - } /* Return the result. */ ArrowArray *result = MemoryContextAllocZero(dest_mctx, sizeof(ArrowArray) + sizeof(void *) * 2); diff --git a/tsl/src/nodes/decompress_chunk/exec.c b/tsl/src/nodes/decompress_chunk/exec.c index c5f98c2e270..aaf64aeb7db 100644 --- a/tsl/src/nodes/decompress_chunk/exec.c +++ b/tsl/src/nodes/decompress_chunk/exec.c @@ -381,7 +381,6 @@ perform_vectorized_sum_int4(CustomScanState *vector_agg_state, Aggref *aggref, DecompressChunkState *decompress_state) { DecompressContext *dcontext = &decompress_state->decompress_context; - BatchQueue *batch_queue = decompress_state->batch_queue; Assert(aggref != NULL); @@ -407,178 +406,130 @@ perform_vectorized_sum_int4(CustomScanState *vector_agg_state, Aggref *aggref, value_column_description->type == SEGMENTBY_COLUMN); Assert(count_column_description->type == COUNT_COLUMN); - /* Get a free batch slot */ - const int new_batch_index = batch_array_get_unused_slot(&batch_queue->batch_array); - - /* Nobody else should use batch states */ - Assert(new_batch_index == 0); - DecompressBatchState *batch_state = - batch_array_get_at(&batch_queue->batch_array, new_batch_index); - - /* Init per batch memory context */ - Assert(batch_state != NULL); - Assert(batch_state->per_batch_context == NULL); - batch_state->per_batch_context = create_per_batch_mctx(dcontext); - Assert(batch_state->per_batch_context != NULL); - - /* Init bulk decompression memory context */ - Assert(dcontext->bulk_decompression_context == NULL); - dcontext->bulk_decompression_context = create_bulk_decompression_mctx(CurrentMemoryContext); - Assert(dcontext->bulk_decompression_context != NULL); + BatchQueue *batch_queue = decompress_state->batch_queue; + DecompressBatchState *batch_state = batch_array_get_at(&batch_queue->batch_array, 0); /* Get a reference the the output TupleTableSlot */ TupleTableSlot *aggregated_slot = vector_agg_state->ss.ps.ps_ResultTupleSlot; Assert(aggregated_slot->tts_tupleDescriptor->natts == 1); - /* Set all attributes of the result tuple to NULL. So, we return NULL if no data is processed - * by our implementation. In addition, the call marks the slot as being used (i.e., no - * ExecStoreVirtualTuple call is required). */ - ExecStoreAllNullTuple(aggregated_slot); - Assert(!TupIsNull(aggregated_slot)); - int64 result_sum = 0; - while (true) - { - TupleTableSlot *compressed_slot = - ExecProcNode(linitial(decompress_state->csstate.custom_ps)); - - if (TupIsNull(compressed_slot)) - { - /* All values are processed. */ - break; - } + aggregated_slot->tts_isnull[0] = true; + aggregated_slot->tts_values[0] = 0; + ExecClearTuple(aggregated_slot); - if (value_column_description->type == SEGMENTBY_COLUMN) - { - /* - * To calculate the sum for a segment by value, we need to multiply the value of the - * segment by column with the number of compressed tuples in this batch. - */ + TupleTableSlot *compressed_slot = ExecProcNode(linitial(decompress_state->csstate.custom_ps)); - MemoryContext old_mctx = MemoryContextSwitchTo(batch_state->per_batch_context); - MemoryContextReset(batch_state->per_batch_context); + if (TupIsNull(compressed_slot)) + { + /* All values are processed. 
*/ + return NULL; + } - bool isnull_value, isnull_elements; - Datum value = slot_getattr(compressed_slot, - value_column_description->compressed_scan_attno, - &isnull_value); + compressed_batch_set_compressed_tuple(dcontext, batch_state, compressed_slot); - /* We have multiple compressed tuples for this segment by value. Get number of - * compressed tuples */ - Datum elements = slot_getattr(compressed_slot, - count_column_description->compressed_scan_attno, - &isnull_elements); + ArrowArray *arrow = NULL; + if (value_column_description->type == COMPRESSED_COLUMN) + { + Assert(dcontext->enable_bulk_decompression); + Assert(value_column_description->bulk_decompression_supported); + Assert(list_length(aggref->args) == 1); + CompressedColumnValues *values = + &batch_state->compressed_columns[value_column_description - dcontext->template_columns]; + Assert(values->decompression_type != DT_Invalid); + arrow = values->arrow; + } + else + { + Assert(value_column_description->type == SEGMENTBY_COLUMN); + } - if (!isnull_value && !isnull_elements) - { - int32 intvalue = DatumGetInt32(value); - int32 amount = DatumGetInt32(elements); - int64 batch_sum = 0; - - Assert(amount > 0); - - /* We have at least one value */ - aggregated_slot->tts_isnull[0] = false; - - /* Multiply the number of tuples with the actual value */ - if (unlikely(pg_mul_s64_overflow(intvalue, amount, &batch_sum))) - { - ereport(ERROR, - (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), - errmsg("bigint out of range"))); - } - - /* Add the value to our sum */ - if (unlikely(pg_add_s64_overflow(result_sum, batch_sum, ((int64 *) &result_sum)))) - ereport(ERROR, - (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), - errmsg("bigint out of range"))); - } - MemoryContextSwitchTo(old_mctx); - } - else if (value_column_description->type == COMPRESSED_COLUMN) + if (arrow == NULL) + { + /* + * To calculate the sum for a segment by value or default compressed + * column value, we need to multiply this value with the number of + * compressed tuples in this batch. 
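For this constant case the arithmetic is simply value times row count, with both the multiplication and the accumulation into the running sum guarded against int64 overflow. Below is a self-contained sketch of the same pattern, using the GCC/Clang overflow builtins in place of PostgreSQL's pg_mul_s64_overflow/pg_add_s64_overflow and a plain abort instead of ereport (illustrative names, not the patch code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Add one batch consisting of `rows` copies of the constant `value`
 * (a segmentby value, or the default of a column added after compression)
 * to the running sum. Exits on int64 overflow, where the real code raises
 * a "bigint out of range" error instead.
 */
static void
sum_constant_batch(int32_t value, int64_t rows, int64_t *sum, bool *isnull)
{
	int64_t batch_sum;

	if (__builtin_mul_overflow((int64_t) value, rows, &batch_sum) ||
		__builtin_add_overflow(*sum, batch_sum, sum))
	{
		fprintf(stderr, "bigint out of range\n");
		exit(EXIT_FAILURE);
	}

	/* At least one non-null row was aggregated. */
	*isnull = false;
}

int
main(void)
{
	int64_t sum = 0;
	bool isnull = true;

	sum_constant_batch(7, 999, &sum, &isnull);  /* 999 rows of the default 7 */
	sum_constant_batch(11, 999, &sum, &isnull); /* 999 rows of the value 11  */
	printf("sum = %lld, isnull = %d\n", (long long) sum, (int) isnull);
	return 0;
}

The example numbers mirror the vector_agg_default test added later in this series: 999 rows of the default 7 plus 999 rows of 11 give 17982, the total the test expects (in the actual test the second chunk is summed through the vector path rather than as a per-batch constant).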
+ */ + int offs = AttrNumberGetAttrOffset(value_column_description->output_attno); + if (!batch_state->decompressed_scan_slot_data.base.tts_isnull[offs]) { - Assert(dcontext->enable_bulk_decompression); - Assert(value_column_description->bulk_decompression_supported); - Assert(list_length(aggref->args) == 1); - - MemoryContext old_mctx = MemoryContextSwitchTo(batch_state->per_batch_context); - MemoryContextReset(batch_state->per_batch_context); - - /* Decompress data */ - bool isnull; - Datum value = slot_getattr(compressed_slot, - value_column_description->compressed_scan_attno, - &isnull); - - Ensure(isnull == false, "got unexpected NULL attribute value from compressed batch"); + int32 intvalue = + DatumGetInt32(batch_state->decompressed_scan_slot_data.base.tts_values[offs]); + int64 batch_sum = 0; /* We have at least one value */ aggregated_slot->tts_isnull[0] = false; - CompressedDataHeader *header = - (CompressedDataHeader *) detoaster_detoast_attr((struct varlena *) DatumGetPointer( - value), - &dcontext->detoaster, - CurrentMemoryContext); - - ArrowArray *arrow = NULL; - - DecompressAllFunction decompress_all = - tsl_get_decompress_all_function(header->compression_algorithm, - value_column_description->typid); - Assert(decompress_all != NULL); - - MemoryContextSwitchTo(dcontext->bulk_decompression_context); - - arrow = decompress_all(PointerGetDatum(header), - value_column_description->typid, - batch_state->per_batch_context); - - Assert(arrow != NULL); - - MemoryContextReset(dcontext->bulk_decompression_context); - MemoryContextSwitchTo(batch_state->per_batch_context); - - /* - * We accumulate the sum as int64, so we can sum INT_MAX = 2^31 - 1 - * at least 2^31 times without incurrint an overflow of the int64 - * accumulator. The same is true for negative numbers. The - * compressed batch size is currently capped at 1000 rows, but even - * if it's changed in the future, it's unlikely that we support - * batches larger than 65536 rows, not to mention 2^31. Therefore, - * we don't need to check for overflows within the loop, which would - * slow down the calculation. - */ - Assert(arrow->length <= INT_MAX); - - int64 batch_sum = 0; - for (int i = 0; i < arrow->length; i++) + /* Multiply the number of tuples with the actual value */ + if (unlikely(pg_mul_s64_overflow(intvalue, batch_state->total_batch_rows, &batch_sum))) { - const bool arrow_isnull = !arrow_row_is_valid(arrow->buffers[0], i); - - if (likely(!arrow_isnull)) - { - const int32 arrow_value = ((int32 *) arrow->buffers[1])[i]; - batch_sum += arrow_value; - } + ereport(ERROR, + (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), + errmsg("bigint out of range"))); } + /* Add the value to our sum */ if (unlikely(pg_add_s64_overflow(result_sum, batch_sum, ((int64 *) &result_sum)))) ereport(ERROR, (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("bigint out of range"))); - MemoryContextSwitchTo(old_mctx); } - else + } + else + { + Assert(arrow != NULL); + + /* We have at least one value */ + aggregated_slot->tts_isnull[0] = false; + + /* + * We accumulate the sum as int64, so we can sum INT_MAX = 2^31 - 1 + * at least 2^31 times without incurring an overflow of the int64 + * accumulator. The same is true for negative numbers. The + * compressed batch size is currently capped at 1000 rows, but even + * if it's changed in the future, it's unlikely that we support + * batches larger than 65536 rows, not to mention 2^31. Therefore, + * we don't need to check for overflows within the loop, which would + * slow down the calculation. 
+ */ + Assert(arrow->length <= INT_MAX); + + int64 batch_sum = 0; + + /* + * This loop is not unrolled automatically, so do it manually as usual. + * The value buffer is padded to an even multiple of 64 bytes, i.e. to + * 64 / 4 = 16 elements. The bitmap is an even multiple of 64 elements. + * The number of elements in the inner loop must be less than both these + * values so that we don't go out of bounds. The particular value was + * chosen because it gives some speedup, and the larger values blow up + * the generated code with no performance benefit (checked on clang 16). + */ +#define INNER_LOOP_SIZE 4 + const int outer_boundary = pad_to_multiple(INNER_LOOP_SIZE, arrow->length); + for (int outer = 0; outer < outer_boundary; outer += INNER_LOOP_SIZE) { - elog(ERROR, "unsupported column type"); + for (int inner = 0; inner < INNER_LOOP_SIZE; inner++) + { + const int row = outer + inner; + const int32 arrow_value = ((int32 *) arrow->buffers[1])[row]; + batch_sum += arrow_value * arrow_row_is_valid(arrow->buffers[0], row); + } } +#undef INNER_LOOP_SIZE + + if (unlikely(pg_add_s64_overflow(result_sum, batch_sum, ((int64 *) &result_sum)))) + ereport(ERROR, + (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("bigint out of range"))); } + + compressed_batch_discard_tuples(batch_state); /* Use Int64GetDatum to store the result since a 64-bit value is not pass-by-value on 32-bit * systems */ aggregated_slot->tts_values[0] = Int64GetDatum(result_sum); + ExecStoreVirtualTuple(aggregated_slot); return aggregated_slot; } @@ -594,8 +545,6 @@ TupleTableSlot * decompress_chunk_exec_vector_agg_impl(CustomScanState *vector_agg_state, DecompressChunkState *decompress_state) { - BatchQueue *bq = decompress_state->batch_queue; - /* * The aggregated targetlist with Aggrefs is in the custom scan targetlist * of the custom scan node that is performing the vectorized aggregation. @@ -612,16 +561,6 @@ decompress_chunk_exec_vector_agg_impl(CustomScanState *vector_agg_state, Assert(ts_guc_enable_vectorized_aggregation); Assert(ts_guc_enable_bulk_decompression); - /* - * When using vectorized aggregates, only one result tuple is produced. So, if we have - * already initialized a batch state, the aggregation was already performed. - */ - if (batch_array_has_active_batches(&bq->batch_array)) - { - ExecClearTuple(vector_agg_state->ss.ss_ScanTupleSlot); - return vector_agg_state->ss.ss_ScanTupleSlot; - } - /* Determine which kind of vectorized aggregation we should perform */ TargetEntry *tlentry = (TargetEntry *) linitial(aggregated_tlist); Assert(IsA(tlentry->expr, Aggref)); From e7f01ab1f9bb049dc4804d1cd18dc673ca0c0fe8 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Fri, 29 Mar 2024 16:11:56 +0100 Subject: [PATCH 20/81] benchmark separate vectorized agg (2024-03-29 no. 
3) From cff844d002ec6b67dc60b8569f39a1c8cd1a23a0 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Fri, 29 Mar 2024 17:19:52 +0100 Subject: [PATCH 21/81] more generic interface --- tsl/src/nodes/decompress_chunk/exec.c | 209 ------------------- tsl/src/nodes/vector_agg/exec.c | 248 ++++++++++++++++++++++- tsl/test/expected/vector_agg_default.out | 67 ++++++ tsl/test/sql/CMakeLists.txt | 3 +- tsl/test/sql/vector_agg_default.sql | 22 ++ 5 files changed, 335 insertions(+), 214 deletions(-) create mode 100644 tsl/test/expected/vector_agg_default.out create mode 100644 tsl/test/sql/vector_agg_default.sql diff --git a/tsl/src/nodes/decompress_chunk/exec.c b/tsl/src/nodes/decompress_chunk/exec.c index aaf64aeb7db..db5035c284d 100644 --- a/tsl/src/nodes/decompress_chunk/exec.c +++ b/tsl/src/nodes/decompress_chunk/exec.c @@ -373,215 +373,6 @@ decompress_chunk_begin(CustomScanState *node, EState *estate, int eflags) detoaster_init(&dcontext->detoaster, CurrentMemoryContext); } -/* - * Perform a vectorized aggregation on int4 values - */ -static TupleTableSlot * -perform_vectorized_sum_int4(CustomScanState *vector_agg_state, Aggref *aggref, - DecompressChunkState *decompress_state) -{ - DecompressContext *dcontext = &decompress_state->decompress_context; - - Assert(aggref != NULL); - - /* Partial result is a int8 */ - Assert(aggref->aggtranstype == INT8OID); - - /* Two columns are decompressed, the column that needs to be aggregated and the count column */ - Assert(dcontext->num_total_columns == 2); - - CompressionColumnDescription *value_column_description = &dcontext->template_columns[0]; - CompressionColumnDescription *count_column_description = &dcontext->template_columns[1]; - if (count_column_description->type != COUNT_COLUMN) - { - /* - * The count and value columns can go in different order based on their - * order in compressed chunk, so check which one we are seeing. - */ - CompressionColumnDescription *tmp = value_column_description; - value_column_description = count_column_description; - count_column_description = tmp; - } - Assert(value_column_description->type == COMPRESSED_COLUMN || - value_column_description->type == SEGMENTBY_COLUMN); - Assert(count_column_description->type == COUNT_COLUMN); - - BatchQueue *batch_queue = decompress_state->batch_queue; - DecompressBatchState *batch_state = batch_array_get_at(&batch_queue->batch_array, 0); - - /* Get a reference the the output TupleTableSlot */ - TupleTableSlot *aggregated_slot = vector_agg_state->ss.ps.ps_ResultTupleSlot; - Assert(aggregated_slot->tts_tupleDescriptor->natts == 1); - - int64 result_sum = 0; - - aggregated_slot->tts_isnull[0] = true; - aggregated_slot->tts_values[0] = 0; - ExecClearTuple(aggregated_slot); - - TupleTableSlot *compressed_slot = ExecProcNode(linitial(decompress_state->csstate.custom_ps)); - - if (TupIsNull(compressed_slot)) - { - /* All values are processed. 
*/ - return NULL; - } - - compressed_batch_set_compressed_tuple(dcontext, batch_state, compressed_slot); - - ArrowArray *arrow = NULL; - if (value_column_description->type == COMPRESSED_COLUMN) - { - Assert(dcontext->enable_bulk_decompression); - Assert(value_column_description->bulk_decompression_supported); - Assert(list_length(aggref->args) == 1); - CompressedColumnValues *values = - &batch_state->compressed_columns[value_column_description - dcontext->template_columns]; - Assert(values->decompression_type != DT_Invalid); - arrow = values->arrow; - } - else - { - Assert(value_column_description->type == SEGMENTBY_COLUMN); - } - - if (arrow == NULL) - { - /* - * To calculate the sum for a segment by value or default compressed - * column value, we need to multiply this value with the number of - * compressed tuples in this batch. - */ - int offs = AttrNumberGetAttrOffset(value_column_description->output_attno); - if (!batch_state->decompressed_scan_slot_data.base.tts_isnull[offs]) - { - int32 intvalue = - DatumGetInt32(batch_state->decompressed_scan_slot_data.base.tts_values[offs]); - int64 batch_sum = 0; - - /* We have at least one value */ - aggregated_slot->tts_isnull[0] = false; - - /* Multiply the number of tuples with the actual value */ - if (unlikely(pg_mul_s64_overflow(intvalue, batch_state->total_batch_rows, &batch_sum))) - { - ereport(ERROR, - (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), - errmsg("bigint out of range"))); - } - - /* Add the value to our sum */ - if (unlikely(pg_add_s64_overflow(result_sum, batch_sum, ((int64 *) &result_sum)))) - ereport(ERROR, - (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), - errmsg("bigint out of range"))); - } - } - else - { - Assert(arrow != NULL); - - /* We have at least one value */ - aggregated_slot->tts_isnull[0] = false; - - /* - * We accumulate the sum as int64, so we can sum INT_MAX = 2^31 - 1 - * at least 2^31 times without incurring an overflow of the int64 - * accumulator. The same is true for negative numbers. The - * compressed batch size is currently capped at 1000 rows, but even - * if it's changed in the future, it's unlikely that we support - * batches larger than 65536 rows, not to mention 2^31. Therefore, - * we don't need to check for overflows within the loop, which would - * slow down the calculation. - */ - Assert(arrow->length <= INT_MAX); - - int64 batch_sum = 0; - - /* - * This loop is not unrolled automatically, so do it manually as usual. - * The value buffer is padded to an even multiple of 64 bytes, i.e. to - * 64 / 4 = 16 elements. The bitmap is an even multiple of 64 elements. - * The number of elements in the inner loop must be less than both these - * values so that we don't go out of bounds. The particular value was - * chosen because it gives some speedup, and the larger values blow up - * the generated code with no performance benefit (checked on clang 16). 
- */ -#define INNER_LOOP_SIZE 4 - const int outer_boundary = pad_to_multiple(INNER_LOOP_SIZE, arrow->length); - for (int outer = 0; outer < outer_boundary; outer += INNER_LOOP_SIZE) - { - for (int inner = 0; inner < INNER_LOOP_SIZE; inner++) - { - const int row = outer + inner; - const int32 arrow_value = ((int32 *) arrow->buffers[1])[row]; - batch_sum += arrow_value * arrow_row_is_valid(arrow->buffers[0], row); - } - } -#undef INNER_LOOP_SIZE - - if (unlikely(pg_add_s64_overflow(result_sum, batch_sum, ((int64 *) &result_sum)))) - ereport(ERROR, - (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("bigint out of range"))); - } - - compressed_batch_discard_tuples(batch_state); - /* Use Int64GetDatum to store the result since a 64-bit value is not pass-by-value on 32-bit - * systems */ - aggregated_slot->tts_values[0] = Int64GetDatum(result_sum); - ExecStoreVirtualTuple(aggregated_slot); - - return aggregated_slot; -} - -/* - * Directly execute an aggregation function on decompressed data and emit a partial aggregate - * result. - * - * Executing the aggregation directly in this node makes it possible to use the columnar data - * directly before it is converted into row-based tuples. - */ -TupleTableSlot * -decompress_chunk_exec_vector_agg_impl(CustomScanState *vector_agg_state, - DecompressChunkState *decompress_state) -{ - /* - * The aggregated targetlist with Aggrefs is in the custom scan targetlist - * of the custom scan node that is performing the vectorized aggregation. - * We do this to avoid projections at this node, because the postgres - * projection functions complain when they see an Aggref in a custom - * node output targetlist. - * The output targetlist, in turn, consists of just the INDEX_VAR references - * into the custom_scan_tlist. - */ - List *aggregated_tlist = castNode(CustomScan, vector_agg_state->ss.ps.plan)->custom_scan_tlist; - Assert(list_length(aggregated_tlist) == 1); - - /* Checked by planner */ - Assert(ts_guc_enable_vectorized_aggregation); - Assert(ts_guc_enable_bulk_decompression); - - /* Determine which kind of vectorized aggregation we should perform */ - TargetEntry *tlentry = (TargetEntry *) linitial(aggregated_tlist); - Assert(IsA(tlentry->expr, Aggref)); - Aggref *aggref = castNode(Aggref, tlentry->expr); - - /* The aggregate should be a partial aggregate */ - Assert(aggref->aggsplit == AGGSPLIT_INITIAL_SERIAL); - - switch (aggref->aggfnoid) - { - case F_SUM_INT4: - return perform_vectorized_sum_int4(vector_agg_state, aggref, decompress_state); - default: - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("vectorized aggregation for function %d is not supported", - aggref->aggfnoid))); - pg_unreachable(); - } -} - /* * The exec function for the DecompressChunk node. 
It takes the explicit queue * functions pointer as an optimization, to allow these functions to be diff --git a/tsl/src/nodes/vector_agg/exec.c b/tsl/src/nodes/vector_agg/exec.c index 358ed84bc88..f5ca3b4dba9 100644 --- a/tsl/src/nodes/vector_agg/exec.c +++ b/tsl/src/nodes/vector_agg/exec.c @@ -14,8 +14,10 @@ #include "exec.h" +#include "compression/arrow_c_data_interface.h" #include "nodes/decompress_chunk/compressed_batch.h" #include "nodes/decompress_chunk/exec.h" +#include "guc.h" static void vector_agg_begin(CustomScanState *node, EState *estate, int eflags) @@ -40,12 +42,250 @@ vector_agg_rescan(CustomScanState *node) ExecReScan(linitial(node->custom_ps)); } +typedef struct +{ + void (*agg_init)(Datum *agg_value, bool *agg_isnull); + void (*agg_vector_all)(ArrowArray *vector, Datum *agg_value, bool *agg_isnull); + void (*agg_const)(Datum constvalue, bool constisnull, int n, Datum *agg_value, + bool *agg_isnull); +} VectorAggregate; + +static void +int4_sum_init(Datum *agg_value, bool *agg_isnull) +{ + *agg_value = Int64GetDatum(0); + *agg_isnull = true; +} + +static void +int4_sum_vector_all(ArrowArray *vector, Datum *agg_value, bool *agg_isnull) +{ + Assert(vector != NULL); + Assert(vector->length > 0); + + /* + * We accumulate the sum as int64, so we can sum INT_MAX = 2^31 - 1 + * at least 2^31 times without incurring an overflow of the int64 + * accumulator. The same is true for negative numbers. The + * compressed batch size is currently capped at 1000 rows, but even + * if it's changed in the future, it's unlikely that we support + * batches larger than 65536 rows, not to mention 2^31. Therefore, + * we don't need to check for overflows within the loop, which would + * slow down the calculation. + */ + Assert(vector->length <= INT_MAX); + + int64 batch_sum = 0; + + /* + * This loop is not unrolled automatically, so do it manually as usual. + * The value buffer is padded to an even multiple of 64 bytes, i.e. to + * 64 / 4 = 16 elements. The bitmap is an even multiple of 64 elements. + * The number of elements in the inner loop must be less than both these + * values so that we don't go out of bounds. The particular value was + * chosen because it gives some speedup, and the larger values blow up + * the generated code with no performance benefit (checked on clang 16). 
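The manually unrolled loop that follows is also branchless: instead of skipping null rows, each value is multiplied by its 0/1 validity bit, which relies on the value buffer being padded past the logical length and on the validity bits in that padding being zero. A standalone sketch of the same accumulation (plain C, illustrative names, not the patch code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define INNER_LOOP_SIZE 4
#define PAD(m, n) (((n) + (m) - 1) / (m) * (m))

/* LSB-first validity bit, as in the Arrow format. */
static inline int64_t
row_is_valid(const uint64_t *bitmap, int row)
{
	return (bitmap[row / 64] >> (row % 64)) & 1;
}

/*
 * Branchless sum of an int32 vector: invalid rows contribute zero because
 * the value is multiplied by the validity bit rather than skipped with a
 * branch. The value buffer must be padded so that reading up to the
 * unrolled boundary stays in bounds.
 */
static int64_t
sum_int32_vector(const int32_t *values, const uint64_t *validity, int length)
{
	int64_t sum = 0;
	const int outer_boundary = PAD(INNER_LOOP_SIZE, length);

	for (int outer = 0; outer < outer_boundary; outer += INNER_LOOP_SIZE)
	{
		for (int inner = 0; inner < INNER_LOOP_SIZE; inner++)
		{
			const int row = outer + inner;
			sum += (int64_t) values[row] * row_is_valid(validity, row);
		}
	}
	return sum;
}

int
main(void)
{
	/* 6 logical rows padded to 8; row 2 is null, rows 6 and 7 are padding. */
	const int32_t values[8] = { 1, 2, 100, 4, 5, 6, 0, 0 };
	const uint64_t validity[1] = { 0x3B }; /* bits 0,1,3,4,5 set */

	assert(sum_int32_vector(values, validity, 6) == 1 + 2 + 4 + 5 + 6);
	printf("%lld\n", (long long) sum_int32_vector(values, validity, 6));
	return 0;
}
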
+ */ +#define INNER_LOOP_SIZE 4 + const int outer_boundary = pad_to_multiple(INNER_LOOP_SIZE, vector->length); + for (int outer = 0; outer < outer_boundary; outer += INNER_LOOP_SIZE) + { + for (int inner = 0; inner < INNER_LOOP_SIZE; inner++) + { + const int row = outer + inner; + const int32 arrow_value = ((int32 *) vector->buffers[1])[row]; + batch_sum += arrow_value * arrow_row_is_valid(vector->buffers[0], row); + } + } +#undef INNER_LOOP_SIZE + + int64 tmp = DatumGetInt64(*agg_value); + if (unlikely(pg_add_s64_overflow(tmp, batch_sum, &tmp))) + { + ereport(ERROR, + (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("bigint out of range"))); + } + *agg_value = Int64GetDatum(tmp); + + *agg_isnull = false; +} + +static void +int4_sum_const(Datum constvalue, bool constisnull, int n, Datum *agg_value, bool *agg_isnull) +{ + Assert(n > 0); + + if (constisnull) + { + return; + } + + int32 intvalue = DatumGetInt32(constvalue); + int64 batch_sum = 0; + + /* We have at least one value */ + *agg_isnull = false; + + /* Multiply the number of tuples with the actual value */ + if (unlikely(pg_mul_s64_overflow(intvalue, n, &batch_sum))) + { + ereport(ERROR, + (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("bigint out of range"))); + } + + /* Add the value to our sum */ + int64 tmp = DatumGetInt64(*agg_value); + if (unlikely(pg_add_s64_overflow(tmp, batch_sum, &tmp))) + { + ereport(ERROR, + (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("bigint out of range"))); + } + *agg_value = Int64GetDatum(tmp); +} + +/* + * Perform a vectorized aggregation. + */ +static TupleTableSlot * +perform_vectorized_agg(VectorAggregate agg, CustomScanState *vector_agg_state, + DecompressChunkState *decompress_state) +{ + DecompressContext *dcontext = &decompress_state->decompress_context; + + /* Two columns are decompressed, the column that needs to be aggregated and the count column */ + Assert(dcontext->num_total_columns == 2); + + CompressionColumnDescription *value_column_description = &dcontext->template_columns[0]; + CompressionColumnDescription *count_column_description = &dcontext->template_columns[1]; + if (count_column_description->type != COUNT_COLUMN) + { + /* + * The count and value columns can go in different order based on their + * order in compressed chunk, so check which one we are seeing. + */ + CompressionColumnDescription *tmp = value_column_description; + value_column_description = count_column_description; + count_column_description = tmp; + } + Assert(value_column_description->type == COMPRESSED_COLUMN || + value_column_description->type == SEGMENTBY_COLUMN); + Assert(count_column_description->type == COUNT_COLUMN); + + BatchQueue *batch_queue = decompress_state->batch_queue; + DecompressBatchState *batch_state = batch_array_get_at(&batch_queue->batch_array, 0); + + /* Get a reference the the output TupleTableSlot */ + TupleTableSlot *aggregated_slot = vector_agg_state->ss.ps.ps_ResultTupleSlot; + Assert(aggregated_slot->tts_tupleDescriptor->natts == 1); + + agg.agg_init(&aggregated_slot->tts_values[0], &aggregated_slot->tts_isnull[0]); + ExecClearTuple(aggregated_slot); + + TupleTableSlot *compressed_slot = ExecProcNode(linitial(decompress_state->csstate.custom_ps)); + + if (TupIsNull(compressed_slot)) + { + /* All values are processed. 
*/ + return NULL; + } + + compressed_batch_set_compressed_tuple(dcontext, batch_state, compressed_slot); + + ArrowArray *arrow = NULL; + if (value_column_description->type == COMPRESSED_COLUMN) + { + Assert(dcontext->enable_bulk_decompression); + Assert(value_column_description->bulk_decompression_supported); + CompressedColumnValues *values = + &batch_state->compressed_columns[value_column_description - dcontext->template_columns]; + Assert(values->decompression_type != DT_Invalid); + arrow = values->arrow; + } + else + { + Assert(value_column_description->type == SEGMENTBY_COLUMN); + } + + if (arrow == NULL) + { + /* + * To calculate the sum for a segment by value or default compressed + * column value, we need to multiply this value with the number of + * compressed tuples in this batch. + */ + int offs = AttrNumberGetAttrOffset(value_column_description->output_attno); + agg.agg_const(batch_state->decompressed_scan_slot_data.base.tts_values[offs], + batch_state->decompressed_scan_slot_data.base.tts_isnull[offs], + batch_state->total_batch_rows, + &aggregated_slot->tts_values[0], + &aggregated_slot->tts_isnull[0]); + } + else + { + agg.agg_vector_all(arrow, &aggregated_slot->tts_values[0], &aggregated_slot->tts_isnull[0]); + } + + compressed_batch_discard_tuples(batch_state); + /* Use Int64GetDatum to store the result since a 64-bit value is not pass-by-value on 32-bit + * systems */ + ExecStoreVirtualTuple(aggregated_slot); + + return aggregated_slot; +} + static TupleTableSlot * -vector_agg_exec(CustomScanState *node) +vector_agg_exec(CustomScanState *vector_agg_state) { - // return ExecProcNode(linitial(node->custom_ps)); - DecompressChunkState *ds = (DecompressChunkState *) linitial(node->custom_ps); - return decompress_chunk_exec_vector_agg_impl(node, ds); + DecompressChunkState *decompress_state = + (DecompressChunkState *) linitial(vector_agg_state->custom_ps); + + /* + * The aggregated targetlist with Aggrefs is in the custom scan targetlist + * of the custom scan node that is performing the vectorized aggregation. + * We do this to avoid projections at this node, because the postgres + * projection functions complain when they see an Aggref in a custom + * node output targetlist. + * The output targetlist, in turn, consists of just the INDEX_VAR references + * into the custom_scan_tlist. 
+ */ + List *aggregated_tlist = castNode(CustomScan, vector_agg_state->ss.ps.plan)->custom_scan_tlist; + Assert(list_length(aggregated_tlist) == 1); + + /* Checked by planner */ + Assert(ts_guc_enable_vectorized_aggregation); + Assert(ts_guc_enable_bulk_decompression); + + /* Determine which kind of vectorized aggregation we should perform */ + TargetEntry *tlentry = (TargetEntry *) linitial(aggregated_tlist); + Assert(IsA(tlentry->expr, Aggref)); + Aggref *aggref = castNode(Aggref, tlentry->expr); + + /* Partial result is a int8 */ + Assert(aggref->aggtranstype == INT8OID); + + Assert(list_length(aggref->args) == 1); + + /* The aggregate should be a partial aggregate */ + Assert(aggref->aggsplit == AGGSPLIT_INITIAL_SERIAL); + + switch (aggref->aggfnoid) + { + case F_SUM_INT4: + { + VectorAggregate agg = { + .agg_init = int4_sum_init, + .agg_const = int4_sum_const, + .agg_vector_all = int4_sum_vector_all, + }; + return perform_vectorized_agg(agg, vector_agg_state, decompress_state); + } + default: + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("vectorized aggregation for function %d is not supported", + aggref->aggfnoid))); + pg_unreachable(); + } } static void diff --git a/tsl/test/expected/vector_agg_default.out b/tsl/test/expected/vector_agg_default.out new file mode 100644 index 00000000000..bf8211e84a3 --- /dev/null +++ b/tsl/test/expected/vector_agg_default.out @@ -0,0 +1,67 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +create table t(a int); +select create_hypertable('t', 'a', chunk_time_interval => 1000); +NOTICE: adding not-null constraint to column "a" + create_hypertable +------------------- + (1,public,t,t) +(1 row) + +insert into t select generate_series(1, 999); +alter table t set (timescaledb.compress); +WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes +NOTICE: default segment by for hypertable "t" is set to "" +NOTICE: default order by for hypertable "t" is set to "a DESC" +select compress_chunk(show_chunks('t')); + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk +(1 row) + +alter table t add column b int default 7; +insert into t select x, 11 from generate_series(1001, 1999) x; +select compress_chunk(show_chunks('t')); +NOTICE: chunk "_hyper_1_1_chunk" is already compressed + compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_3_chunk +(2 rows) + +explain (costs off) select sum(b) from t; + QUERY PLAN +----------------------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk + -> Parallel Seq Scan on compress_hyper_2_2_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk + -> Parallel Seq Scan on compress_hyper_2_4_chunk +(10 rows) + +select sum(b) from t; + sum +------- + 17982 +(1 row) + +select decompress_chunk(show_chunks('t')); + decompress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_3_chunk +(2 rows) + +select sum(b) from t; + sum +------- + 17982 +(1 row) + +drop table t; diff --git a/tsl/test/sql/CMakeLists.txt b/tsl/test/sql/CMakeLists.txt index 19a6b1bed2a..5a72eef3db2 100644 --- a/tsl/test/sql/CMakeLists.txt +++ b/tsl/test/sql/CMakeLists.txt @@ -39,7 +39,8 @@ set(TEST_FILES skip_scan.sql size_utils_tsl.sql transparent_decompression_join_index.sql - vectorized_aggregation.sql) + vectorized_aggregation.sql + vector_agg_default.sql) if(USE_TELEMETRY) list(APPEND TEST_FILES bgw_telemetry.sql) diff --git a/tsl/test/sql/vector_agg_default.sql b/tsl/test/sql/vector_agg_default.sql new file mode 100644 index 00000000000..ac5f6937ccf --- /dev/null +++ b/tsl/test/sql/vector_agg_default.sql @@ -0,0 +1,22 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+ +create table t(a int); +select create_hypertable('t', 'a', chunk_time_interval => 1000); + +insert into t select generate_series(1, 999); +alter table t set (timescaledb.compress); +select compress_chunk(show_chunks('t')); + +alter table t add column b int default 7; +insert into t select x, 11 from generate_series(1001, 1999) x; +select compress_chunk(show_chunks('t')); + +explain (costs off) select sum(b) from t; +select sum(b) from t; + +select decompress_chunk(show_chunks('t')); +select sum(b) from t; + +drop table t; From 209838e36dd874333b4edfe89c8e7899cc1b883e Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Fri, 29 Mar 2024 18:59:56 +0100 Subject: [PATCH 22/81] fix outer_var resolution --- tsl/src/nodes/vector_agg/plan.c | 25 +- tsl/test/expected/vectorized_aggregation.out | 464 +++++++++---------- 2 files changed, 244 insertions(+), 245 deletions(-) diff --git a/tsl/src/nodes/vector_agg/plan.c b/tsl/src/nodes/vector_agg/plan.c index 3663ca38b2e..5259a9858a9 100644 --- a/tsl/src/nodes/vector_agg/plan.c +++ b/tsl/src/nodes/vector_agg/plan.c @@ -62,7 +62,7 @@ build_trivial_custom_output_targetlist(List *scan_targetlist) } static Node * -replace_outer_special_vars_mutator(Node *node, void *context) +resolve_outer_special_vars_mutator(Node *node, void *context) { if (node == NULL) { @@ -71,7 +71,7 @@ replace_outer_special_vars_mutator(Node *node, void *context) if (!IsA(node, Var)) { - return expression_tree_mutator(node, replace_outer_special_vars_mutator, context); + return expression_tree_mutator(node, resolve_outer_special_vars_mutator, context); } Var *var = castNode(Var, node); @@ -80,22 +80,21 @@ replace_outer_special_vars_mutator(Node *node, void *context) return node; } - var = copyObject(var); - var->varno = DatumGetInt32(PointerGetDatum(context)); - return (Node *) var; + TargetEntry *decompress_chunk_tentry = + castNode(TargetEntry, list_nth(context, var->varattno - 1)); + Var *uncompressed_var = castNode(Var, decompress_chunk_tentry->expr); + return (Node *) copyObject(uncompressed_var); } /* - * Replace the OUTER_VAR special variables, that are used in the output - * targetlists of aggregation nodes, with the given other varno. + * Resolve the OUTER_VAR special variables, that are used in the output + * targetlists of aggregation nodes, replacing them with the uncompressed chunk + * variables. */ static List * -replace_outer_special_vars(List *input, int target_varno) +resolve_outer_special_vars(List *agg_tlist, List *outer_tlist) { - return castNode(List, - replace_outer_special_vars_mutator((Node *) input, - DatumGetPointer( - Int32GetDatum(target_varno)))); + return castNode(List, resolve_outer_special_vars_mutator((Node *) agg_tlist, outer_tlist)); } /* @@ -116,7 +115,7 @@ vector_agg_plan_create(Agg *agg, CustomScan *decompress_chunk) * the scan targetlists. 
*/ custom->custom_scan_tlist = - replace_outer_special_vars(agg->plan.targetlist, decompress_chunk->scan.scanrelid); + resolve_outer_special_vars(agg->plan.targetlist, decompress_chunk->scan.plan.targetlist); custom->scan.plan.targetlist = build_trivial_custom_output_targetlist(custom->custom_scan_tlist); diff --git a/tsl/test/expected/vectorized_aggregation.out b/tsl/test/expected/vectorized_aggregation.out index 680e6da3fb3..040f4a0a6ce 100644 --- a/tsl/test/expected/vectorized_aggregation.out +++ b/tsl/test/expected/vectorized_aggregation.out @@ -52,25 +52,25 @@ SELECT sum(segment_by_value) FROM testtable; QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_1_chunk."time") + Output: sum(_hyper_1_1_chunk.segment_by_value) -> Gather - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk Output: _hyper_1_1_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_2_chunk."time")) + Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk Output: _hyper_1_2_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_3_chunk."time")) + Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk Output: _hyper_1_3_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk @@ -111,24 +111,24 @@ SELECT sum(segment_by_value) FROM testtable WHERE segment_by_value > 0; QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_1_chunk."time") + Output: sum(_hyper_1_1_chunk.segment_by_value) -> Append -> Custom Scan (VectorAgg) - Output: (PARTIAL 
sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk Output: _hyper_1_1_chunk.segment_by_value -> Index Scan using compress_hyper_2_11_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_11_chunk Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value Index Cond: (compress_hyper_2_11_chunk.segment_by_value > 0) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_2_chunk."time")) + Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk Output: _hyper_1_2_chunk.segment_by_value -> Index Scan using compress_hyper_2_12_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_12_chunk Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value Index Cond: (compress_hyper_2_12_chunk.segment_by_value > 0) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_3_chunk."time")) + Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk Output: _hyper_1_3_chunk.segment_by_value -> Index Scan using compress_hyper_2_13_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_13_chunk @@ -179,24 +179,24 @@ SELECT sum(segment_by_value) FROM testtable WHERE segment_by_value > 0 AND int_v Finalize Aggregate Output: sum(_hyper_1_1_chunk.segment_by_value) -> Append - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_1_chunk.segment_by_value) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk Output: _hyper_1_1_chunk.segment_by_value Vectorized Filter: (_hyper_1_1_chunk.int_value > 0) -> Index Scan using compress_hyper_2_11_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_11_chunk Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value Index Cond: (compress_hyper_2_11_chunk.segment_by_value > 0) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_2_chunk.segment_by_value) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk Output: _hyper_1_2_chunk.segment_by_value Vectorized Filter: (_hyper_1_2_chunk.int_value > 0) -> Index Scan using compress_hyper_2_12_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_12_chunk Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, 
compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value Index Cond: (compress_hyper_2_12_chunk.segment_by_value > 0) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_3_chunk.segment_by_value) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk Output: _hyper_1_3_chunk.segment_by_value Vectorized Filter: (_hyper_1_3_chunk.int_value > 0) @@ -250,22 +250,22 @@ SELECT sum(segment_by_value) FROM testtable WHERE int_value > 0; Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) Workers Planned: 2 -> Parallel Append - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_1_chunk.segment_by_value) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk Output: _hyper_1_1_chunk.segment_by_value Vectorized Filter: (_hyper_1_1_chunk.int_value > 0) -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_2_chunk.segment_by_value) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk Output: _hyper_1_2_chunk.segment_by_value Vectorized Filter: (_hyper_1_2_chunk.int_value > 0) -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_3_chunk.segment_by_value) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk Output: _hyper_1_3_chunk.segment_by_value Vectorized Filter: (_hyper_1_3_chunk.int_value > 0) @@ -318,22 +318,22 @@ SELECT sum(segment_by_value) FROM testtable WHERE float_value > 0; Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) Workers Planned: 2 -> Parallel Append - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_1_chunk.segment_by_value) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk Output: _hyper_1_1_chunk.segment_by_value Vectorized Filter: (_hyper_1_1_chunk.float_value > '0'::double precision) -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", 
compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_2_chunk.segment_by_value) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk Output: _hyper_1_2_chunk.segment_by_value Vectorized Filter: (_hyper_1_2_chunk.float_value > '0'::double precision) -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_3_chunk.segment_by_value) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk Output: _hyper_1_3_chunk.segment_by_value Vectorized Filter: (_hyper_1_3_chunk.float_value > '0'::double precision) @@ -663,25 +663,25 @@ SELECT sum(int_value) FROM testtable; QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_1_chunk."time") + Output: sum(_hyper_1_1_chunk.int_value) -> Gather - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.int_value)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk Output: _hyper_1_1_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_2_chunk."time")) + Output: (PARTIAL sum(_hyper_1_2_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk Output: _hyper_1_2_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_3_chunk."time")) + Output: (PARTIAL sum(_hyper_1_3_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk Output: _hyper_1_3_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk @@ -814,67 +814,67 @@ SELECT sum(segment_by_value) FROM 
testtable; QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_1_chunk."time") + Output: sum(_hyper_1_1_chunk.segment_by_value) -> Gather - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk Output: _hyper_1_1_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_2_chunk."time")) + Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk Output: _hyper_1_2_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_3_chunk."time")) + Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk Output: _hyper_1_3_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_4_chunk."time")) + Output: (PARTIAL sum(_hyper_1_4_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk Output: _hyper_1_4_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_5_chunk."time")) + Output: (PARTIAL sum(_hyper_1_5_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk Output: _hyper_1_5_chunk.segment_by_value -> 
Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_6_chunk."time")) + Output: (PARTIAL sum(_hyper_1_6_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk Output: _hyper_1_6_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_7_chunk."time")) + Output: (PARTIAL sum(_hyper_1_7_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk Output: _hyper_1_7_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_8_chunk."time")) + Output: (PARTIAL sum(_hyper_1_8_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk Output: _hyper_1_8_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_9_chunk."time")) + Output: (PARTIAL sum(_hyper_1_9_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk Output: _hyper_1_9_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_10_chunk."time")) + Output: (PARTIAL sum(_hyper_1_10_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk Output: _hyper_1_10_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk @@ -893,67 +893,67 @@ SELECT sum(int_value) FROM testtable; QUERY PLAN 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_1_chunk."time") + Output: sum(_hyper_1_1_chunk.int_value) -> Gather - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.int_value)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk Output: _hyper_1_1_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_2_chunk."time")) + Output: (PARTIAL sum(_hyper_1_2_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk Output: _hyper_1_2_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_3_chunk."time")) + Output: (PARTIAL sum(_hyper_1_3_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk Output: _hyper_1_3_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_4_chunk."time")) + Output: (PARTIAL sum(_hyper_1_4_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk Output: _hyper_1_4_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_5_chunk."time")) + Output: (PARTIAL sum(_hyper_1_5_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk Output: _hyper_1_5_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk Output: 
compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_6_chunk."time")) + Output: (PARTIAL sum(_hyper_1_6_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk Output: _hyper_1_6_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_7_chunk."time")) + Output: (PARTIAL sum(_hyper_1_7_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk Output: _hyper_1_7_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_8_chunk."time")) + Output: (PARTIAL sum(_hyper_1_8_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk Output: _hyper_1_8_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_9_chunk."time")) + Output: (PARTIAL sum(_hyper_1_9_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk Output: _hyper_1_9_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_10_chunk."time")) + Output: (PARTIAL sum(_hyper_1_10_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk Output: _hyper_1_10_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk @@ -977,67 +977,67 @@ SELECT sum(segment_by_value) FROM testtable; QUERY PLAN 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_1_chunk."time") + Output: sum(_hyper_1_1_chunk.segment_by_value) -> Gather - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk Output: _hyper_1_1_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_2_chunk."time")) + Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk Output: _hyper_1_2_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_3_chunk."time")) + Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk Output: _hyper_1_3_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_4_chunk."time")) + Output: (PARTIAL sum(_hyper_1_4_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk Output: _hyper_1_4_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_5_chunk."time")) + Output: (PARTIAL sum(_hyper_1_5_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk Output: _hyper_1_5_chunk.segment_by_value -> Parallel Seq Scan on 
_timescaledb_internal.compress_hyper_2_15_chunk Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_6_chunk."time")) + Output: (PARTIAL sum(_hyper_1_6_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk Output: _hyper_1_6_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_7_chunk."time")) + Output: (PARTIAL sum(_hyper_1_7_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk Output: _hyper_1_7_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_8_chunk."time")) + Output: (PARTIAL sum(_hyper_1_8_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk Output: _hyper_1_8_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_9_chunk."time")) + Output: (PARTIAL sum(_hyper_1_9_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk Output: _hyper_1_9_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_10_chunk."time")) + Output: (PARTIAL sum(_hyper_1_10_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk Output: _hyper_1_10_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk @@ -1060,67 +1060,67 @@ SELECT sum(int_value) FROM testtable; QUERY PLAN 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_1_chunk."time") + Output: sum(_hyper_1_1_chunk.int_value) -> Gather - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.int_value)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk Output: _hyper_1_1_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_2_chunk."time")) + Output: (PARTIAL sum(_hyper_1_2_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk Output: _hyper_1_2_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_3_chunk."time")) + Output: (PARTIAL sum(_hyper_1_3_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk Output: _hyper_1_3_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_4_chunk."time")) + Output: (PARTIAL sum(_hyper_1_4_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk Output: _hyper_1_4_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_5_chunk."time")) + Output: (PARTIAL sum(_hyper_1_5_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk Output: _hyper_1_5_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk Output: 
compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_6_chunk."time")) + Output: (PARTIAL sum(_hyper_1_6_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk Output: _hyper_1_6_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_7_chunk."time")) + Output: (PARTIAL sum(_hyper_1_7_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk Output: _hyper_1_7_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_8_chunk."time")) + Output: (PARTIAL sum(_hyper_1_8_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk Output: _hyper_1_8_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_9_chunk."time")) + Output: (PARTIAL sum(_hyper_1_9_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk Output: _hyper_1_9_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_10_chunk."time")) + Output: (PARTIAL sum(_hyper_1_10_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk Output: _hyper_1_10_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk @@ -1378,67 +1378,67 @@ SELECT sum(int_value), sum(int_value) FROM testtable; QUERY PLAN 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_1_chunk."time"), sum(_hyper_1_1_chunk."time") + Output: sum(_hyper_1_1_chunk.int_value), sum(_hyper_1_1_chunk.int_value) -> Gather - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.int_value)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk Output: _hyper_1_1_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_2_chunk."time")) + Output: (PARTIAL sum(_hyper_1_2_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk Output: _hyper_1_2_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_3_chunk."time")) + Output: (PARTIAL sum(_hyper_1_3_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk Output: _hyper_1_3_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_4_chunk."time")) + Output: (PARTIAL sum(_hyper_1_4_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk Output: _hyper_1_4_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_5_chunk."time")) + Output: (PARTIAL sum(_hyper_1_5_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk Output: _hyper_1_5_chunk.int_value -> Parallel Seq Scan on 
_timescaledb_internal.compress_hyper_2_15_chunk Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_6_chunk."time")) + Output: (PARTIAL sum(_hyper_1_6_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk Output: _hyper_1_6_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_7_chunk."time")) + Output: (PARTIAL sum(_hyper_1_7_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk Output: _hyper_1_7_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_8_chunk."time")) + Output: (PARTIAL sum(_hyper_1_8_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk Output: _hyper_1_8_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_9_chunk."time")) + Output: (PARTIAL sum(_hyper_1_9_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk Output: _hyper_1_9_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_10_chunk."time")) + Output: (PARTIAL sum(_hyper_1_10_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk Output: _hyper_1_10_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk @@ -1455,67 +1455,67 @@ SELECT sum(segment_by_value), sum(segment_by_value) FROM testtable; QUERY PLAN 
 ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
  Finalize Aggregate
-   Output: sum(_hyper_1_1_chunk."time"), sum(_hyper_1_1_chunk."time")
+   Output: sum(_hyper_1_1_chunk.segment_by_value), sum(_hyper_1_1_chunk.segment_by_value)
    ->  Gather
-         Output: (PARTIAL sum(_hyper_1_1_chunk."time"))
+         Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value))
          Workers Planned: 2
          ->  Parallel Append
                ->  Custom Scan (VectorAgg)
-                     Output: (PARTIAL sum(_hyper_1_1_chunk."time"))
+                     Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value))
                      ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk
                            Output: _hyper_1_1_chunk.segment_by_value
                            ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk
                                  Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value
                ->  Custom Scan (VectorAgg)
-                     Output: (PARTIAL sum(_hyper_1_2_chunk."time"))
+                     Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value))
                      ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk
                            Output: _hyper_1_2_chunk.segment_by_value
                            ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk
                                  Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value
                ->  Custom Scan (VectorAgg)
-                     Output: (PARTIAL sum(_hyper_1_3_chunk."time"))
+                     Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value))
                      ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk
                            Output: _hyper_1_3_chunk.segment_by_value
                            ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk
                                  Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value
                ->  Custom Scan (VectorAgg)
-                     Output: (PARTIAL sum(_hyper_1_4_chunk."time"))
+                     Output: (PARTIAL sum(_hyper_1_4_chunk.segment_by_value))
                      ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk
                            Output: _hyper_1_4_chunk.segment_by_value
                            ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk
                                  Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value
                ->  Custom Scan (VectorAgg)
-                     Output: (PARTIAL sum(_hyper_1_5_chunk."time"))
+                     Output: (PARTIAL sum(_hyper_1_5_chunk.segment_by_value))
                      ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk
                            Output: _hyper_1_5_chunk.segment_by_value
                            ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk
                                  Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value
                ->  Custom Scan (VectorAgg)
-                     Output: (PARTIAL sum(_hyper_1_6_chunk."time"))
+                     Output: (PARTIAL sum(_hyper_1_6_chunk.segment_by_value))
                      ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk
                            Output: _hyper_1_6_chunk.segment_by_value
                            ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk
                                  Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value
                ->  Custom Scan (VectorAgg)
-                     Output: (PARTIAL sum(_hyper_1_7_chunk."time"))
+                     Output: (PARTIAL sum(_hyper_1_7_chunk.segment_by_value))
                      ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk
                            Output: _hyper_1_7_chunk.segment_by_value
                            ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk
                                  Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value
                ->  Custom Scan (VectorAgg)
-                     Output: (PARTIAL sum(_hyper_1_8_chunk."time"))
+                     Output: (PARTIAL sum(_hyper_1_8_chunk.segment_by_value))
                      ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk
                            Output: _hyper_1_8_chunk.segment_by_value
                            ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk
                                  Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value
                ->  Custom Scan (VectorAgg)
-                     Output: (PARTIAL sum(_hyper_1_9_chunk."time"))
+                     Output: (PARTIAL sum(_hyper_1_9_chunk.segment_by_value))
                      ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk
                            Output: _hyper_1_9_chunk.segment_by_value
                            ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk
                                  Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value
                ->  Custom Scan (VectorAgg)
-                     Output: (PARTIAL sum(_hyper_1_10_chunk."time"))
+                     Output: (PARTIAL sum(_hyper_1_10_chunk.segment_by_value))
                      ->  Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk
                            Output: _hyper_1_10_chunk.segment_by_value
                            ->  Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk
@@ -1859,76 +1859,76 @@ SELECT sum(int_value) FROM testtable WHERE segment_by_value > 5;
                                                                                                                                                                          QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_41_chunk."time") + Output: sum(_hyper_1_41_chunk.int_value) -> Gather - Output: (PARTIAL sum(_hyper_1_41_chunk."time")) + Output: (PARTIAL sum(_hyper_1_41_chunk.int_value)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_41_chunk."time")) + Output: (PARTIAL sum(_hyper_1_41_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_41_chunk Output: _hyper_1_41_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_51_chunk Output: compress_hyper_2_51_chunk._ts_meta_count, compress_hyper_2_51_chunk._ts_meta_sequence_num, compress_hyper_2_51_chunk.segment_by_value, compress_hyper_2_51_chunk._ts_meta_min_1, compress_hyper_2_51_chunk._ts_meta_max_1, compress_hyper_2_51_chunk."time", compress_hyper_2_51_chunk.int_value, compress_hyper_2_51_chunk.float_value Filter: (compress_hyper_2_51_chunk.segment_by_value > 5) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_42_chunk."time")) + Output: (PARTIAL sum(_hyper_1_42_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_42_chunk Output: _hyper_1_42_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_52_chunk Output: compress_hyper_2_52_chunk._ts_meta_count, compress_hyper_2_52_chunk._ts_meta_sequence_num, compress_hyper_2_52_chunk.segment_by_value, compress_hyper_2_52_chunk._ts_meta_min_1, compress_hyper_2_52_chunk._ts_meta_max_1, compress_hyper_2_52_chunk."time", compress_hyper_2_52_chunk.int_value, compress_hyper_2_52_chunk.float_value Filter: (compress_hyper_2_52_chunk.segment_by_value > 5) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_43_chunk."time")) + Output: (PARTIAL sum(_hyper_1_43_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_43_chunk Output: _hyper_1_43_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_53_chunk Output: compress_hyper_2_53_chunk._ts_meta_count, compress_hyper_2_53_chunk._ts_meta_sequence_num, compress_hyper_2_53_chunk.segment_by_value, compress_hyper_2_53_chunk._ts_meta_min_1, compress_hyper_2_53_chunk._ts_meta_max_1, compress_hyper_2_53_chunk."time", compress_hyper_2_53_chunk.int_value, compress_hyper_2_53_chunk.float_value Filter: (compress_hyper_2_53_chunk.segment_by_value > 5) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_44_chunk."time")) + Output: (PARTIAL sum(_hyper_1_44_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_44_chunk Output: _hyper_1_44_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_54_chunk Output: compress_hyper_2_54_chunk._ts_meta_count, compress_hyper_2_54_chunk._ts_meta_sequence_num, compress_hyper_2_54_chunk.segment_by_value, compress_hyper_2_54_chunk._ts_meta_min_1, compress_hyper_2_54_chunk._ts_meta_max_1, compress_hyper_2_54_chunk."time", compress_hyper_2_54_chunk.int_value, compress_hyper_2_54_chunk.float_value Filter: (compress_hyper_2_54_chunk.segment_by_value > 5) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_45_chunk."time")) + Output: (PARTIAL 
sum(_hyper_1_45_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_45_chunk Output: _hyper_1_45_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_55_chunk Output: compress_hyper_2_55_chunk._ts_meta_count, compress_hyper_2_55_chunk._ts_meta_sequence_num, compress_hyper_2_55_chunk.segment_by_value, compress_hyper_2_55_chunk._ts_meta_min_1, compress_hyper_2_55_chunk._ts_meta_max_1, compress_hyper_2_55_chunk."time", compress_hyper_2_55_chunk.int_value, compress_hyper_2_55_chunk.float_value Filter: (compress_hyper_2_55_chunk.segment_by_value > 5) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_46_chunk."time")) + Output: (PARTIAL sum(_hyper_1_46_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_46_chunk Output: _hyper_1_46_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_56_chunk Output: compress_hyper_2_56_chunk._ts_meta_count, compress_hyper_2_56_chunk._ts_meta_sequence_num, compress_hyper_2_56_chunk.segment_by_value, compress_hyper_2_56_chunk._ts_meta_min_1, compress_hyper_2_56_chunk._ts_meta_max_1, compress_hyper_2_56_chunk."time", compress_hyper_2_56_chunk.int_value, compress_hyper_2_56_chunk.float_value Filter: (compress_hyper_2_56_chunk.segment_by_value > 5) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_47_chunk."time")) + Output: (PARTIAL sum(_hyper_1_47_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_47_chunk Output: _hyper_1_47_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_57_chunk Output: compress_hyper_2_57_chunk._ts_meta_count, compress_hyper_2_57_chunk._ts_meta_sequence_num, compress_hyper_2_57_chunk.segment_by_value, compress_hyper_2_57_chunk._ts_meta_min_1, compress_hyper_2_57_chunk._ts_meta_max_1, compress_hyper_2_57_chunk."time", compress_hyper_2_57_chunk.int_value, compress_hyper_2_57_chunk.float_value Filter: (compress_hyper_2_57_chunk.segment_by_value > 5) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_48_chunk."time")) + Output: (PARTIAL sum(_hyper_1_48_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_48_chunk Output: _hyper_1_48_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_58_chunk Output: compress_hyper_2_58_chunk._ts_meta_count, compress_hyper_2_58_chunk._ts_meta_sequence_num, compress_hyper_2_58_chunk.segment_by_value, compress_hyper_2_58_chunk._ts_meta_min_1, compress_hyper_2_58_chunk._ts_meta_max_1, compress_hyper_2_58_chunk."time", compress_hyper_2_58_chunk.int_value, compress_hyper_2_58_chunk.float_value Filter: (compress_hyper_2_58_chunk.segment_by_value > 5) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_49_chunk."time")) + Output: (PARTIAL sum(_hyper_1_49_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_49_chunk Output: _hyper_1_49_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_59_chunk Output: compress_hyper_2_59_chunk._ts_meta_count, compress_hyper_2_59_chunk._ts_meta_sequence_num, compress_hyper_2_59_chunk.segment_by_value, compress_hyper_2_59_chunk._ts_meta_min_1, compress_hyper_2_59_chunk._ts_meta_max_1, compress_hyper_2_59_chunk."time", compress_hyper_2_59_chunk.int_value, compress_hyper_2_59_chunk.float_value Filter: (compress_hyper_2_59_chunk.segment_by_value > 5) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_50_chunk."time")) + Output: (PARTIAL 
sum(_hyper_1_50_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_50_chunk Output: _hyper_1_50_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_60_chunk @@ -1976,67 +1976,67 @@ SELECT sum(segment_by_value) FROM testtable; QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_41_chunk."time") + Output: sum(_hyper_1_41_chunk.segment_by_value) -> Gather - Output: (PARTIAL sum(_hyper_1_41_chunk."time")) + Output: (PARTIAL sum(_hyper_1_41_chunk.segment_by_value)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_41_chunk."time")) + Output: (PARTIAL sum(_hyper_1_41_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_41_chunk Output: _hyper_1_41_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_51_chunk Output: compress_hyper_2_51_chunk._ts_meta_count, compress_hyper_2_51_chunk._ts_meta_sequence_num, compress_hyper_2_51_chunk.segment_by_value, compress_hyper_2_51_chunk._ts_meta_min_1, compress_hyper_2_51_chunk._ts_meta_max_1, compress_hyper_2_51_chunk."time", compress_hyper_2_51_chunk.int_value, compress_hyper_2_51_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_42_chunk."time")) + Output: (PARTIAL sum(_hyper_1_42_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_42_chunk Output: _hyper_1_42_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_52_chunk Output: compress_hyper_2_52_chunk._ts_meta_count, compress_hyper_2_52_chunk._ts_meta_sequence_num, compress_hyper_2_52_chunk.segment_by_value, compress_hyper_2_52_chunk._ts_meta_min_1, compress_hyper_2_52_chunk._ts_meta_max_1, compress_hyper_2_52_chunk."time", compress_hyper_2_52_chunk.int_value, compress_hyper_2_52_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_43_chunk."time")) + Output: (PARTIAL sum(_hyper_1_43_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_43_chunk Output: _hyper_1_43_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_53_chunk Output: compress_hyper_2_53_chunk._ts_meta_count, compress_hyper_2_53_chunk._ts_meta_sequence_num, compress_hyper_2_53_chunk.segment_by_value, compress_hyper_2_53_chunk._ts_meta_min_1, compress_hyper_2_53_chunk._ts_meta_max_1, compress_hyper_2_53_chunk."time", compress_hyper_2_53_chunk.int_value, compress_hyper_2_53_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_44_chunk."time")) + Output: (PARTIAL sum(_hyper_1_44_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_44_chunk Output: _hyper_1_44_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_54_chunk Output: compress_hyper_2_54_chunk._ts_meta_count, compress_hyper_2_54_chunk._ts_meta_sequence_num, compress_hyper_2_54_chunk.segment_by_value, compress_hyper_2_54_chunk._ts_meta_min_1, compress_hyper_2_54_chunk._ts_meta_max_1, compress_hyper_2_54_chunk."time", compress_hyper_2_54_chunk.int_value, 
compress_hyper_2_54_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_45_chunk."time")) + Output: (PARTIAL sum(_hyper_1_45_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_45_chunk Output: _hyper_1_45_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_55_chunk Output: compress_hyper_2_55_chunk._ts_meta_count, compress_hyper_2_55_chunk._ts_meta_sequence_num, compress_hyper_2_55_chunk.segment_by_value, compress_hyper_2_55_chunk._ts_meta_min_1, compress_hyper_2_55_chunk._ts_meta_max_1, compress_hyper_2_55_chunk."time", compress_hyper_2_55_chunk.int_value, compress_hyper_2_55_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_46_chunk."time")) + Output: (PARTIAL sum(_hyper_1_46_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_46_chunk Output: _hyper_1_46_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_56_chunk Output: compress_hyper_2_56_chunk._ts_meta_count, compress_hyper_2_56_chunk._ts_meta_sequence_num, compress_hyper_2_56_chunk.segment_by_value, compress_hyper_2_56_chunk._ts_meta_min_1, compress_hyper_2_56_chunk._ts_meta_max_1, compress_hyper_2_56_chunk."time", compress_hyper_2_56_chunk.int_value, compress_hyper_2_56_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_47_chunk."time")) + Output: (PARTIAL sum(_hyper_1_47_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_47_chunk Output: _hyper_1_47_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_57_chunk Output: compress_hyper_2_57_chunk._ts_meta_count, compress_hyper_2_57_chunk._ts_meta_sequence_num, compress_hyper_2_57_chunk.segment_by_value, compress_hyper_2_57_chunk._ts_meta_min_1, compress_hyper_2_57_chunk._ts_meta_max_1, compress_hyper_2_57_chunk."time", compress_hyper_2_57_chunk.int_value, compress_hyper_2_57_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_48_chunk."time")) + Output: (PARTIAL sum(_hyper_1_48_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_48_chunk Output: _hyper_1_48_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_58_chunk Output: compress_hyper_2_58_chunk._ts_meta_count, compress_hyper_2_58_chunk._ts_meta_sequence_num, compress_hyper_2_58_chunk.segment_by_value, compress_hyper_2_58_chunk._ts_meta_min_1, compress_hyper_2_58_chunk._ts_meta_max_1, compress_hyper_2_58_chunk."time", compress_hyper_2_58_chunk.int_value, compress_hyper_2_58_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_49_chunk."time")) + Output: (PARTIAL sum(_hyper_1_49_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_49_chunk Output: _hyper_1_49_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_59_chunk Output: compress_hyper_2_59_chunk._ts_meta_count, compress_hyper_2_59_chunk._ts_meta_sequence_num, compress_hyper_2_59_chunk.segment_by_value, compress_hyper_2_59_chunk._ts_meta_min_1, compress_hyper_2_59_chunk._ts_meta_max_1, compress_hyper_2_59_chunk."time", compress_hyper_2_59_chunk.int_value, compress_hyper_2_59_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_50_chunk."time")) + Output: (PARTIAL sum(_hyper_1_50_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on 
_timescaledb_internal._hyper_1_50_chunk Output: _hyper_1_50_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_60_chunk @@ -2161,67 +2161,67 @@ SELECT sum(segment_by_value) FROM testtable; QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_81_chunk."time") + Output: sum(_hyper_1_81_chunk.segment_by_value) -> Gather - Output: (PARTIAL sum(_hyper_1_81_chunk."time")) + Output: (PARTIAL sum(_hyper_1_81_chunk.segment_by_value)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_81_chunk."time")) + Output: (PARTIAL sum(_hyper_1_81_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_81_chunk Output: _hyper_1_81_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_91_chunk Output: compress_hyper_2_91_chunk._ts_meta_count, compress_hyper_2_91_chunk._ts_meta_sequence_num, compress_hyper_2_91_chunk.segment_by_value, compress_hyper_2_91_chunk._ts_meta_min_1, compress_hyper_2_91_chunk._ts_meta_max_1, compress_hyper_2_91_chunk."time", compress_hyper_2_91_chunk.int_value, compress_hyper_2_91_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_82_chunk."time")) + Output: (PARTIAL sum(_hyper_1_82_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_82_chunk Output: _hyper_1_82_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_92_chunk Output: compress_hyper_2_92_chunk._ts_meta_count, compress_hyper_2_92_chunk._ts_meta_sequence_num, compress_hyper_2_92_chunk.segment_by_value, compress_hyper_2_92_chunk._ts_meta_min_1, compress_hyper_2_92_chunk._ts_meta_max_1, compress_hyper_2_92_chunk."time", compress_hyper_2_92_chunk.int_value, compress_hyper_2_92_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_83_chunk."time")) + Output: (PARTIAL sum(_hyper_1_83_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_83_chunk Output: _hyper_1_83_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_93_chunk Output: compress_hyper_2_93_chunk._ts_meta_count, compress_hyper_2_93_chunk._ts_meta_sequence_num, compress_hyper_2_93_chunk.segment_by_value, compress_hyper_2_93_chunk._ts_meta_min_1, compress_hyper_2_93_chunk._ts_meta_max_1, compress_hyper_2_93_chunk."time", compress_hyper_2_93_chunk.int_value, compress_hyper_2_93_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_84_chunk."time")) + Output: (PARTIAL sum(_hyper_1_84_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_84_chunk Output: _hyper_1_84_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_94_chunk Output: compress_hyper_2_94_chunk._ts_meta_count, compress_hyper_2_94_chunk._ts_meta_sequence_num, compress_hyper_2_94_chunk.segment_by_value, compress_hyper_2_94_chunk._ts_meta_min_1, compress_hyper_2_94_chunk._ts_meta_max_1, compress_hyper_2_94_chunk."time", compress_hyper_2_94_chunk.int_value, compress_hyper_2_94_chunk.float_value -> Custom Scan 
(VectorAgg) - Output: (PARTIAL sum(_hyper_1_85_chunk."time")) + Output: (PARTIAL sum(_hyper_1_85_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_85_chunk Output: _hyper_1_85_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_95_chunk Output: compress_hyper_2_95_chunk._ts_meta_count, compress_hyper_2_95_chunk._ts_meta_sequence_num, compress_hyper_2_95_chunk.segment_by_value, compress_hyper_2_95_chunk._ts_meta_min_1, compress_hyper_2_95_chunk._ts_meta_max_1, compress_hyper_2_95_chunk."time", compress_hyper_2_95_chunk.int_value, compress_hyper_2_95_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_86_chunk."time")) + Output: (PARTIAL sum(_hyper_1_86_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_86_chunk Output: _hyper_1_86_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_96_chunk Output: compress_hyper_2_96_chunk._ts_meta_count, compress_hyper_2_96_chunk._ts_meta_sequence_num, compress_hyper_2_96_chunk.segment_by_value, compress_hyper_2_96_chunk._ts_meta_min_1, compress_hyper_2_96_chunk._ts_meta_max_1, compress_hyper_2_96_chunk."time", compress_hyper_2_96_chunk.int_value, compress_hyper_2_96_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_87_chunk."time")) + Output: (PARTIAL sum(_hyper_1_87_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_87_chunk Output: _hyper_1_87_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_97_chunk Output: compress_hyper_2_97_chunk._ts_meta_count, compress_hyper_2_97_chunk._ts_meta_sequence_num, compress_hyper_2_97_chunk.segment_by_value, compress_hyper_2_97_chunk._ts_meta_min_1, compress_hyper_2_97_chunk._ts_meta_max_1, compress_hyper_2_97_chunk."time", compress_hyper_2_97_chunk.int_value, compress_hyper_2_97_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_88_chunk."time")) + Output: (PARTIAL sum(_hyper_1_88_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_88_chunk Output: _hyper_1_88_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_98_chunk Output: compress_hyper_2_98_chunk._ts_meta_count, compress_hyper_2_98_chunk._ts_meta_sequence_num, compress_hyper_2_98_chunk.segment_by_value, compress_hyper_2_98_chunk._ts_meta_min_1, compress_hyper_2_98_chunk._ts_meta_max_1, compress_hyper_2_98_chunk."time", compress_hyper_2_98_chunk.int_value, compress_hyper_2_98_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_89_chunk."time")) + Output: (PARTIAL sum(_hyper_1_89_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_89_chunk Output: _hyper_1_89_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_99_chunk Output: compress_hyper_2_99_chunk._ts_meta_count, compress_hyper_2_99_chunk._ts_meta_sequence_num, compress_hyper_2_99_chunk.segment_by_value, compress_hyper_2_99_chunk._ts_meta_min_1, compress_hyper_2_99_chunk._ts_meta_max_1, compress_hyper_2_99_chunk."time", compress_hyper_2_99_chunk.int_value, compress_hyper_2_99_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_90_chunk."time")) + Output: (PARTIAL sum(_hyper_1_90_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_90_chunk Output: 
_hyper_1_90_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_100_chunk @@ -2233,67 +2233,67 @@ SELECT sum(int_value) FROM testtable; QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_81_chunk."time") + Output: sum(_hyper_1_81_chunk.int_value) -> Gather - Output: (PARTIAL sum(_hyper_1_81_chunk."time")) + Output: (PARTIAL sum(_hyper_1_81_chunk.int_value)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_81_chunk."time")) + Output: (PARTIAL sum(_hyper_1_81_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_81_chunk Output: _hyper_1_81_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_91_chunk Output: compress_hyper_2_91_chunk._ts_meta_count, compress_hyper_2_91_chunk._ts_meta_sequence_num, compress_hyper_2_91_chunk.segment_by_value, compress_hyper_2_91_chunk._ts_meta_min_1, compress_hyper_2_91_chunk._ts_meta_max_1, compress_hyper_2_91_chunk."time", compress_hyper_2_91_chunk.int_value, compress_hyper_2_91_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_82_chunk."time")) + Output: (PARTIAL sum(_hyper_1_82_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_82_chunk Output: _hyper_1_82_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_92_chunk Output: compress_hyper_2_92_chunk._ts_meta_count, compress_hyper_2_92_chunk._ts_meta_sequence_num, compress_hyper_2_92_chunk.segment_by_value, compress_hyper_2_92_chunk._ts_meta_min_1, compress_hyper_2_92_chunk._ts_meta_max_1, compress_hyper_2_92_chunk."time", compress_hyper_2_92_chunk.int_value, compress_hyper_2_92_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_83_chunk."time")) + Output: (PARTIAL sum(_hyper_1_83_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_83_chunk Output: _hyper_1_83_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_93_chunk Output: compress_hyper_2_93_chunk._ts_meta_count, compress_hyper_2_93_chunk._ts_meta_sequence_num, compress_hyper_2_93_chunk.segment_by_value, compress_hyper_2_93_chunk._ts_meta_min_1, compress_hyper_2_93_chunk._ts_meta_max_1, compress_hyper_2_93_chunk."time", compress_hyper_2_93_chunk.int_value, compress_hyper_2_93_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_84_chunk."time")) + Output: (PARTIAL sum(_hyper_1_84_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_84_chunk Output: _hyper_1_84_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_94_chunk Output: compress_hyper_2_94_chunk._ts_meta_count, compress_hyper_2_94_chunk._ts_meta_sequence_num, compress_hyper_2_94_chunk.segment_by_value, compress_hyper_2_94_chunk._ts_meta_min_1, compress_hyper_2_94_chunk._ts_meta_max_1, compress_hyper_2_94_chunk."time", compress_hyper_2_94_chunk.int_value, compress_hyper_2_94_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_85_chunk."time")) + Output: (PARTIAL sum(_hyper_1_85_chunk.int_value)) -> Custom 
Scan (DecompressChunk) on _timescaledb_internal._hyper_1_85_chunk Output: _hyper_1_85_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_95_chunk Output: compress_hyper_2_95_chunk._ts_meta_count, compress_hyper_2_95_chunk._ts_meta_sequence_num, compress_hyper_2_95_chunk.segment_by_value, compress_hyper_2_95_chunk._ts_meta_min_1, compress_hyper_2_95_chunk._ts_meta_max_1, compress_hyper_2_95_chunk."time", compress_hyper_2_95_chunk.int_value, compress_hyper_2_95_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_86_chunk."time")) + Output: (PARTIAL sum(_hyper_1_86_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_86_chunk Output: _hyper_1_86_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_96_chunk Output: compress_hyper_2_96_chunk._ts_meta_count, compress_hyper_2_96_chunk._ts_meta_sequence_num, compress_hyper_2_96_chunk.segment_by_value, compress_hyper_2_96_chunk._ts_meta_min_1, compress_hyper_2_96_chunk._ts_meta_max_1, compress_hyper_2_96_chunk."time", compress_hyper_2_96_chunk.int_value, compress_hyper_2_96_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_87_chunk."time")) + Output: (PARTIAL sum(_hyper_1_87_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_87_chunk Output: _hyper_1_87_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_97_chunk Output: compress_hyper_2_97_chunk._ts_meta_count, compress_hyper_2_97_chunk._ts_meta_sequence_num, compress_hyper_2_97_chunk.segment_by_value, compress_hyper_2_97_chunk._ts_meta_min_1, compress_hyper_2_97_chunk._ts_meta_max_1, compress_hyper_2_97_chunk."time", compress_hyper_2_97_chunk.int_value, compress_hyper_2_97_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_88_chunk."time")) + Output: (PARTIAL sum(_hyper_1_88_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_88_chunk Output: _hyper_1_88_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_98_chunk Output: compress_hyper_2_98_chunk._ts_meta_count, compress_hyper_2_98_chunk._ts_meta_sequence_num, compress_hyper_2_98_chunk.segment_by_value, compress_hyper_2_98_chunk._ts_meta_min_1, compress_hyper_2_98_chunk._ts_meta_max_1, compress_hyper_2_98_chunk."time", compress_hyper_2_98_chunk.int_value, compress_hyper_2_98_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_89_chunk."time")) + Output: (PARTIAL sum(_hyper_1_89_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_89_chunk Output: _hyper_1_89_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_99_chunk Output: compress_hyper_2_99_chunk._ts_meta_count, compress_hyper_2_99_chunk._ts_meta_sequence_num, compress_hyper_2_99_chunk.segment_by_value, compress_hyper_2_99_chunk._ts_meta_min_1, compress_hyper_2_99_chunk._ts_meta_max_1, compress_hyper_2_99_chunk."time", compress_hyper_2_99_chunk.int_value, compress_hyper_2_99_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_90_chunk."time")) + Output: (PARTIAL sum(_hyper_1_90_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_90_chunk Output: _hyper_1_90_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_100_chunk @@ -2452,67 +2452,67 @@ SELECT sum(segment_by_value1) FROM testtable2; QUERY PLAN 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_3_101_chunk."time") + Output: sum(_hyper_3_101_chunk.segment_by_value1) -> Gather - Output: (PARTIAL sum(_hyper_3_101_chunk."time")) + Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_101_chunk."time")) + Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk Output: _hyper_3_101_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_102_chunk."time")) + Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk Output: _hyper_3_102_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_103_chunk."time")) + Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk Output: _hyper_3_103_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_104_chunk."time")) + Output: (PARTIAL sum(_hyper_3_104_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk Output: _hyper_3_104_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", 
compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_105_chunk."time")) + Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk Output: _hyper_3_105_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_106_chunk."time")) + Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk Output: _hyper_3_106_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_107_chunk."time")) + Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk Output: _hyper_3_107_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_108_chunk."time")) + Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk Output: _hyper_3_108_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_109_chunk."time")) + Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk Output: _hyper_3_109_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_119_chunk Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, 
compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_110_chunk."time")) + Output: (PARTIAL sum(_hyper_3_110_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk Output: _hyper_3_110_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_120_chunk @@ -2530,67 +2530,67 @@ SELECT sum(segment_by_value2) FROM testtable2; QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_3_101_chunk."time") + Output: sum(_hyper_3_101_chunk.segment_by_value2) -> Gather - Output: (PARTIAL sum(_hyper_3_101_chunk."time")) + Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value2)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_101_chunk."time")) + Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value2)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk Output: _hyper_3_101_chunk.segment_by_value2 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_102_chunk."time")) + Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value2)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk Output: _hyper_3_102_chunk.segment_by_value2 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_103_chunk."time")) + Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value2)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk Output: _hyper_3_103_chunk.segment_by_value2 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_104_chunk."time")) + Output: (PARTIAL 
sum(_hyper_3_104_chunk.segment_by_value2)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk Output: _hyper_3_104_chunk.segment_by_value2 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_105_chunk."time")) + Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value2)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk Output: _hyper_3_105_chunk.segment_by_value2 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_106_chunk."time")) + Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value2)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk Output: _hyper_3_106_chunk.segment_by_value2 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_107_chunk."time")) + Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value2)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk Output: _hyper_3_107_chunk.segment_by_value2 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_108_chunk."time")) + Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value2)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk Output: _hyper_3_108_chunk.segment_by_value2 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value -> Custom Scan 
(VectorAgg) - Output: (PARTIAL sum(_hyper_3_109_chunk."time")) + Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value2)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk Output: _hyper_3_109_chunk.segment_by_value2 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_119_chunk Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_110_chunk."time")) + Output: (PARTIAL sum(_hyper_3_110_chunk.segment_by_value2)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk Output: _hyper_3_110_chunk.segment_by_value2 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_120_chunk @@ -2609,76 +2609,76 @@ SELECT sum(segment_by_value1) FROM testtable2 WHERE segment_by_value1 > 0; QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_3_102_chunk."time") + Output: sum(_hyper_3_102_chunk.segment_by_value1) -> Gather - Output: (PARTIAL sum(_hyper_3_102_chunk."time")) + Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_102_chunk."time")) + Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk Output: _hyper_3_102_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value Filter: (compress_hyper_4_112_chunk.segment_by_value1 > 0) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_103_chunk."time")) + Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk Output: _hyper_3_103_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value Filter: (compress_hyper_4_113_chunk.segment_by_value1 > 0) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_104_chunk."time")) + Output: (PARTIAL 
sum(_hyper_3_104_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk Output: _hyper_3_104_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value Filter: (compress_hyper_4_114_chunk.segment_by_value1 > 0) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_105_chunk."time")) + Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk Output: _hyper_3_105_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value Filter: (compress_hyper_4_115_chunk.segment_by_value1 > 0) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_106_chunk."time")) + Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk Output: _hyper_3_106_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value Filter: (compress_hyper_4_116_chunk.segment_by_value1 > 0) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_107_chunk."time")) + Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk Output: _hyper_3_107_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value Filter: (compress_hyper_4_117_chunk.segment_by_value1 > 0) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_108_chunk."time")) + Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk Output: _hyper_3_108_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, 
compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value Filter: (compress_hyper_4_118_chunk.segment_by_value1 > 0) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_109_chunk."time")) + Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk Output: _hyper_3_109_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_119_chunk Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value Filter: (compress_hyper_4_119_chunk.segment_by_value1 > 0) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_110_chunk."time")) + Output: (PARTIAL sum(_hyper_3_110_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk Output: _hyper_3_110_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_120_chunk Output: compress_hyper_4_120_chunk._ts_meta_count, compress_hyper_4_120_chunk._ts_meta_sequence_num, compress_hyper_4_120_chunk.segment_by_value1, compress_hyper_4_120_chunk.segment_by_value2, compress_hyper_4_120_chunk._ts_meta_min_1, compress_hyper_4_120_chunk._ts_meta_max_1, compress_hyper_4_120_chunk."time", compress_hyper_4_120_chunk.int_value, compress_hyper_4_120_chunk.float_value Filter: (compress_hyper_4_120_chunk.segment_by_value1 > 0) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_101_chunk."time")) + Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk Output: _hyper_3_101_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk @@ -2691,76 +2691,76 @@ SELECT sum(segment_by_value1) FROM testtable2 WHERE segment_by_value1 > 0 AND se QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_3_101_chunk."time") + Output: sum(_hyper_3_101_chunk.segment_by_value1) -> Gather - Output: (PARTIAL sum(_hyper_3_101_chunk."time")) + Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_101_chunk."time")) + Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk Output: _hyper_3_101_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, 
compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value Filter: ((compress_hyper_4_111_chunk.segment_by_value1 > 0) AND (compress_hyper_4_111_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_102_chunk."time")) + Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk Output: _hyper_3_102_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value Filter: ((compress_hyper_4_112_chunk.segment_by_value1 > 0) AND (compress_hyper_4_112_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_103_chunk."time")) + Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk Output: _hyper_3_103_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value Filter: ((compress_hyper_4_113_chunk.segment_by_value1 > 0) AND (compress_hyper_4_113_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_104_chunk."time")) + Output: (PARTIAL sum(_hyper_3_104_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk Output: _hyper_3_104_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value Filter: ((compress_hyper_4_114_chunk.segment_by_value1 > 0) AND (compress_hyper_4_114_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_105_chunk."time")) + Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk Output: _hyper_3_105_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, 
compress_hyper_4_115_chunk.float_value Filter: ((compress_hyper_4_115_chunk.segment_by_value1 > 0) AND (compress_hyper_4_115_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_106_chunk."time")) + Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk Output: _hyper_3_106_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value Filter: ((compress_hyper_4_116_chunk.segment_by_value1 > 0) AND (compress_hyper_4_116_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_107_chunk."time")) + Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk Output: _hyper_3_107_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value Filter: ((compress_hyper_4_117_chunk.segment_by_value1 > 0) AND (compress_hyper_4_117_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_108_chunk."time")) + Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk Output: _hyper_3_108_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value Filter: ((compress_hyper_4_118_chunk.segment_by_value1 > 0) AND (compress_hyper_4_118_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_109_chunk."time")) + Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk Output: _hyper_3_109_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_119_chunk Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value Filter: ((compress_hyper_4_119_chunk.segment_by_value1 > 0) AND (compress_hyper_4_119_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - 
Output: (PARTIAL sum(_hyper_3_110_chunk."time")) + Output: (PARTIAL sum(_hyper_3_110_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk Output: _hyper_3_110_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_120_chunk @@ -2773,76 +2773,76 @@ SELECT sum(segment_by_value1) FROM testtable2 WHERE segment_by_value1 > 0 AND se QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_3_101_chunk."time") + Output: sum(_hyper_3_101_chunk.segment_by_value1) -> Gather - Output: (PARTIAL sum(_hyper_3_101_chunk."time")) + Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_101_chunk."time")) + Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk Output: _hyper_3_101_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value Filter: ((compress_hyper_4_111_chunk.segment_by_value1 > 0) AND (compress_hyper_4_111_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_102_chunk."time")) + Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk Output: _hyper_3_102_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value Filter: ((compress_hyper_4_112_chunk.segment_by_value1 > 0) AND (compress_hyper_4_112_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_103_chunk."time")) + Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk Output: _hyper_3_103_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value Filter: 
((compress_hyper_4_113_chunk.segment_by_value1 > 0) AND (compress_hyper_4_113_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_104_chunk."time")) + Output: (PARTIAL sum(_hyper_3_104_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk Output: _hyper_3_104_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value Filter: ((compress_hyper_4_114_chunk.segment_by_value1 > 0) AND (compress_hyper_4_114_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_105_chunk."time")) + Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk Output: _hyper_3_105_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value Filter: ((compress_hyper_4_115_chunk.segment_by_value1 > 0) AND (compress_hyper_4_115_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_106_chunk."time")) + Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk Output: _hyper_3_106_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value Filter: ((compress_hyper_4_116_chunk.segment_by_value1 > 0) AND (compress_hyper_4_116_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_107_chunk."time")) + Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk Output: _hyper_3_107_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value Filter: ((compress_hyper_4_117_chunk.segment_by_value1 > 0) AND (compress_hyper_4_117_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_108_chunk."time")) 
+ Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk Output: _hyper_3_108_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value Filter: ((compress_hyper_4_118_chunk.segment_by_value1 > 0) AND (compress_hyper_4_118_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_109_chunk."time")) + Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk Output: _hyper_3_109_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_119_chunk Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value Filter: ((compress_hyper_4_119_chunk.segment_by_value1 > 0) AND (compress_hyper_4_119_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_110_chunk."time")) + Output: (PARTIAL sum(_hyper_3_110_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk Output: _hyper_3_110_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_120_chunk @@ -2860,80 +2860,80 @@ SELECT sum(segment_by_value1) FROM testtable2 WHERE segment_by_value1 > 1000 AND Finalize Aggregate Output: sum(_hyper_3_101_chunk.segment_by_value1) -> Append - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_101_chunk.segment_by_value1) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk Output: _hyper_3_101_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_101_chunk.int_value > 1000) -> Index Scan using compress_hyper_4_111_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_111_chunk Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value Index Cond: (compress_hyper_4_111_chunk.segment_by_value1 > 1000) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_102_chunk.segment_by_value1) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk Output: _hyper_3_102_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_102_chunk.int_value > 1000) -> Index Scan using compress_hyper_4_112_chunk_segment_by_value1_segment_by_val_idx on 
_timescaledb_internal.compress_hyper_4_112_chunk Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value Index Cond: (compress_hyper_4_112_chunk.segment_by_value1 > 1000) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_103_chunk.segment_by_value1) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk Output: _hyper_3_103_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_103_chunk.int_value > 1000) -> Index Scan using compress_hyper_4_113_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_113_chunk Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value Index Cond: (compress_hyper_4_113_chunk.segment_by_value1 > 1000) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_104_chunk.segment_by_value1) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_104_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk Output: _hyper_3_104_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_104_chunk.int_value > 1000) -> Index Scan using compress_hyper_4_114_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_114_chunk Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value Index Cond: (compress_hyper_4_114_chunk.segment_by_value1 > 1000) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_105_chunk.segment_by_value1) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk Output: _hyper_3_105_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_105_chunk.int_value > 1000) -> Index Scan using compress_hyper_4_115_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_115_chunk Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value Index Cond: (compress_hyper_4_115_chunk.segment_by_value1 > 1000) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_106_chunk.segment_by_value1) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on 
_timescaledb_internal._hyper_3_106_chunk Output: _hyper_3_106_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_106_chunk.int_value > 1000) -> Index Scan using compress_hyper_4_116_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_116_chunk Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value Index Cond: (compress_hyper_4_116_chunk.segment_by_value1 > 1000) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_107_chunk.segment_by_value1) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk Output: _hyper_3_107_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_107_chunk.int_value > 1000) -> Index Scan using compress_hyper_4_117_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_117_chunk Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value Index Cond: (compress_hyper_4_117_chunk.segment_by_value1 > 1000) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_108_chunk.segment_by_value1) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk Output: _hyper_3_108_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_108_chunk.int_value > 1000) -> Index Scan using compress_hyper_4_118_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_118_chunk Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value Index Cond: (compress_hyper_4_118_chunk.segment_by_value1 > 1000) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_109_chunk.segment_by_value1) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk Output: _hyper_3_109_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_109_chunk.int_value > 1000) -> Index Scan using compress_hyper_4_119_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_119_chunk Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value Index Cond: 
(compress_hyper_4_119_chunk.segment_by_value1 > 1000) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_110_chunk.segment_by_value1) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_110_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk Output: _hyper_3_110_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_110_chunk.int_value > 1000) From dfb92af43fe2de5e2e202bc359fec557d9c3a8c8 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Fri, 29 Mar 2024 19:02:22 +0100 Subject: [PATCH 23/81] Revert "disable filters" This reverts commit 7852d55f3061b82a3ce1cf8d7575f64c2d14aa0b. --- tsl/src/nodes/vector_agg/plan.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/tsl/src/nodes/vector_agg/plan.c b/tsl/src/nodes/vector_agg/plan.c index 5259a9858a9..374852bfa1d 100644 --- a/tsl/src/nodes/vector_agg/plan.c +++ b/tsl/src/nodes/vector_agg/plan.c @@ -204,12 +204,6 @@ try_insert_vector_agg_node(Plan *plan) return plan; } - if (linitial(custom->custom_exprs) != NIL) - { - /* Even the vectorized filters are not supported at the moment. */ - return plan; - } - if (agg->numCols != 0) { /* No GROUP BY support for now. */ From 6ef84c1f4b88fa04fa0649b0e34f25e774f6dd9e Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Fri, 29 Mar 2024 19:01:07 +0100 Subject: [PATCH 24/81] support filters? --- tsl/src/compression/arrow_c_data_interface.h | 19 +++ tsl/src/nodes/vector_agg/exec.c | 163 +++++++++---------- tsl/test/expected/vector_agg_default.out | 38 ++++- tsl/test/sql/vector_agg_default.sql | 19 ++- 4 files changed, 140 insertions(+), 99 deletions(-) diff --git a/tsl/src/compression/arrow_c_data_interface.h b/tsl/src/compression/arrow_c_data_interface.h index 64393c91b60..8178881ab47 100644 --- a/tsl/src/compression/arrow_c_data_interface.h +++ b/tsl/src/compression/arrow_c_data_interface.h @@ -160,3 +160,22 @@ pad_to_multiple(uint64 pad_to, uint64 source_value) { return ((source_value + pad_to - 1) / pad_to) * pad_to; } + +static inline size_t +arrow_num_valid(uint64 *bitmap, size_t total_rows) +{ + /* Declare the accumulator outside the #ifdef so both branches compile. */ + uint64 num_valid = 0; +#ifdef HAVE__BUILTIN_POPCOUNT + const uint64 words = pad_to_multiple(64, total_rows) / 64; + for (uint64 i = 0; i < words; i++) + { + num_valid += __builtin_popcountll(bitmap[i]); + } +#else + for (size_t i = 0; i < total_rows; i++) + { + num_valid += arrow_row_is_valid(bitmap, i); + } +#endif + return num_valid; +} diff --git a/tsl/src/nodes/vector_agg/exec.c b/tsl/src/nodes/vector_agg/exec.c index f5ca3b4dba9..4fa2ebc1a10 100644 --- a/tsl/src/nodes/vector_agg/exec.c +++ b/tsl/src/nodes/vector_agg/exec.c @@ -45,7 +45,7 @@ vector_agg_rescan(CustomScanState *node) typedef struct { void (*agg_init)(Datum *agg_value, bool *agg_isnull); - void (*agg_vector_all)(ArrowArray *vector, Datum *agg_value, bool *agg_isnull); + void (*agg_vector)(ArrowArray *vector, uint64 *filter, Datum *agg_value, bool *agg_isnull); void (*agg_const)(Datum constvalue, bool constisnull, int n, Datum *agg_value, bool *agg_isnull); } VectorAggregate; @@ -58,7 +58,7 @@ int4_sum_init(Datum *agg_value, bool *agg_isnull) } static void -int4_sum_vector_all(ArrowArray *vector, Datum *agg_value, bool *agg_isnull) +int4_sum_vector(ArrowArray *vector, uint64 *filter, Datum *agg_value, bool *agg_isnull) { Assert(vector != NULL); Assert(vector->length > 0); @@ -94,7 +94,8 @@ int4_sum_vector_all(ArrowArray *vector, Datum *agg_value, bool *agg_isnull) { const int row =
outer + inner; const int32 arrow_value = ((int32 *) vector->buffers[1])[row]; - batch_sum += arrow_value * arrow_row_is_valid(vector->buffers[0], row); + const bool passes_filter = filter ? arrow_row_is_valid(filter, row) : true; + batch_sum += passes_filter * arrow_value * arrow_row_is_valid(vector->buffers[0], row); } } #undef INNER_LOOP_SIZE @@ -143,33 +144,67 @@ int4_sum_const(Datum constvalue, bool constisnull, int n, Datum *agg_value, bool *agg_value = Int64GetDatum(tmp); } -/* - * Perform a vectorized aggregation. - */ +static VectorAggregate int4_sum_agg = { + .agg_init = int4_sum_init, + .agg_const = int4_sum_const, + .agg_vector = int4_sum_vector, +}; + static TupleTableSlot * -perform_vectorized_agg(VectorAggregate agg, CustomScanState *vector_agg_state, - DecompressChunkState *decompress_state) +vector_agg_exec(CustomScanState *vector_agg_state) { - DecompressContext *dcontext = &decompress_state->decompress_context; + DecompressChunkState *decompress_state = + (DecompressChunkState *) linitial(vector_agg_state->custom_ps); - /* Two columns are decompressed, the column that needs to be aggregated and the count column */ - Assert(dcontext->num_total_columns == 2); + /* + * The aggregated targetlist with Aggrefs is in the custom scan targetlist + * of the custom scan node that is performing the vectorized aggregation. + * We do this to avoid projections at this node, because the postgres + * projection functions complain when they see an Aggref in a custom + * node output targetlist. + * The output targetlist, in turn, consists of just the INDEX_VAR references + * into the custom_scan_tlist. + */ + List *aggregated_tlist = castNode(CustomScan, vector_agg_state->ss.ps.plan)->custom_scan_tlist; + Assert(list_length(aggregated_tlist) == 1); + + /* Checked by planner */ + Assert(ts_guc_enable_vectorized_aggregation); + Assert(ts_guc_enable_bulk_decompression); - CompressionColumnDescription *value_column_description = &dcontext->template_columns[0]; - CompressionColumnDescription *count_column_description = &dcontext->template_columns[1]; - if (count_column_description->type != COUNT_COLUMN) + /* Determine which kind of vectorized aggregation we should perform */ + TargetEntry *tlentry = (TargetEntry *) linitial(aggregated_tlist); + Assert(IsA(tlentry->expr, Aggref)); + Aggref *aggref = castNode(Aggref, tlentry->expr); + + my_print(aggref); + + /* Partial result is a int8 */ + Assert(aggref->aggtranstype == INT8OID); + + Assert(list_length(aggref->args) == 1); + + /* The aggregate should be a partial aggregate */ + Assert(aggref->aggsplit == AGGSPLIT_INITIAL_SERIAL); + + Var *var = castNode(Var, castNode(TargetEntry, linitial(aggref->args))->expr); + + DecompressContext *dcontext = &decompress_state->decompress_context; + + CompressionColumnDescription *value_column_description = NULL; + for (int i = 0; i < dcontext->num_total_columns; i++) { - /* - * The count and value columns can go in different order based on their - * order in compressed chunk, so check which one we are seeing. 
- */ - CompressionColumnDescription *tmp = value_column_description; - value_column_description = count_column_description; - count_column_description = tmp; + CompressionColumnDescription *current_column = &dcontext->template_columns[i]; + if (current_column->output_attno == var->varattno) + { + value_column_description = current_column; + break; + } } + Ensure(value_column_description != NULL, "aggregated compressed column not found"); + Assert(value_column_description->type == COMPRESSED_COLUMN || value_column_description->type == SEGMENTBY_COLUMN); - Assert(count_column_description->type == COUNT_COLUMN); BatchQueue *batch_queue = decompress_state->batch_queue; DecompressBatchState *batch_state = batch_array_get_at(&batch_queue->batch_array, 0); @@ -178,7 +213,10 @@ perform_vectorized_agg(VectorAggregate agg, CustomScanState *vector_agg_state, TupleTableSlot *aggregated_slot = vector_agg_state->ss.ps.ps_ResultTupleSlot; Assert(aggregated_slot->tts_tupleDescriptor->natts == 1); - agg.agg_init(&aggregated_slot->tts_values[0], &aggregated_slot->tts_isnull[0]); + Assert(aggref->aggfnoid == F_SUM_INT4); + VectorAggregate *agg = &int4_sum_agg; + + agg->agg_init(&aggregated_slot->tts_values[0], &aggregated_slot->tts_isnull[0]); ExecClearTuple(aggregated_slot); TupleTableSlot *compressed_slot = ExecProcNode(linitial(decompress_state->csstate.custom_ps)); @@ -211,18 +249,28 @@ perform_vectorized_agg(VectorAggregate agg, CustomScanState *vector_agg_state, /* * To calculate the sum for a segment by value or default compressed * column value, we need to multiply this value with the number of - * compressed tuples in this batch. + * passing decompressed tuples in this batch. */ + int n = batch_state->total_batch_rows; + if (batch_state->vector_qual_result) + { + n = arrow_num_valid(batch_state->vector_qual_result, n); + Assert(n > 0); + } + int offs = AttrNumberGetAttrOffset(value_column_description->output_attno); - agg.agg_const(batch_state->decompressed_scan_slot_data.base.tts_values[offs], - batch_state->decompressed_scan_slot_data.base.tts_isnull[offs], - batch_state->total_batch_rows, - &aggregated_slot->tts_values[0], - &aggregated_slot->tts_isnull[0]); + agg->agg_const(batch_state->decompressed_scan_slot_data.base.tts_values[offs], + batch_state->decompressed_scan_slot_data.base.tts_isnull[offs], + n, + &aggregated_slot->tts_values[0], + &aggregated_slot->tts_isnull[0]); } else { - agg.agg_vector_all(arrow, &aggregated_slot->tts_values[0], &aggregated_slot->tts_isnull[0]); + agg->agg_vector(arrow, + batch_state->vector_qual_result, + &aggregated_slot->tts_values[0], + &aggregated_slot->tts_isnull[0]); } compressed_batch_discard_tuples(batch_state); @@ -233,61 +281,6 @@ perform_vectorized_agg(VectorAggregate agg, CustomScanState *vector_agg_state, return aggregated_slot; } -static TupleTableSlot * -vector_agg_exec(CustomScanState *vector_agg_state) -{ - DecompressChunkState *decompress_state = - (DecompressChunkState *) linitial(vector_agg_state->custom_ps); - - /* - * The aggregated targetlist with Aggrefs is in the custom scan targetlist - * of the custom scan node that is performing the vectorized aggregation. - * We do this to avoid projections at this node, because the postgres - * projection functions complain when they see an Aggref in a custom - * node output targetlist. - * The output targetlist, in turn, consists of just the INDEX_VAR references - * into the custom_scan_tlist. 
- */ - List *aggregated_tlist = castNode(CustomScan, vector_agg_state->ss.ps.plan)->custom_scan_tlist; - Assert(list_length(aggregated_tlist) == 1); - - /* Checked by planner */ - Assert(ts_guc_enable_vectorized_aggregation); - Assert(ts_guc_enable_bulk_decompression); - - /* Determine which kind of vectorized aggregation we should perform */ - TargetEntry *tlentry = (TargetEntry *) linitial(aggregated_tlist); - Assert(IsA(tlentry->expr, Aggref)); - Aggref *aggref = castNode(Aggref, tlentry->expr); - - /* Partial result is a int8 */ - Assert(aggref->aggtranstype == INT8OID); - - Assert(list_length(aggref->args) == 1); - - /* The aggregate should be a partial aggregate */ - Assert(aggref->aggsplit == AGGSPLIT_INITIAL_SERIAL); - - switch (aggref->aggfnoid) - { - case F_SUM_INT4: - { - VectorAggregate agg = { - .agg_init = int4_sum_init, - .agg_const = int4_sum_const, - .agg_vector_all = int4_sum_vector_all, - }; - return perform_vectorized_agg(agg, vector_agg_state, decompress_state); - } - default: - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("vectorized aggregation for function %d is not supported", - aggref->aggfnoid))); - pg_unreachable(); - } -} - static void vector_agg_explain(CustomScanState *node, List *ancestors, ExplainState *es) { diff --git a/tsl/test/expected/vector_agg_default.out b/tsl/test/expected/vector_agg_default.out index bf8211e84a3..95659df9f8a 100644 --- a/tsl/test/expected/vector_agg_default.out +++ b/tsl/test/expected/vector_agg_default.out @@ -1,7 +1,7 @@ -- This file and its contents are licensed under the Timescale License. -- Please see the included NOTICE for copyright information and -- LICENSE-TIMESCALE for a copy of the license. -create table t(a int); +create table t(a int, b int); select create_hypertable('t', 'a', chunk_time_interval => 1000); NOTICE: adding not-null constraint to column "a" create_hypertable @@ -9,7 +9,7 @@ NOTICE: adding not-null constraint to column "a" (1,public,t,t) (1 row) -insert into t select generate_series(1, 999); +insert into t select x, x % 5 from generate_series(1, 999) x; alter table t set (timescaledb.compress); WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. 
Please make sure you are not missing any indexes NOTICE: default segment by for hypertable "t" is set to "" @@ -20,8 +20,8 @@ select compress_chunk(show_chunks('t')); _timescaledb_internal._hyper_1_1_chunk (1 row) -alter table t add column b int default 7; -insert into t select x, 11 from generate_series(1001, 1999) x; +alter table t add column c int default 7; +insert into t select x, x % 5, 11 from generate_series(1001, 1999) x; select compress_chunk(show_chunks('t')); NOTICE: chunk "_hyper_1_1_chunk" is already compressed compress_chunk @@ -30,7 +30,7 @@ NOTICE: chunk "_hyper_1_1_chunk" is already compressed _timescaledb_internal._hyper_1_3_chunk (2 rows) -explain (costs off) select sum(b) from t; +explain (costs off) select sum(c) from t; QUERY PLAN ----------------------------------------------------------------------------- Finalize Aggregate @@ -45,12 +45,36 @@ explain (costs off) select sum(b) from t; -> Parallel Seq Scan on compress_hyper_2_4_chunk (10 rows) -select sum(b) from t; +select sum(c) from t; sum ------- 17982 (1 row) +select sum(c) from t where b >= 0; + sum +------- + 17982 +(1 row) + +select sum(c) from t where b = 0; + sum +------ + 3582 +(1 row) + +select sum(c) from t where b in (0, 1); + sum +------ + 7182 +(1 row) + +select sum(c) from t where b in (0, 1, 3); + sum +------- + 10782 +(1 row) + select decompress_chunk(show_chunks('t')); decompress_chunk ---------------------------------------- @@ -58,7 +82,7 @@ select decompress_chunk(show_chunks('t')); _timescaledb_internal._hyper_1_3_chunk (2 rows) -select sum(b) from t; +select sum(c) from t; sum ------- 17982 diff --git a/tsl/test/sql/vector_agg_default.sql b/tsl/test/sql/vector_agg_default.sql index ac5f6937ccf..f0dec294b9c 100644 --- a/tsl/test/sql/vector_agg_default.sql +++ b/tsl/test/sql/vector_agg_default.sql @@ -2,21 +2,26 @@ -- Please see the included NOTICE for copyright information and -- LICENSE-TIMESCALE for a copy of the license. 
-create table t(a int); +create table t(a int, b int); select create_hypertable('t', 'a', chunk_time_interval => 1000); -insert into t select generate_series(1, 999); +insert into t select x, x % 5 from generate_series(1, 999) x; alter table t set (timescaledb.compress); select compress_chunk(show_chunks('t')); -alter table t add column b int default 7; -insert into t select x, 11 from generate_series(1001, 1999) x; +alter table t add column c int default 7; +insert into t select x, x % 5, 11 from generate_series(1001, 1999) x; select compress_chunk(show_chunks('t')); -explain (costs off) select sum(b) from t; -select sum(b) from t; +explain (costs off) select sum(c) from t; +select sum(c) from t; + +select sum(c) from t where b >= 0; +select sum(c) from t where b = 0; +select sum(c) from t where b in (0, 1); +select sum(c) from t where b in (0, 1, 3); select decompress_chunk(show_chunks('t')); -select sum(b) from t; +select sum(c) from t; drop table t; From 4db7ceadb839ce9775d27e8442165fa1b63afe7d Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Fri, 29 Mar 2024 18:59:56 +0100 Subject: [PATCH 25/81] fix outer_var resolution --- tsl/src/nodes/vector_agg/plan.c | 25 +- tsl/test/expected/vectorized_aggregation.out | 464 +++++++++---------- 2 files changed, 244 insertions(+), 245 deletions(-) diff --git a/tsl/src/nodes/vector_agg/plan.c b/tsl/src/nodes/vector_agg/plan.c index 3663ca38b2e..5259a9858a9 100644 --- a/tsl/src/nodes/vector_agg/plan.c +++ b/tsl/src/nodes/vector_agg/plan.c @@ -62,7 +62,7 @@ build_trivial_custom_output_targetlist(List *scan_targetlist) } static Node * -replace_outer_special_vars_mutator(Node *node, void *context) +resolve_outer_special_vars_mutator(Node *node, void *context) { if (node == NULL) { @@ -71,7 +71,7 @@ replace_outer_special_vars_mutator(Node *node, void *context) if (!IsA(node, Var)) { - return expression_tree_mutator(node, replace_outer_special_vars_mutator, context); + return expression_tree_mutator(node, resolve_outer_special_vars_mutator, context); } Var *var = castNode(Var, node); @@ -80,22 +80,21 @@ replace_outer_special_vars_mutator(Node *node, void *context) return node; } - var = copyObject(var); - var->varno = DatumGetInt32(PointerGetDatum(context)); - return (Node *) var; + TargetEntry *decompress_chunk_tentry = + castNode(TargetEntry, list_nth(context, var->varattno - 1)); + Var *uncompressed_var = castNode(Var, decompress_chunk_tentry->expr); + return (Node *) copyObject(uncompressed_var); } /* - * Replace the OUTER_VAR special variables, that are used in the output - * targetlists of aggregation nodes, with the given other varno. + * Resolve the OUTER_VAR special variables, that are used in the output + * targetlists of aggregation nodes, replacing them with the uncompressed chunk + * variables. */ static List * -replace_outer_special_vars(List *input, int target_varno) +resolve_outer_special_vars(List *agg_tlist, List *outer_tlist) { - return castNode(List, - replace_outer_special_vars_mutator((Node *) input, - DatumGetPointer( - Int32GetDatum(target_varno)))); + return castNode(List, resolve_outer_special_vars_mutator((Node *) agg_tlist, outer_tlist)); } /* @@ -116,7 +115,7 @@ vector_agg_plan_create(Agg *agg, CustomScan *decompress_chunk) * the scan targetlists. 
*/ custom->custom_scan_tlist = - replace_outer_special_vars(agg->plan.targetlist, decompress_chunk->scan.scanrelid); + resolve_outer_special_vars(agg->plan.targetlist, decompress_chunk->scan.plan.targetlist); custom->scan.plan.targetlist = build_trivial_custom_output_targetlist(custom->custom_scan_tlist); diff --git a/tsl/test/expected/vectorized_aggregation.out b/tsl/test/expected/vectorized_aggregation.out index 680e6da3fb3..040f4a0a6ce 100644 --- a/tsl/test/expected/vectorized_aggregation.out +++ b/tsl/test/expected/vectorized_aggregation.out @@ -52,25 +52,25 @@ SELECT sum(segment_by_value) FROM testtable; QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_1_chunk."time") + Output: sum(_hyper_1_1_chunk.segment_by_value) -> Gather - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk Output: _hyper_1_1_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_2_chunk."time")) + Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk Output: _hyper_1_2_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_3_chunk."time")) + Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk Output: _hyper_1_3_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk @@ -111,24 +111,24 @@ SELECT sum(segment_by_value) FROM testtable WHERE segment_by_value > 0; QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_1_chunk."time") + Output: sum(_hyper_1_1_chunk.segment_by_value) -> Append -> Custom Scan (VectorAgg) - Output: (PARTIAL 
sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk Output: _hyper_1_1_chunk.segment_by_value -> Index Scan using compress_hyper_2_11_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_11_chunk Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value Index Cond: (compress_hyper_2_11_chunk.segment_by_value > 0) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_2_chunk."time")) + Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk Output: _hyper_1_2_chunk.segment_by_value -> Index Scan using compress_hyper_2_12_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_12_chunk Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value Index Cond: (compress_hyper_2_12_chunk.segment_by_value > 0) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_3_chunk."time")) + Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk Output: _hyper_1_3_chunk.segment_by_value -> Index Scan using compress_hyper_2_13_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_13_chunk @@ -179,24 +179,24 @@ SELECT sum(segment_by_value) FROM testtable WHERE segment_by_value > 0 AND int_v Finalize Aggregate Output: sum(_hyper_1_1_chunk.segment_by_value) -> Append - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_1_chunk.segment_by_value) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk Output: _hyper_1_1_chunk.segment_by_value Vectorized Filter: (_hyper_1_1_chunk.int_value > 0) -> Index Scan using compress_hyper_2_11_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_11_chunk Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value Index Cond: (compress_hyper_2_11_chunk.segment_by_value > 0) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_2_chunk.segment_by_value) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk Output: _hyper_1_2_chunk.segment_by_value Vectorized Filter: (_hyper_1_2_chunk.int_value > 0) -> Index Scan using compress_hyper_2_12_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_12_chunk Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, 
compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value Index Cond: (compress_hyper_2_12_chunk.segment_by_value > 0) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_3_chunk.segment_by_value) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk Output: _hyper_1_3_chunk.segment_by_value Vectorized Filter: (_hyper_1_3_chunk.int_value > 0) @@ -250,22 +250,22 @@ SELECT sum(segment_by_value) FROM testtable WHERE int_value > 0; Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) Workers Planned: 2 -> Parallel Append - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_1_chunk.segment_by_value) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk Output: _hyper_1_1_chunk.segment_by_value Vectorized Filter: (_hyper_1_1_chunk.int_value > 0) -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_2_chunk.segment_by_value) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk Output: _hyper_1_2_chunk.segment_by_value Vectorized Filter: (_hyper_1_2_chunk.int_value > 0) -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_3_chunk.segment_by_value) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk Output: _hyper_1_3_chunk.segment_by_value Vectorized Filter: (_hyper_1_3_chunk.int_value > 0) @@ -318,22 +318,22 @@ SELECT sum(segment_by_value) FROM testtable WHERE float_value > 0; Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) Workers Planned: 2 -> Parallel Append - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_1_chunk.segment_by_value) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk Output: _hyper_1_1_chunk.segment_by_value Vectorized Filter: (_hyper_1_1_chunk.float_value > '0'::double precision) -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", 
compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_2_chunk.segment_by_value) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk Output: _hyper_1_2_chunk.segment_by_value Vectorized Filter: (_hyper_1_2_chunk.float_value > '0'::double precision) -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_3_chunk.segment_by_value) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk Output: _hyper_1_3_chunk.segment_by_value Vectorized Filter: (_hyper_1_3_chunk.float_value > '0'::double precision) @@ -663,25 +663,25 @@ SELECT sum(int_value) FROM testtable; QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_1_chunk."time") + Output: sum(_hyper_1_1_chunk.int_value) -> Gather - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.int_value)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk Output: _hyper_1_1_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_2_chunk."time")) + Output: (PARTIAL sum(_hyper_1_2_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk Output: _hyper_1_2_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_3_chunk."time")) + Output: (PARTIAL sum(_hyper_1_3_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk Output: _hyper_1_3_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk @@ -814,67 +814,67 @@ SELECT sum(segment_by_value) FROM 
testtable; QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_1_chunk."time") + Output: sum(_hyper_1_1_chunk.segment_by_value) -> Gather - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk Output: _hyper_1_1_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_2_chunk."time")) + Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk Output: _hyper_1_2_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_3_chunk."time")) + Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk Output: _hyper_1_3_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_4_chunk."time")) + Output: (PARTIAL sum(_hyper_1_4_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk Output: _hyper_1_4_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_5_chunk."time")) + Output: (PARTIAL sum(_hyper_1_5_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk Output: _hyper_1_5_chunk.segment_by_value -> 
Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_6_chunk."time")) + Output: (PARTIAL sum(_hyper_1_6_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk Output: _hyper_1_6_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_7_chunk."time")) + Output: (PARTIAL sum(_hyper_1_7_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk Output: _hyper_1_7_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_8_chunk."time")) + Output: (PARTIAL sum(_hyper_1_8_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk Output: _hyper_1_8_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_9_chunk."time")) + Output: (PARTIAL sum(_hyper_1_9_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk Output: _hyper_1_9_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_10_chunk."time")) + Output: (PARTIAL sum(_hyper_1_10_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk Output: _hyper_1_10_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk @@ -893,67 +893,67 @@ SELECT sum(int_value) FROM testtable; QUERY PLAN 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_1_chunk."time") + Output: sum(_hyper_1_1_chunk.int_value) -> Gather - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.int_value)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk Output: _hyper_1_1_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_2_chunk."time")) + Output: (PARTIAL sum(_hyper_1_2_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk Output: _hyper_1_2_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_3_chunk."time")) + Output: (PARTIAL sum(_hyper_1_3_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk Output: _hyper_1_3_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_4_chunk."time")) + Output: (PARTIAL sum(_hyper_1_4_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk Output: _hyper_1_4_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_5_chunk."time")) + Output: (PARTIAL sum(_hyper_1_5_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk Output: _hyper_1_5_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk Output: 
compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_6_chunk."time")) + Output: (PARTIAL sum(_hyper_1_6_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk Output: _hyper_1_6_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_7_chunk."time")) + Output: (PARTIAL sum(_hyper_1_7_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk Output: _hyper_1_7_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_8_chunk."time")) + Output: (PARTIAL sum(_hyper_1_8_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk Output: _hyper_1_8_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_9_chunk."time")) + Output: (PARTIAL sum(_hyper_1_9_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk Output: _hyper_1_9_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_10_chunk."time")) + Output: (PARTIAL sum(_hyper_1_10_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk Output: _hyper_1_10_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk @@ -977,67 +977,67 @@ SELECT sum(segment_by_value) FROM testtable; QUERY PLAN 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_1_chunk."time") + Output: sum(_hyper_1_1_chunk.segment_by_value) -> Gather - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk Output: _hyper_1_1_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_2_chunk."time")) + Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk Output: _hyper_1_2_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_3_chunk."time")) + Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk Output: _hyper_1_3_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_4_chunk."time")) + Output: (PARTIAL sum(_hyper_1_4_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk Output: _hyper_1_4_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_5_chunk."time")) + Output: (PARTIAL sum(_hyper_1_5_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk Output: _hyper_1_5_chunk.segment_by_value -> Parallel Seq Scan on 
_timescaledb_internal.compress_hyper_2_15_chunk Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_6_chunk."time")) + Output: (PARTIAL sum(_hyper_1_6_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk Output: _hyper_1_6_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_7_chunk."time")) + Output: (PARTIAL sum(_hyper_1_7_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk Output: _hyper_1_7_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_8_chunk."time")) + Output: (PARTIAL sum(_hyper_1_8_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk Output: _hyper_1_8_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_9_chunk."time")) + Output: (PARTIAL sum(_hyper_1_9_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk Output: _hyper_1_9_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_10_chunk."time")) + Output: (PARTIAL sum(_hyper_1_10_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk Output: _hyper_1_10_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk @@ -1060,67 +1060,67 @@ SELECT sum(int_value) FROM testtable; QUERY PLAN 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_1_chunk."time") + Output: sum(_hyper_1_1_chunk.int_value) -> Gather - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.int_value)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk Output: _hyper_1_1_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_2_chunk."time")) + Output: (PARTIAL sum(_hyper_1_2_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk Output: _hyper_1_2_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_3_chunk."time")) + Output: (PARTIAL sum(_hyper_1_3_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk Output: _hyper_1_3_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_4_chunk."time")) + Output: (PARTIAL sum(_hyper_1_4_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk Output: _hyper_1_4_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_5_chunk."time")) + Output: (PARTIAL sum(_hyper_1_5_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk Output: _hyper_1_5_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk Output: 
compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_6_chunk."time")) + Output: (PARTIAL sum(_hyper_1_6_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk Output: _hyper_1_6_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_7_chunk."time")) + Output: (PARTIAL sum(_hyper_1_7_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk Output: _hyper_1_7_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_8_chunk."time")) + Output: (PARTIAL sum(_hyper_1_8_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk Output: _hyper_1_8_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_9_chunk."time")) + Output: (PARTIAL sum(_hyper_1_9_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk Output: _hyper_1_9_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_10_chunk."time")) + Output: (PARTIAL sum(_hyper_1_10_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk Output: _hyper_1_10_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk @@ -1378,67 +1378,67 @@ SELECT sum(int_value), sum(int_value) FROM testtable; QUERY PLAN 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_1_chunk."time"), sum(_hyper_1_1_chunk."time") + Output: sum(_hyper_1_1_chunk.int_value), sum(_hyper_1_1_chunk.int_value) -> Gather - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.int_value)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk Output: _hyper_1_1_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_2_chunk."time")) + Output: (PARTIAL sum(_hyper_1_2_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk Output: _hyper_1_2_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_3_chunk."time")) + Output: (PARTIAL sum(_hyper_1_3_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk Output: _hyper_1_3_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_4_chunk."time")) + Output: (PARTIAL sum(_hyper_1_4_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk Output: _hyper_1_4_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_5_chunk."time")) + Output: (PARTIAL sum(_hyper_1_5_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk Output: _hyper_1_5_chunk.int_value -> Parallel Seq Scan on 
_timescaledb_internal.compress_hyper_2_15_chunk Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_6_chunk."time")) + Output: (PARTIAL sum(_hyper_1_6_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk Output: _hyper_1_6_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_7_chunk."time")) + Output: (PARTIAL sum(_hyper_1_7_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk Output: _hyper_1_7_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_8_chunk."time")) + Output: (PARTIAL sum(_hyper_1_8_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk Output: _hyper_1_8_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_9_chunk."time")) + Output: (PARTIAL sum(_hyper_1_9_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk Output: _hyper_1_9_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_10_chunk."time")) + Output: (PARTIAL sum(_hyper_1_10_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk Output: _hyper_1_10_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk @@ -1455,67 +1455,67 @@ SELECT sum(segment_by_value), sum(segment_by_value) FROM testtable; QUERY PLAN 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_1_chunk."time"), sum(_hyper_1_1_chunk."time") + Output: sum(_hyper_1_1_chunk.segment_by_value), sum(_hyper_1_1_chunk.segment_by_value) -> Gather - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_1_chunk."time")) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk Output: _hyper_1_1_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_2_chunk."time")) + Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk Output: _hyper_1_2_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_3_chunk."time")) + Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk Output: _hyper_1_3_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_4_chunk."time")) + Output: (PARTIAL sum(_hyper_1_4_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk Output: _hyper_1_4_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_5_chunk."time")) + Output: (PARTIAL sum(_hyper_1_5_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk 
Output: _hyper_1_5_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_6_chunk."time")) + Output: (PARTIAL sum(_hyper_1_6_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk Output: _hyper_1_6_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_7_chunk."time")) + Output: (PARTIAL sum(_hyper_1_7_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk Output: _hyper_1_7_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_8_chunk."time")) + Output: (PARTIAL sum(_hyper_1_8_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk Output: _hyper_1_8_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_9_chunk."time")) + Output: (PARTIAL sum(_hyper_1_9_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk Output: _hyper_1_9_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_10_chunk."time")) + Output: (PARTIAL sum(_hyper_1_10_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk Output: _hyper_1_10_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk @@ -1859,76 +1859,76 @@ SELECT sum(int_value) FROM testtable WHERE segment_by_value > 5; QUERY PLAN 
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_41_chunk."time") + Output: sum(_hyper_1_41_chunk.int_value) -> Gather - Output: (PARTIAL sum(_hyper_1_41_chunk."time")) + Output: (PARTIAL sum(_hyper_1_41_chunk.int_value)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_41_chunk."time")) + Output: (PARTIAL sum(_hyper_1_41_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_41_chunk Output: _hyper_1_41_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_51_chunk Output: compress_hyper_2_51_chunk._ts_meta_count, compress_hyper_2_51_chunk._ts_meta_sequence_num, compress_hyper_2_51_chunk.segment_by_value, compress_hyper_2_51_chunk._ts_meta_min_1, compress_hyper_2_51_chunk._ts_meta_max_1, compress_hyper_2_51_chunk."time", compress_hyper_2_51_chunk.int_value, compress_hyper_2_51_chunk.float_value Filter: (compress_hyper_2_51_chunk.segment_by_value > 5) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_42_chunk."time")) + Output: (PARTIAL sum(_hyper_1_42_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_42_chunk Output: _hyper_1_42_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_52_chunk Output: compress_hyper_2_52_chunk._ts_meta_count, compress_hyper_2_52_chunk._ts_meta_sequence_num, compress_hyper_2_52_chunk.segment_by_value, compress_hyper_2_52_chunk._ts_meta_min_1, compress_hyper_2_52_chunk._ts_meta_max_1, compress_hyper_2_52_chunk."time", compress_hyper_2_52_chunk.int_value, compress_hyper_2_52_chunk.float_value Filter: (compress_hyper_2_52_chunk.segment_by_value > 5) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_43_chunk."time")) + Output: (PARTIAL sum(_hyper_1_43_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_43_chunk Output: _hyper_1_43_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_53_chunk Output: compress_hyper_2_53_chunk._ts_meta_count, compress_hyper_2_53_chunk._ts_meta_sequence_num, compress_hyper_2_53_chunk.segment_by_value, compress_hyper_2_53_chunk._ts_meta_min_1, compress_hyper_2_53_chunk._ts_meta_max_1, compress_hyper_2_53_chunk."time", compress_hyper_2_53_chunk.int_value, compress_hyper_2_53_chunk.float_value Filter: (compress_hyper_2_53_chunk.segment_by_value > 5) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_44_chunk."time")) + Output: (PARTIAL sum(_hyper_1_44_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_44_chunk Output: _hyper_1_44_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_54_chunk Output: compress_hyper_2_54_chunk._ts_meta_count, compress_hyper_2_54_chunk._ts_meta_sequence_num, compress_hyper_2_54_chunk.segment_by_value, compress_hyper_2_54_chunk._ts_meta_min_1, compress_hyper_2_54_chunk._ts_meta_max_1, compress_hyper_2_54_chunk."time", compress_hyper_2_54_chunk.int_value, compress_hyper_2_54_chunk.float_value Filter: (compress_hyper_2_54_chunk.segment_by_value > 5) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_45_chunk."time")) + Output: (PARTIAL 
sum(_hyper_1_45_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_45_chunk Output: _hyper_1_45_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_55_chunk Output: compress_hyper_2_55_chunk._ts_meta_count, compress_hyper_2_55_chunk._ts_meta_sequence_num, compress_hyper_2_55_chunk.segment_by_value, compress_hyper_2_55_chunk._ts_meta_min_1, compress_hyper_2_55_chunk._ts_meta_max_1, compress_hyper_2_55_chunk."time", compress_hyper_2_55_chunk.int_value, compress_hyper_2_55_chunk.float_value Filter: (compress_hyper_2_55_chunk.segment_by_value > 5) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_46_chunk."time")) + Output: (PARTIAL sum(_hyper_1_46_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_46_chunk Output: _hyper_1_46_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_56_chunk Output: compress_hyper_2_56_chunk._ts_meta_count, compress_hyper_2_56_chunk._ts_meta_sequence_num, compress_hyper_2_56_chunk.segment_by_value, compress_hyper_2_56_chunk._ts_meta_min_1, compress_hyper_2_56_chunk._ts_meta_max_1, compress_hyper_2_56_chunk."time", compress_hyper_2_56_chunk.int_value, compress_hyper_2_56_chunk.float_value Filter: (compress_hyper_2_56_chunk.segment_by_value > 5) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_47_chunk."time")) + Output: (PARTIAL sum(_hyper_1_47_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_47_chunk Output: _hyper_1_47_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_57_chunk Output: compress_hyper_2_57_chunk._ts_meta_count, compress_hyper_2_57_chunk._ts_meta_sequence_num, compress_hyper_2_57_chunk.segment_by_value, compress_hyper_2_57_chunk._ts_meta_min_1, compress_hyper_2_57_chunk._ts_meta_max_1, compress_hyper_2_57_chunk."time", compress_hyper_2_57_chunk.int_value, compress_hyper_2_57_chunk.float_value Filter: (compress_hyper_2_57_chunk.segment_by_value > 5) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_48_chunk."time")) + Output: (PARTIAL sum(_hyper_1_48_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_48_chunk Output: _hyper_1_48_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_58_chunk Output: compress_hyper_2_58_chunk._ts_meta_count, compress_hyper_2_58_chunk._ts_meta_sequence_num, compress_hyper_2_58_chunk.segment_by_value, compress_hyper_2_58_chunk._ts_meta_min_1, compress_hyper_2_58_chunk._ts_meta_max_1, compress_hyper_2_58_chunk."time", compress_hyper_2_58_chunk.int_value, compress_hyper_2_58_chunk.float_value Filter: (compress_hyper_2_58_chunk.segment_by_value > 5) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_49_chunk."time")) + Output: (PARTIAL sum(_hyper_1_49_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_49_chunk Output: _hyper_1_49_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_59_chunk Output: compress_hyper_2_59_chunk._ts_meta_count, compress_hyper_2_59_chunk._ts_meta_sequence_num, compress_hyper_2_59_chunk.segment_by_value, compress_hyper_2_59_chunk._ts_meta_min_1, compress_hyper_2_59_chunk._ts_meta_max_1, compress_hyper_2_59_chunk."time", compress_hyper_2_59_chunk.int_value, compress_hyper_2_59_chunk.float_value Filter: (compress_hyper_2_59_chunk.segment_by_value > 5) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_50_chunk."time")) + Output: (PARTIAL 
sum(_hyper_1_50_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_50_chunk Output: _hyper_1_50_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_60_chunk @@ -1976,67 +1976,67 @@ SELECT sum(segment_by_value) FROM testtable; QUERY PLAN --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_41_chunk."time") + Output: sum(_hyper_1_41_chunk.segment_by_value) -> Gather - Output: (PARTIAL sum(_hyper_1_41_chunk."time")) + Output: (PARTIAL sum(_hyper_1_41_chunk.segment_by_value)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_41_chunk."time")) + Output: (PARTIAL sum(_hyper_1_41_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_41_chunk Output: _hyper_1_41_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_51_chunk Output: compress_hyper_2_51_chunk._ts_meta_count, compress_hyper_2_51_chunk._ts_meta_sequence_num, compress_hyper_2_51_chunk.segment_by_value, compress_hyper_2_51_chunk._ts_meta_min_1, compress_hyper_2_51_chunk._ts_meta_max_1, compress_hyper_2_51_chunk."time", compress_hyper_2_51_chunk.int_value, compress_hyper_2_51_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_42_chunk."time")) + Output: (PARTIAL sum(_hyper_1_42_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_42_chunk Output: _hyper_1_42_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_52_chunk Output: compress_hyper_2_52_chunk._ts_meta_count, compress_hyper_2_52_chunk._ts_meta_sequence_num, compress_hyper_2_52_chunk.segment_by_value, compress_hyper_2_52_chunk._ts_meta_min_1, compress_hyper_2_52_chunk._ts_meta_max_1, compress_hyper_2_52_chunk."time", compress_hyper_2_52_chunk.int_value, compress_hyper_2_52_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_43_chunk."time")) + Output: (PARTIAL sum(_hyper_1_43_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_43_chunk Output: _hyper_1_43_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_53_chunk Output: compress_hyper_2_53_chunk._ts_meta_count, compress_hyper_2_53_chunk._ts_meta_sequence_num, compress_hyper_2_53_chunk.segment_by_value, compress_hyper_2_53_chunk._ts_meta_min_1, compress_hyper_2_53_chunk._ts_meta_max_1, compress_hyper_2_53_chunk."time", compress_hyper_2_53_chunk.int_value, compress_hyper_2_53_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_44_chunk."time")) + Output: (PARTIAL sum(_hyper_1_44_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_44_chunk Output: _hyper_1_44_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_54_chunk Output: compress_hyper_2_54_chunk._ts_meta_count, compress_hyper_2_54_chunk._ts_meta_sequence_num, compress_hyper_2_54_chunk.segment_by_value, compress_hyper_2_54_chunk._ts_meta_min_1, compress_hyper_2_54_chunk._ts_meta_max_1, compress_hyper_2_54_chunk."time", compress_hyper_2_54_chunk.int_value, 
compress_hyper_2_54_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_45_chunk."time")) + Output: (PARTIAL sum(_hyper_1_45_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_45_chunk Output: _hyper_1_45_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_55_chunk Output: compress_hyper_2_55_chunk._ts_meta_count, compress_hyper_2_55_chunk._ts_meta_sequence_num, compress_hyper_2_55_chunk.segment_by_value, compress_hyper_2_55_chunk._ts_meta_min_1, compress_hyper_2_55_chunk._ts_meta_max_1, compress_hyper_2_55_chunk."time", compress_hyper_2_55_chunk.int_value, compress_hyper_2_55_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_46_chunk."time")) + Output: (PARTIAL sum(_hyper_1_46_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_46_chunk Output: _hyper_1_46_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_56_chunk Output: compress_hyper_2_56_chunk._ts_meta_count, compress_hyper_2_56_chunk._ts_meta_sequence_num, compress_hyper_2_56_chunk.segment_by_value, compress_hyper_2_56_chunk._ts_meta_min_1, compress_hyper_2_56_chunk._ts_meta_max_1, compress_hyper_2_56_chunk."time", compress_hyper_2_56_chunk.int_value, compress_hyper_2_56_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_47_chunk."time")) + Output: (PARTIAL sum(_hyper_1_47_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_47_chunk Output: _hyper_1_47_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_57_chunk Output: compress_hyper_2_57_chunk._ts_meta_count, compress_hyper_2_57_chunk._ts_meta_sequence_num, compress_hyper_2_57_chunk.segment_by_value, compress_hyper_2_57_chunk._ts_meta_min_1, compress_hyper_2_57_chunk._ts_meta_max_1, compress_hyper_2_57_chunk."time", compress_hyper_2_57_chunk.int_value, compress_hyper_2_57_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_48_chunk."time")) + Output: (PARTIAL sum(_hyper_1_48_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_48_chunk Output: _hyper_1_48_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_58_chunk Output: compress_hyper_2_58_chunk._ts_meta_count, compress_hyper_2_58_chunk._ts_meta_sequence_num, compress_hyper_2_58_chunk.segment_by_value, compress_hyper_2_58_chunk._ts_meta_min_1, compress_hyper_2_58_chunk._ts_meta_max_1, compress_hyper_2_58_chunk."time", compress_hyper_2_58_chunk.int_value, compress_hyper_2_58_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_49_chunk."time")) + Output: (PARTIAL sum(_hyper_1_49_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_49_chunk Output: _hyper_1_49_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_59_chunk Output: compress_hyper_2_59_chunk._ts_meta_count, compress_hyper_2_59_chunk._ts_meta_sequence_num, compress_hyper_2_59_chunk.segment_by_value, compress_hyper_2_59_chunk._ts_meta_min_1, compress_hyper_2_59_chunk._ts_meta_max_1, compress_hyper_2_59_chunk."time", compress_hyper_2_59_chunk.int_value, compress_hyper_2_59_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_50_chunk."time")) + Output: (PARTIAL sum(_hyper_1_50_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on 
_timescaledb_internal._hyper_1_50_chunk Output: _hyper_1_50_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_60_chunk @@ -2161,67 +2161,67 @@ SELECT sum(segment_by_value) FROM testtable; QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_81_chunk."time") + Output: sum(_hyper_1_81_chunk.segment_by_value) -> Gather - Output: (PARTIAL sum(_hyper_1_81_chunk."time")) + Output: (PARTIAL sum(_hyper_1_81_chunk.segment_by_value)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_81_chunk."time")) + Output: (PARTIAL sum(_hyper_1_81_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_81_chunk Output: _hyper_1_81_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_91_chunk Output: compress_hyper_2_91_chunk._ts_meta_count, compress_hyper_2_91_chunk._ts_meta_sequence_num, compress_hyper_2_91_chunk.segment_by_value, compress_hyper_2_91_chunk._ts_meta_min_1, compress_hyper_2_91_chunk._ts_meta_max_1, compress_hyper_2_91_chunk."time", compress_hyper_2_91_chunk.int_value, compress_hyper_2_91_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_82_chunk."time")) + Output: (PARTIAL sum(_hyper_1_82_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_82_chunk Output: _hyper_1_82_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_92_chunk Output: compress_hyper_2_92_chunk._ts_meta_count, compress_hyper_2_92_chunk._ts_meta_sequence_num, compress_hyper_2_92_chunk.segment_by_value, compress_hyper_2_92_chunk._ts_meta_min_1, compress_hyper_2_92_chunk._ts_meta_max_1, compress_hyper_2_92_chunk."time", compress_hyper_2_92_chunk.int_value, compress_hyper_2_92_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_83_chunk."time")) + Output: (PARTIAL sum(_hyper_1_83_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_83_chunk Output: _hyper_1_83_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_93_chunk Output: compress_hyper_2_93_chunk._ts_meta_count, compress_hyper_2_93_chunk._ts_meta_sequence_num, compress_hyper_2_93_chunk.segment_by_value, compress_hyper_2_93_chunk._ts_meta_min_1, compress_hyper_2_93_chunk._ts_meta_max_1, compress_hyper_2_93_chunk."time", compress_hyper_2_93_chunk.int_value, compress_hyper_2_93_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_84_chunk."time")) + Output: (PARTIAL sum(_hyper_1_84_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_84_chunk Output: _hyper_1_84_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_94_chunk Output: compress_hyper_2_94_chunk._ts_meta_count, compress_hyper_2_94_chunk._ts_meta_sequence_num, compress_hyper_2_94_chunk.segment_by_value, compress_hyper_2_94_chunk._ts_meta_min_1, compress_hyper_2_94_chunk._ts_meta_max_1, compress_hyper_2_94_chunk."time", compress_hyper_2_94_chunk.int_value, compress_hyper_2_94_chunk.float_value -> Custom Scan 
(VectorAgg) - Output: (PARTIAL sum(_hyper_1_85_chunk."time")) + Output: (PARTIAL sum(_hyper_1_85_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_85_chunk Output: _hyper_1_85_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_95_chunk Output: compress_hyper_2_95_chunk._ts_meta_count, compress_hyper_2_95_chunk._ts_meta_sequence_num, compress_hyper_2_95_chunk.segment_by_value, compress_hyper_2_95_chunk._ts_meta_min_1, compress_hyper_2_95_chunk._ts_meta_max_1, compress_hyper_2_95_chunk."time", compress_hyper_2_95_chunk.int_value, compress_hyper_2_95_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_86_chunk."time")) + Output: (PARTIAL sum(_hyper_1_86_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_86_chunk Output: _hyper_1_86_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_96_chunk Output: compress_hyper_2_96_chunk._ts_meta_count, compress_hyper_2_96_chunk._ts_meta_sequence_num, compress_hyper_2_96_chunk.segment_by_value, compress_hyper_2_96_chunk._ts_meta_min_1, compress_hyper_2_96_chunk._ts_meta_max_1, compress_hyper_2_96_chunk."time", compress_hyper_2_96_chunk.int_value, compress_hyper_2_96_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_87_chunk."time")) + Output: (PARTIAL sum(_hyper_1_87_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_87_chunk Output: _hyper_1_87_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_97_chunk Output: compress_hyper_2_97_chunk._ts_meta_count, compress_hyper_2_97_chunk._ts_meta_sequence_num, compress_hyper_2_97_chunk.segment_by_value, compress_hyper_2_97_chunk._ts_meta_min_1, compress_hyper_2_97_chunk._ts_meta_max_1, compress_hyper_2_97_chunk."time", compress_hyper_2_97_chunk.int_value, compress_hyper_2_97_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_88_chunk."time")) + Output: (PARTIAL sum(_hyper_1_88_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_88_chunk Output: _hyper_1_88_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_98_chunk Output: compress_hyper_2_98_chunk._ts_meta_count, compress_hyper_2_98_chunk._ts_meta_sequence_num, compress_hyper_2_98_chunk.segment_by_value, compress_hyper_2_98_chunk._ts_meta_min_1, compress_hyper_2_98_chunk._ts_meta_max_1, compress_hyper_2_98_chunk."time", compress_hyper_2_98_chunk.int_value, compress_hyper_2_98_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_89_chunk."time")) + Output: (PARTIAL sum(_hyper_1_89_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_89_chunk Output: _hyper_1_89_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_99_chunk Output: compress_hyper_2_99_chunk._ts_meta_count, compress_hyper_2_99_chunk._ts_meta_sequence_num, compress_hyper_2_99_chunk.segment_by_value, compress_hyper_2_99_chunk._ts_meta_min_1, compress_hyper_2_99_chunk._ts_meta_max_1, compress_hyper_2_99_chunk."time", compress_hyper_2_99_chunk.int_value, compress_hyper_2_99_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_90_chunk."time")) + Output: (PARTIAL sum(_hyper_1_90_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_90_chunk Output: 
_hyper_1_90_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_100_chunk @@ -2233,67 +2233,67 @@ SELECT sum(int_value) FROM testtable; QUERY PLAN ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_1_81_chunk."time") + Output: sum(_hyper_1_81_chunk.int_value) -> Gather - Output: (PARTIAL sum(_hyper_1_81_chunk."time")) + Output: (PARTIAL sum(_hyper_1_81_chunk.int_value)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_81_chunk."time")) + Output: (PARTIAL sum(_hyper_1_81_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_81_chunk Output: _hyper_1_81_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_91_chunk Output: compress_hyper_2_91_chunk._ts_meta_count, compress_hyper_2_91_chunk._ts_meta_sequence_num, compress_hyper_2_91_chunk.segment_by_value, compress_hyper_2_91_chunk._ts_meta_min_1, compress_hyper_2_91_chunk._ts_meta_max_1, compress_hyper_2_91_chunk."time", compress_hyper_2_91_chunk.int_value, compress_hyper_2_91_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_82_chunk."time")) + Output: (PARTIAL sum(_hyper_1_82_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_82_chunk Output: _hyper_1_82_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_92_chunk Output: compress_hyper_2_92_chunk._ts_meta_count, compress_hyper_2_92_chunk._ts_meta_sequence_num, compress_hyper_2_92_chunk.segment_by_value, compress_hyper_2_92_chunk._ts_meta_min_1, compress_hyper_2_92_chunk._ts_meta_max_1, compress_hyper_2_92_chunk."time", compress_hyper_2_92_chunk.int_value, compress_hyper_2_92_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_83_chunk."time")) + Output: (PARTIAL sum(_hyper_1_83_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_83_chunk Output: _hyper_1_83_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_93_chunk Output: compress_hyper_2_93_chunk._ts_meta_count, compress_hyper_2_93_chunk._ts_meta_sequence_num, compress_hyper_2_93_chunk.segment_by_value, compress_hyper_2_93_chunk._ts_meta_min_1, compress_hyper_2_93_chunk._ts_meta_max_1, compress_hyper_2_93_chunk."time", compress_hyper_2_93_chunk.int_value, compress_hyper_2_93_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_84_chunk."time")) + Output: (PARTIAL sum(_hyper_1_84_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_84_chunk Output: _hyper_1_84_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_94_chunk Output: compress_hyper_2_94_chunk._ts_meta_count, compress_hyper_2_94_chunk._ts_meta_sequence_num, compress_hyper_2_94_chunk.segment_by_value, compress_hyper_2_94_chunk._ts_meta_min_1, compress_hyper_2_94_chunk._ts_meta_max_1, compress_hyper_2_94_chunk."time", compress_hyper_2_94_chunk.int_value, compress_hyper_2_94_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_85_chunk."time")) + Output: (PARTIAL sum(_hyper_1_85_chunk.int_value)) -> Custom 
Scan (DecompressChunk) on _timescaledb_internal._hyper_1_85_chunk Output: _hyper_1_85_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_95_chunk Output: compress_hyper_2_95_chunk._ts_meta_count, compress_hyper_2_95_chunk._ts_meta_sequence_num, compress_hyper_2_95_chunk.segment_by_value, compress_hyper_2_95_chunk._ts_meta_min_1, compress_hyper_2_95_chunk._ts_meta_max_1, compress_hyper_2_95_chunk."time", compress_hyper_2_95_chunk.int_value, compress_hyper_2_95_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_86_chunk."time")) + Output: (PARTIAL sum(_hyper_1_86_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_86_chunk Output: _hyper_1_86_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_96_chunk Output: compress_hyper_2_96_chunk._ts_meta_count, compress_hyper_2_96_chunk._ts_meta_sequence_num, compress_hyper_2_96_chunk.segment_by_value, compress_hyper_2_96_chunk._ts_meta_min_1, compress_hyper_2_96_chunk._ts_meta_max_1, compress_hyper_2_96_chunk."time", compress_hyper_2_96_chunk.int_value, compress_hyper_2_96_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_87_chunk."time")) + Output: (PARTIAL sum(_hyper_1_87_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_87_chunk Output: _hyper_1_87_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_97_chunk Output: compress_hyper_2_97_chunk._ts_meta_count, compress_hyper_2_97_chunk._ts_meta_sequence_num, compress_hyper_2_97_chunk.segment_by_value, compress_hyper_2_97_chunk._ts_meta_min_1, compress_hyper_2_97_chunk._ts_meta_max_1, compress_hyper_2_97_chunk."time", compress_hyper_2_97_chunk.int_value, compress_hyper_2_97_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_88_chunk."time")) + Output: (PARTIAL sum(_hyper_1_88_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_88_chunk Output: _hyper_1_88_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_98_chunk Output: compress_hyper_2_98_chunk._ts_meta_count, compress_hyper_2_98_chunk._ts_meta_sequence_num, compress_hyper_2_98_chunk.segment_by_value, compress_hyper_2_98_chunk._ts_meta_min_1, compress_hyper_2_98_chunk._ts_meta_max_1, compress_hyper_2_98_chunk."time", compress_hyper_2_98_chunk.int_value, compress_hyper_2_98_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_89_chunk."time")) + Output: (PARTIAL sum(_hyper_1_89_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_89_chunk Output: _hyper_1_89_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_99_chunk Output: compress_hyper_2_99_chunk._ts_meta_count, compress_hyper_2_99_chunk._ts_meta_sequence_num, compress_hyper_2_99_chunk.segment_by_value, compress_hyper_2_99_chunk._ts_meta_min_1, compress_hyper_2_99_chunk._ts_meta_max_1, compress_hyper_2_99_chunk."time", compress_hyper_2_99_chunk.int_value, compress_hyper_2_99_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_90_chunk."time")) + Output: (PARTIAL sum(_hyper_1_90_chunk.int_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_90_chunk Output: _hyper_1_90_chunk.int_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_100_chunk @@ -2452,67 +2452,67 @@ SELECT sum(segment_by_value1) FROM testtable2; QUERY PLAN 
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_3_101_chunk."time") + Output: sum(_hyper_3_101_chunk.segment_by_value1) -> Gather - Output: (PARTIAL sum(_hyper_3_101_chunk."time")) + Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_101_chunk."time")) + Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk Output: _hyper_3_101_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_102_chunk."time")) + Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk Output: _hyper_3_102_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_103_chunk."time")) + Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk Output: _hyper_3_103_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_104_chunk."time")) + Output: (PARTIAL sum(_hyper_3_104_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk Output: _hyper_3_104_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", 
compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_105_chunk."time")) + Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk Output: _hyper_3_105_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_106_chunk."time")) + Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk Output: _hyper_3_106_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_107_chunk."time")) + Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk Output: _hyper_3_107_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_108_chunk."time")) + Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk Output: _hyper_3_108_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_109_chunk."time")) + Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk Output: _hyper_3_109_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_119_chunk Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, 
compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_110_chunk."time")) + Output: (PARTIAL sum(_hyper_3_110_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk Output: _hyper_3_110_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_120_chunk @@ -2530,67 +2530,67 @@ SELECT sum(segment_by_value2) FROM testtable2; QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_3_101_chunk."time") + Output: sum(_hyper_3_101_chunk.segment_by_value2) -> Gather - Output: (PARTIAL sum(_hyper_3_101_chunk."time")) + Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value2)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_101_chunk."time")) + Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value2)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk Output: _hyper_3_101_chunk.segment_by_value2 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_102_chunk."time")) + Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value2)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk Output: _hyper_3_102_chunk.segment_by_value2 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_103_chunk."time")) + Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value2)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk Output: _hyper_3_103_chunk.segment_by_value2 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_104_chunk."time")) + Output: (PARTIAL 
sum(_hyper_3_104_chunk.segment_by_value2)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk Output: _hyper_3_104_chunk.segment_by_value2 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_105_chunk."time")) + Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value2)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk Output: _hyper_3_105_chunk.segment_by_value2 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_106_chunk."time")) + Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value2)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk Output: _hyper_3_106_chunk.segment_by_value2 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_107_chunk."time")) + Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value2)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk Output: _hyper_3_107_chunk.segment_by_value2 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_108_chunk."time")) + Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value2)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk Output: _hyper_3_108_chunk.segment_by_value2 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value -> Custom Scan 
(VectorAgg) - Output: (PARTIAL sum(_hyper_3_109_chunk."time")) + Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value2)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk Output: _hyper_3_109_chunk.segment_by_value2 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_119_chunk Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_110_chunk."time")) + Output: (PARTIAL sum(_hyper_3_110_chunk.segment_by_value2)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk Output: _hyper_3_110_chunk.segment_by_value2 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_120_chunk @@ -2609,76 +2609,76 @@ SELECT sum(segment_by_value1) FROM testtable2 WHERE segment_by_value1 > 0; QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_3_102_chunk."time") + Output: sum(_hyper_3_102_chunk.segment_by_value1) -> Gather - Output: (PARTIAL sum(_hyper_3_102_chunk."time")) + Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_102_chunk."time")) + Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk Output: _hyper_3_102_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value Filter: (compress_hyper_4_112_chunk.segment_by_value1 > 0) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_103_chunk."time")) + Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk Output: _hyper_3_103_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value Filter: (compress_hyper_4_113_chunk.segment_by_value1 > 0) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_104_chunk."time")) + Output: (PARTIAL 
sum(_hyper_3_104_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk Output: _hyper_3_104_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value Filter: (compress_hyper_4_114_chunk.segment_by_value1 > 0) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_105_chunk."time")) + Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk Output: _hyper_3_105_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value Filter: (compress_hyper_4_115_chunk.segment_by_value1 > 0) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_106_chunk."time")) + Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk Output: _hyper_3_106_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value Filter: (compress_hyper_4_116_chunk.segment_by_value1 > 0) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_107_chunk."time")) + Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk Output: _hyper_3_107_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value Filter: (compress_hyper_4_117_chunk.segment_by_value1 > 0) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_108_chunk."time")) + Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk Output: _hyper_3_108_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, 
compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value Filter: (compress_hyper_4_118_chunk.segment_by_value1 > 0) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_109_chunk."time")) + Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk Output: _hyper_3_109_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_119_chunk Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value Filter: (compress_hyper_4_119_chunk.segment_by_value1 > 0) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_110_chunk."time")) + Output: (PARTIAL sum(_hyper_3_110_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk Output: _hyper_3_110_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_120_chunk Output: compress_hyper_4_120_chunk._ts_meta_count, compress_hyper_4_120_chunk._ts_meta_sequence_num, compress_hyper_4_120_chunk.segment_by_value1, compress_hyper_4_120_chunk.segment_by_value2, compress_hyper_4_120_chunk._ts_meta_min_1, compress_hyper_4_120_chunk._ts_meta_max_1, compress_hyper_4_120_chunk."time", compress_hyper_4_120_chunk.int_value, compress_hyper_4_120_chunk.float_value Filter: (compress_hyper_4_120_chunk.segment_by_value1 > 0) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_101_chunk."time")) + Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk Output: _hyper_3_101_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk @@ -2691,76 +2691,76 @@ SELECT sum(segment_by_value1) FROM testtable2 WHERE segment_by_value1 > 0 AND se QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_3_101_chunk."time") + Output: sum(_hyper_3_101_chunk.segment_by_value1) -> Gather - Output: (PARTIAL sum(_hyper_3_101_chunk."time")) + Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_101_chunk."time")) + Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk Output: _hyper_3_101_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, 
compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value Filter: ((compress_hyper_4_111_chunk.segment_by_value1 > 0) AND (compress_hyper_4_111_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_102_chunk."time")) + Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk Output: _hyper_3_102_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value Filter: ((compress_hyper_4_112_chunk.segment_by_value1 > 0) AND (compress_hyper_4_112_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_103_chunk."time")) + Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk Output: _hyper_3_103_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value Filter: ((compress_hyper_4_113_chunk.segment_by_value1 > 0) AND (compress_hyper_4_113_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_104_chunk."time")) + Output: (PARTIAL sum(_hyper_3_104_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk Output: _hyper_3_104_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value Filter: ((compress_hyper_4_114_chunk.segment_by_value1 > 0) AND (compress_hyper_4_114_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_105_chunk."time")) + Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk Output: _hyper_3_105_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, 
compress_hyper_4_115_chunk.float_value Filter: ((compress_hyper_4_115_chunk.segment_by_value1 > 0) AND (compress_hyper_4_115_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_106_chunk."time")) + Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk Output: _hyper_3_106_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value Filter: ((compress_hyper_4_116_chunk.segment_by_value1 > 0) AND (compress_hyper_4_116_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_107_chunk."time")) + Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk Output: _hyper_3_107_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value Filter: ((compress_hyper_4_117_chunk.segment_by_value1 > 0) AND (compress_hyper_4_117_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_108_chunk."time")) + Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk Output: _hyper_3_108_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value Filter: ((compress_hyper_4_118_chunk.segment_by_value1 > 0) AND (compress_hyper_4_118_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_109_chunk."time")) + Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk Output: _hyper_3_109_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_119_chunk Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value Filter: ((compress_hyper_4_119_chunk.segment_by_value1 > 0) AND (compress_hyper_4_119_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - 
Output: (PARTIAL sum(_hyper_3_110_chunk."time")) + Output: (PARTIAL sum(_hyper_3_110_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk Output: _hyper_3_110_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_120_chunk @@ -2773,76 +2773,76 @@ SELECT sum(segment_by_value1) FROM testtable2 WHERE segment_by_value1 > 0 AND se QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_3_101_chunk."time") + Output: sum(_hyper_3_101_chunk.segment_by_value1) -> Gather - Output: (PARTIAL sum(_hyper_3_101_chunk."time")) + Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) Workers Planned: 2 -> Parallel Append -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_101_chunk."time")) + Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk Output: _hyper_3_101_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value Filter: ((compress_hyper_4_111_chunk.segment_by_value1 > 0) AND (compress_hyper_4_111_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_102_chunk."time")) + Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk Output: _hyper_3_102_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value Filter: ((compress_hyper_4_112_chunk.segment_by_value1 > 0) AND (compress_hyper_4_112_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_103_chunk."time")) + Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk Output: _hyper_3_103_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value Filter: 
((compress_hyper_4_113_chunk.segment_by_value1 > 0) AND (compress_hyper_4_113_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_104_chunk."time")) + Output: (PARTIAL sum(_hyper_3_104_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk Output: _hyper_3_104_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value Filter: ((compress_hyper_4_114_chunk.segment_by_value1 > 0) AND (compress_hyper_4_114_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_105_chunk."time")) + Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk Output: _hyper_3_105_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value Filter: ((compress_hyper_4_115_chunk.segment_by_value1 > 0) AND (compress_hyper_4_115_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_106_chunk."time")) + Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk Output: _hyper_3_106_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value Filter: ((compress_hyper_4_116_chunk.segment_by_value1 > 0) AND (compress_hyper_4_116_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_107_chunk."time")) + Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk Output: _hyper_3_107_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value Filter: ((compress_hyper_4_117_chunk.segment_by_value1 > 0) AND (compress_hyper_4_117_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_108_chunk."time")) 
+ Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk Output: _hyper_3_108_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value Filter: ((compress_hyper_4_118_chunk.segment_by_value1 > 0) AND (compress_hyper_4_118_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_109_chunk."time")) + Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk Output: _hyper_3_109_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_119_chunk Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value Filter: ((compress_hyper_4_119_chunk.segment_by_value1 > 0) AND (compress_hyper_4_119_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_110_chunk."time")) + Output: (PARTIAL sum(_hyper_3_110_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk Output: _hyper_3_110_chunk.segment_by_value1 -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_120_chunk @@ -2860,80 +2860,80 @@ SELECT sum(segment_by_value1) FROM testtable2 WHERE segment_by_value1 > 1000 AND Finalize Aggregate Output: sum(_hyper_3_101_chunk.segment_by_value1) -> Append - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_101_chunk.segment_by_value1) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk Output: _hyper_3_101_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_101_chunk.int_value > 1000) -> Index Scan using compress_hyper_4_111_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_111_chunk Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value Index Cond: (compress_hyper_4_111_chunk.segment_by_value1 > 1000) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_102_chunk.segment_by_value1) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk Output: _hyper_3_102_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_102_chunk.int_value > 1000) -> Index Scan using compress_hyper_4_112_chunk_segment_by_value1_segment_by_val_idx on 
_timescaledb_internal.compress_hyper_4_112_chunk Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value Index Cond: (compress_hyper_4_112_chunk.segment_by_value1 > 1000) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_103_chunk.segment_by_value1) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk Output: _hyper_3_103_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_103_chunk.int_value > 1000) -> Index Scan using compress_hyper_4_113_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_113_chunk Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value Index Cond: (compress_hyper_4_113_chunk.segment_by_value1 > 1000) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_104_chunk.segment_by_value1) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_104_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk Output: _hyper_3_104_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_104_chunk.int_value > 1000) -> Index Scan using compress_hyper_4_114_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_114_chunk Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value Index Cond: (compress_hyper_4_114_chunk.segment_by_value1 > 1000) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_105_chunk.segment_by_value1) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk Output: _hyper_3_105_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_105_chunk.int_value > 1000) -> Index Scan using compress_hyper_4_115_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_115_chunk Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value Index Cond: (compress_hyper_4_115_chunk.segment_by_value1 > 1000) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_106_chunk.segment_by_value1) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on 
_timescaledb_internal._hyper_3_106_chunk Output: _hyper_3_106_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_106_chunk.int_value > 1000) -> Index Scan using compress_hyper_4_116_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_116_chunk Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value Index Cond: (compress_hyper_4_116_chunk.segment_by_value1 > 1000) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_107_chunk.segment_by_value1) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk Output: _hyper_3_107_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_107_chunk.int_value > 1000) -> Index Scan using compress_hyper_4_117_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_117_chunk Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value Index Cond: (compress_hyper_4_117_chunk.segment_by_value1 > 1000) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_108_chunk.segment_by_value1) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk Output: _hyper_3_108_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_108_chunk.int_value > 1000) -> Index Scan using compress_hyper_4_118_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_118_chunk Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value Index Cond: (compress_hyper_4_118_chunk.segment_by_value1 > 1000) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_109_chunk.segment_by_value1) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk Output: _hyper_3_109_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_109_chunk.int_value > 1000) -> Index Scan using compress_hyper_4_119_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_119_chunk Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value Index Cond: 
(compress_hyper_4_119_chunk.segment_by_value1 > 1000) - -> Partial Aggregate - Output: PARTIAL sum(_hyper_3_110_chunk.segment_by_value1) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_110_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk Output: _hyper_3_110_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_110_chunk.int_value > 1000) From 287f3b4f1a0bf34f0e06666a469292085345ed5c Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Fri, 29 Mar 2024 19:04:10 +0100 Subject: [PATCH 26/81] fix ref --- tsl/test/expected/vectorized_aggregation.out | 76 ++++++++++---------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/tsl/test/expected/vectorized_aggregation.out b/tsl/test/expected/vectorized_aggregation.out index 040f4a0a6ce..74c9da5ea39 100644 --- a/tsl/test/expected/vectorized_aggregation.out +++ b/tsl/test/expected/vectorized_aggregation.out @@ -179,24 +179,24 @@ SELECT sum(segment_by_value) FROM testtable WHERE segment_by_value > 0 AND int_v Finalize Aggregate Output: sum(_hyper_1_1_chunk.segment_by_value) -> Append - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_1_chunk.segment_by_value) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk Output: _hyper_1_1_chunk.segment_by_value Vectorized Filter: (_hyper_1_1_chunk.int_value > 0) -> Index Scan using compress_hyper_2_11_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_11_chunk Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value Index Cond: (compress_hyper_2_11_chunk.segment_by_value > 0) - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_2_chunk.segment_by_value) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk Output: _hyper_1_2_chunk.segment_by_value Vectorized Filter: (_hyper_1_2_chunk.int_value > 0) -> Index Scan using compress_hyper_2_12_chunk_segment_by_value__ts_meta_sequenc_idx on _timescaledb_internal.compress_hyper_2_12_chunk Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value Index Cond: (compress_hyper_2_12_chunk.segment_by_value > 0) - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_3_chunk.segment_by_value) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk Output: _hyper_1_3_chunk.segment_by_value Vectorized Filter: (_hyper_1_3_chunk.int_value > 0) @@ -250,22 +250,22 @@ SELECT sum(segment_by_value) FROM testtable WHERE int_value > 0; Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) Workers Planned: 2 -> Parallel Append - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) + -> Partial Aggregate + Output: PARTIAL 
sum(_hyper_1_1_chunk.segment_by_value) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk Output: _hyper_1_1_chunk.segment_by_value Vectorized Filter: (_hyper_1_1_chunk.int_value > 0) -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_2_chunk.segment_by_value) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk Output: _hyper_1_2_chunk.segment_by_value Vectorized Filter: (_hyper_1_2_chunk.int_value > 0) -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_3_chunk.segment_by_value) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk Output: _hyper_1_3_chunk.segment_by_value Vectorized Filter: (_hyper_1_3_chunk.int_value > 0) @@ -318,22 +318,22 @@ SELECT sum(segment_by_value) FROM testtable WHERE float_value > 0; Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) Workers Planned: 2 -> Parallel Append - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_1_chunk.segment_by_value) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk Output: _hyper_1_1_chunk.segment_by_value Vectorized Filter: (_hyper_1_1_chunk.float_value > '0'::double precision) -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_2_chunk.segment_by_value) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk Output: _hyper_1_2_chunk.segment_by_value Vectorized Filter: (_hyper_1_2_chunk.float_value > '0'::double precision) -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) + -> Partial Aggregate + Output: PARTIAL 
sum(_hyper_1_3_chunk.segment_by_value) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk Output: _hyper_1_3_chunk.segment_by_value Vectorized Filter: (_hyper_1_3_chunk.float_value > '0'::double precision) @@ -2860,80 +2860,80 @@ SELECT sum(segment_by_value1) FROM testtable2 WHERE segment_by_value1 > 1000 AND Finalize Aggregate Output: sum(_hyper_3_101_chunk.segment_by_value1) -> Append - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_3_101_chunk.segment_by_value1) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk Output: _hyper_3_101_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_101_chunk.int_value > 1000) -> Index Scan using compress_hyper_4_111_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_111_chunk Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value Index Cond: (compress_hyper_4_111_chunk.segment_by_value1 > 1000) - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_3_102_chunk.segment_by_value1) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk Output: _hyper_3_102_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_102_chunk.int_value > 1000) -> Index Scan using compress_hyper_4_112_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_112_chunk Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value Index Cond: (compress_hyper_4_112_chunk.segment_by_value1 > 1000) - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value1)) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_3_103_chunk.segment_by_value1) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk Output: _hyper_3_103_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_103_chunk.int_value > 1000) -> Index Scan using compress_hyper_4_113_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_113_chunk Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value Index Cond: (compress_hyper_4_113_chunk.segment_by_value1 > 1000) - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_104_chunk.segment_by_value1)) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_3_104_chunk.segment_by_value1) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk Output: _hyper_3_104_chunk.segment_by_value1 
Vectorized Filter: (_hyper_3_104_chunk.int_value > 1000) -> Index Scan using compress_hyper_4_114_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_114_chunk Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value Index Cond: (compress_hyper_4_114_chunk.segment_by_value1 > 1000) - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value1)) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_3_105_chunk.segment_by_value1) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk Output: _hyper_3_105_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_105_chunk.int_value > 1000) -> Index Scan using compress_hyper_4_115_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_115_chunk Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value Index Cond: (compress_hyper_4_115_chunk.segment_by_value1 > 1000) - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value1)) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_3_106_chunk.segment_by_value1) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk Output: _hyper_3_106_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_106_chunk.int_value > 1000) -> Index Scan using compress_hyper_4_116_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_116_chunk Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value Index Cond: (compress_hyper_4_116_chunk.segment_by_value1 > 1000) - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value1)) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_3_107_chunk.segment_by_value1) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk Output: _hyper_3_107_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_107_chunk.int_value > 1000) -> Index Scan using compress_hyper_4_117_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_117_chunk Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value Index Cond: (compress_hyper_4_117_chunk.segment_by_value1 > 1000) - -> Custom Scan (VectorAgg) - Output: (PARTIAL 
sum(_hyper_3_108_chunk.segment_by_value1)) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_3_108_chunk.segment_by_value1) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk Output: _hyper_3_108_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_108_chunk.int_value > 1000) -> Index Scan using compress_hyper_4_118_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_118_chunk Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value Index Cond: (compress_hyper_4_118_chunk.segment_by_value1 > 1000) - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value1)) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_3_109_chunk.segment_by_value1) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk Output: _hyper_3_109_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_109_chunk.int_value > 1000) -> Index Scan using compress_hyper_4_119_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_119_chunk Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value Index Cond: (compress_hyper_4_119_chunk.segment_by_value1 > 1000) - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_110_chunk.segment_by_value1)) + -> Partial Aggregate + Output: PARTIAL sum(_hyper_3_110_chunk.segment_by_value1) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk Output: _hyper_3_110_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_110_chunk.int_value > 1000) From 9bdae30f17c7ac485325fccfd076eb0986725546 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Fri, 29 Mar 2024 19:14:06 +0100 Subject: [PATCH 27/81] fix for filtered out batches --- tsl/src/nodes/vector_agg/exec.c | 24 +++++++++++++++--------- tsl/test/expected/vector_agg_default.out | 23 +++++++++++++++++++++++ tsl/test/sql/vector_agg_default.sql | 3 +++ 3 files changed, 41 insertions(+), 9 deletions(-) diff --git a/tsl/src/nodes/vector_agg/exec.c b/tsl/src/nodes/vector_agg/exec.c index 4fa2ebc1a10..4762c2c69ba 100644 --- a/tsl/src/nodes/vector_agg/exec.c +++ b/tsl/src/nodes/vector_agg/exec.c @@ -177,8 +177,6 @@ vector_agg_exec(CustomScanState *vector_agg_state) Assert(IsA(tlentry->expr, Aggref)); Aggref *aggref = castNode(Aggref, tlentry->expr); - my_print(aggref); - /* Partial result is a int8 */ Assert(aggref->aggtranstype == INT8OID); @@ -219,15 +217,23 @@ vector_agg_exec(CustomScanState *vector_agg_state) agg->agg_init(&aggregated_slot->tts_values[0], &aggregated_slot->tts_isnull[0]); ExecClearTuple(aggregated_slot); - TupleTableSlot *compressed_slot = ExecProcNode(linitial(decompress_state->csstate.custom_ps)); - - if (TupIsNull(compressed_slot)) + /* + * Have to skip the batches that are fully filtered out. 
This condition also + * handles the batch that was consumed on the previous step. + */ + while (batch_state->next_batch_row >= batch_state->total_batch_rows) { - /* All values are processed. */ - return NULL; - } + TupleTableSlot *compressed_slot = + ExecProcNode(linitial(decompress_state->csstate.custom_ps)); - compressed_batch_set_compressed_tuple(dcontext, batch_state, compressed_slot); + if (TupIsNull(compressed_slot)) + { + /* All values are processed. */ + return NULL; + } + + compressed_batch_set_compressed_tuple(dcontext, batch_state, compressed_slot); + } ArrowArray *arrow = NULL; if (value_column_description->type == COMPRESSED_COLUMN) diff --git a/tsl/test/expected/vector_agg_default.out b/tsl/test/expected/vector_agg_default.out index 95659df9f8a..55f5bb4344c 100644 --- a/tsl/test/expected/vector_agg_default.out +++ b/tsl/test/expected/vector_agg_default.out @@ -75,6 +75,29 @@ select sum(c) from t where b in (0, 1, 3); 10782 (1 row) +select sum(c) from t where b > 10; + sum +----- + +(1 row) + +explain (costs off) select sum(c) from t where b in (0, 1, 3); + QUERY PLAN +------------------------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk + Vectorized Filter: (b = ANY ('{0,1,3}'::integer[])) + -> Parallel Seq Scan on compress_hyper_2_2_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk + Vectorized Filter: (b = ANY ('{0,1,3}'::integer[])) + -> Parallel Seq Scan on compress_hyper_2_4_chunk +(12 rows) + select decompress_chunk(show_chunks('t')); decompress_chunk ---------------------------------------- diff --git a/tsl/test/sql/vector_agg_default.sql b/tsl/test/sql/vector_agg_default.sql index f0dec294b9c..c5841325c2b 100644 --- a/tsl/test/sql/vector_agg_default.sql +++ b/tsl/test/sql/vector_agg_default.sql @@ -20,6 +20,9 @@ select sum(c) from t where b >= 0; select sum(c) from t where b = 0; select sum(c) from t where b in (0, 1); select sum(c) from t where b in (0, 1, 3); +select sum(c) from t where b > 10; + +explain (costs off) select sum(c) from t where b in (0, 1, 3); select decompress_chunk(show_chunks('t')); select sum(c) from t; From 398f3170746c85f9ed4021e3cab4397167f0c1c7 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Fri, 29 Mar 2024 19:19:35 +0100 Subject: [PATCH 28/81] benchmark vectorized agg with filter (2024-03-29 no. 
4) From eaca282f881c61775f1c66a659c0883457a98917 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Fri, 29 Mar 2024 21:50:47 +0100 Subject: [PATCH 29/81] fix build on windows --- tsl/src/compression/arrow_c_data_interface.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tsl/src/compression/arrow_c_data_interface.h b/tsl/src/compression/arrow_c_data_interface.h index 8178881ab47..5a34870dff0 100644 --- a/tsl/src/compression/arrow_c_data_interface.h +++ b/tsl/src/compression/arrow_c_data_interface.h @@ -164,8 +164,8 @@ pad_to_multiple(uint64 pad_to, uint64 source_value) static inline size_t arrow_num_valid(uint64 *bitmap, size_t total_rows) { -#ifdef HAVE__BUILTIN_POPCOUNT uint64 num_valid = 0; +#ifdef HAVE__BUILTIN_POPCOUNT const uint64 words = pad_to_multiple(64, total_rows) / 64; for (uint64 i = 0; i < words; i++) { From fdca7a7e7d979f43c7decb5047cf7aed5c6af3e7 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Tue, 2 Apr 2024 15:31:24 +0200 Subject: [PATCH 30/81] something that doesn't work --- .../nodes/decompress_chunk/compressed_batch.c | 13 +++- tsl/src/nodes/vector_agg/plan.c | 18 ++++- tsl/test/expected/vector_agg_param.out | 67 +++++++++++++++++++ tsl/test/sql/CMakeLists.txt | 8 ++- tsl/test/sql/vector_agg_param.sql | 28 ++++++++ 5 files changed, 127 insertions(+), 7 deletions(-) create mode 100644 tsl/test/expected/vector_agg_param.out create mode 100644 tsl/test/sql/vector_agg_param.sql diff --git a/tsl/src/nodes/decompress_chunk/compressed_batch.c b/tsl/src/nodes/decompress_chunk/compressed_batch.c index c7a9041fb98..0be304b56ea 100644 --- a/tsl/src/nodes/decompress_chunk/compressed_batch.c +++ b/tsl/src/nodes/decompress_chunk/compressed_batch.c @@ -574,8 +574,17 @@ compressed_batch_discard_tuples(DecompressBatchState *batch_state) if (batch_state->per_batch_context != NULL) { - ExecClearTuple(batch_state->compressed_slot); - ExecClearTuple(&batch_state->decompressed_scan_slot_data.base); + if (batch_state->compressed_slot != NULL) + { + /* + * The compressed slot is not initialized by the current prototype + * vectorized aggregation code. It's going to be refactored to make + * it more uniform with the normal flow, but for now we have this + * special check here. 
+ */ + ExecClearTuple(batch_state->compressed_slot); + ExecClearTuple(&batch_state->decompressed_scan_slot_data.base); + } MemoryContextReset(batch_state->per_batch_context); } else diff --git a/tsl/src/nodes/vector_agg/plan.c b/tsl/src/nodes/vector_agg/plan.c index 374852bfa1d..2e50f965208 100644 --- a/tsl/src/nodes/vector_agg/plan.c +++ b/tsl/src/nodes/vector_agg/plan.c @@ -149,14 +149,28 @@ try_insert_vector_agg_node(Plan *plan) plan->righttree = try_insert_vector_agg_node(plan->righttree); } + List *append_plans = NIL; if (IsA(plan, Append)) { - List *plans = castNode(Append, plan)->appendplans; + append_plans = castNode(Append, plan)->appendplans; + } + else if (IsA(plan, CustomScan)) + { + CustomScan *custom = castNode(CustomScan, plan); + if (strcmp("ChunkAppend", custom->methods->CustomName) == 0) + { + append_plans = custom->custom_plans; + } + } + + if (append_plans) + { ListCell *lc; - foreach (lc, plans) + foreach (lc, append_plans) { lfirst(lc) = try_insert_vector_agg_node(lfirst(lc)); } + return plan; } if (plan->type != T_Agg) diff --git a/tsl/test/expected/vector_agg_param.out b/tsl/test/expected/vector_agg_param.out new file mode 100644 index 00000000000..a6286cc4979 --- /dev/null +++ b/tsl/test/expected/vector_agg_param.out @@ -0,0 +1,67 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- Test parameterized vector aggregation plans. +create table pvagg(s int, a int); +select create_hypertable('pvagg', 'a', chunk_time_interval => 1000); +NOTICE: adding not-null constraint to column "a" + create_hypertable +-------------------- + (1,public,pvagg,t) +(1 row) + +insert into pvagg select 1, generate_series(1, 999); +insert into pvagg select 2, generate_series(1001, 1999); +alter table pvagg set (timescaledb.compress, timescaledb.compress_segmentby = 's'); +NOTICE: default order by for hypertable "pvagg" is set to "a DESC" +select count(compress_chunk(x)) from show_chunks('pvagg') x; + count +------- + 2 +(1 row) + +analyze pvagg; +explain (costs off) +select x, sum(a) from pvagg, generate_series(1, 2000, 500) x where a < x group by x; + QUERY PLAN +--------------------------------------------------------------------------- + HashAggregate + Group Key: x.x + -> Nested Loop + Join Filter: (_hyper_1_1_chunk.a < x.x) + -> Function Scan on generate_series x + -> Materialize + -> Append + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk + -> Seq Scan on compress_hyper_2_3_chunk + -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk + -> Seq Scan on compress_hyper_2_4_chunk +(11 rows) + +select x, sum(a) from pvagg, generate_series(1, 2000, 500) x where a < x group by x; + x | sum +------+--------- + 1001 | 499500 + 501 | 125250 + 1501 | 1124750 +(3 rows) + +explain (costs off) +select * from unnest(array[0, 1, 2]::int[]) x, lateral (select sum(a) from pvagg where s = x) xx; + QUERY PLAN +--------------------------------------------------------------------------- + Nested Loop + -> Function Scan on unnest x + -> Finalize Aggregate + -> Custom Scan (ChunkAppend) on pvagg + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk + -> Seq Scan on compress_hyper_2_3_chunk + Filter: (s = x.x) + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk + -> Seq Scan on compress_hyper_2_4_chunk + Filter: (s = x.x) +(12 rows) + +drop table pvagg; diff --git a/tsl/test/sql/CMakeLists.txt 
b/tsl/test/sql/CMakeLists.txt index 5a72eef3db2..1b190d27443 100644 --- a/tsl/test/sql/CMakeLists.txt +++ b/tsl/test/sql/CMakeLists.txt @@ -36,11 +36,13 @@ set(TEST_FILES partialize_finalize.sql policy_generalization.sql reorder.sql - skip_scan.sql size_utils_tsl.sql + skip_scan.sql transparent_decompression_join_index.sql - vectorized_aggregation.sql - vector_agg_default.sql) + + vector_agg_default.sql + vector_agg_param.sql + vectorized_aggregation.sql) if(USE_TELEMETRY) list(APPEND TEST_FILES bgw_telemetry.sql) diff --git a/tsl/test/sql/vector_agg_param.sql b/tsl/test/sql/vector_agg_param.sql new file mode 100644 index 00000000000..491a877556d --- /dev/null +++ b/tsl/test/sql/vector_agg_param.sql @@ -0,0 +1,28 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. + +-- Test parameterized vector aggregation plans. + + +create table pvagg(s int, a int); + +select create_hypertable('pvagg', 'a', chunk_time_interval => 1000); + +insert into pvagg select 1, generate_series(1, 999); +insert into pvagg select 2, generate_series(1001, 1999); + +alter table pvagg set (timescaledb.compress, timescaledb.compress_segmentby = 's'); + +select count(compress_chunk(x)) from show_chunks('pvagg') x; + +analyze pvagg; + + +explain (costs off) +select * from unnest(array[0, 1, 2]::int[]) x, lateral (select sum(a) from pvagg where s = x) xx; + +select * from unnest(array[0, 1, 2]::int[]) x, lateral (select sum(a) from pvagg where s = x) xx; + + +drop table pvagg; From 60c6eab33361b8675b9a38d3793bcb5c00b7f5e8 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Tue, 2 Apr 2024 15:34:00 +0200 Subject: [PATCH 31/81] fix --- tsl/test/expected/vector_agg_param.out | 33 +++++++------------------- tsl/test/sql/CMakeLists.txt | 5 ++-- 2 files changed, 10 insertions(+), 28 deletions(-) diff --git a/tsl/test/expected/vector_agg_param.out b/tsl/test/expected/vector_agg_param.out index a6286cc4979..b481d9c8a97 100644 --- a/tsl/test/expected/vector_agg_param.out +++ b/tsl/test/expected/vector_agg_param.out @@ -21,31 +21,6 @@ select count(compress_chunk(x)) from show_chunks('pvagg') x; (1 row) analyze pvagg; -explain (costs off) -select x, sum(a) from pvagg, generate_series(1, 2000, 500) x where a < x group by x; - QUERY PLAN ---------------------------------------------------------------------------- - HashAggregate - Group Key: x.x - -> Nested Loop - Join Filter: (_hyper_1_1_chunk.a < x.x) - -> Function Scan on generate_series x - -> Materialize - -> Append - -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk - -> Seq Scan on compress_hyper_2_3_chunk - -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk - -> Seq Scan on compress_hyper_2_4_chunk -(11 rows) - -select x, sum(a) from pvagg, generate_series(1, 2000, 500) x where a < x group by x; - x | sum -------+--------- - 1001 | 499500 - 501 | 125250 - 1501 | 1124750 -(3 rows) - explain (costs off) select * from unnest(array[0, 1, 2]::int[]) x, lateral (select sum(a) from pvagg where s = x) xx; QUERY PLAN @@ -64,4 +39,12 @@ select * from unnest(array[0, 1, 2]::int[]) x, lateral (select sum(a) from pvagg Filter: (s = x.x) (12 rows) +select * from unnest(array[0, 1, 2]::int[]) x, lateral (select sum(a) from pvagg where s = x) xx; + x | sum +---+--------- + 0 | + 1 | 499500 + 2 | 1498500 +(3 rows) + drop table pvagg; diff --git a/tsl/test/sql/CMakeLists.txt b/tsl/test/sql/CMakeLists.txt 
index 1b190d27443..b0b8d1bbb8d 100644 --- a/tsl/test/sql/CMakeLists.txt +++ b/tsl/test/sql/CMakeLists.txt @@ -39,9 +39,8 @@ set(TEST_FILES size_utils_tsl.sql skip_scan.sql transparent_decompression_join_index.sql - - vector_agg_default.sql - vector_agg_param.sql + vector_agg_default.sql + vector_agg_param.sql vectorized_aggregation.sql) if(USE_TELEMETRY) From 7887ebd4c79ed9be0bfc7ceefb0d9fcbb677f1f5 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Tue, 2 Apr 2024 15:34:55 +0200 Subject: [PATCH 32/81] fix --- tsl/src/nodes/decompress_chunk/compressed_batch.c | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/tsl/src/nodes/decompress_chunk/compressed_batch.c b/tsl/src/nodes/decompress_chunk/compressed_batch.c index 0be304b56ea..c7a9041fb98 100644 --- a/tsl/src/nodes/decompress_chunk/compressed_batch.c +++ b/tsl/src/nodes/decompress_chunk/compressed_batch.c @@ -574,17 +574,8 @@ compressed_batch_discard_tuples(DecompressBatchState *batch_state) if (batch_state->per_batch_context != NULL) { - if (batch_state->compressed_slot != NULL) - { - /* - * The compressed slot is not initialized by the current prototype - * vectorized aggregation code. It's going to be refactored to make - * it more uniform with the normal flow, but for now we have this - * special check here. - */ - ExecClearTuple(batch_state->compressed_slot); - ExecClearTuple(&batch_state->decompressed_scan_slot_data.base); - } + ExecClearTuple(batch_state->compressed_slot); + ExecClearTuple(&batch_state->decompressed_scan_slot_data.base); MemoryContextReset(batch_state->per_batch_context); } else From 84b93b7358ea85b89d09a5d819ddf261b1d9bcc2 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Wed, 3 Apr 2024 16:16:27 +0200 Subject: [PATCH 33/81] separate file for functions --- tsl/src/nodes/vector_agg/CMakeLists.txt | 5 +- tsl/src/nodes/vector_agg/exec.c | 111 +------------------- tsl/src/nodes/vector_agg/functions.c | 130 ++++++++++++++++++++++++ tsl/src/nodes/vector_agg/functions.h | 27 +++++ tsl/src/nodes/vector_agg/plan.c | 4 +- 5 files changed, 164 insertions(+), 113 deletions(-) create mode 100644 tsl/src/nodes/vector_agg/functions.c create mode 100644 tsl/src/nodes/vector_agg/functions.h diff --git a/tsl/src/nodes/vector_agg/CMakeLists.txt b/tsl/src/nodes/vector_agg/CMakeLists.txt index 22a6b93ef0b..428e589d92e 100644 --- a/tsl/src/nodes/vector_agg/CMakeLists.txt +++ b/tsl/src/nodes/vector_agg/CMakeLists.txt @@ -1,3 +1,4 @@ -set(SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/exec.c - ${CMAKE_CURRENT_SOURCE_DIR}/plan.c) +set(SOURCES + ${CMAKE_CURRENT_SOURCE_DIR}/exec.c ${CMAKE_CURRENT_SOURCE_DIR}/functions.c + ${CMAKE_CURRENT_SOURCE_DIR}/plan.c) target_sources(${TSL_LIBRARY_NAME} PRIVATE ${SOURCES}) diff --git a/tsl/src/nodes/vector_agg/exec.c b/tsl/src/nodes/vector_agg/exec.c index 4762c2c69ba..eb0045fc096 100644 --- a/tsl/src/nodes/vector_agg/exec.c +++ b/tsl/src/nodes/vector_agg/exec.c @@ -18,6 +18,7 @@ #include "nodes/decompress_chunk/compressed_batch.h" #include "nodes/decompress_chunk/exec.h" #include "guc.h" +#include "functions.h" static void vector_agg_begin(CustomScanState *node, EState *estate, int eflags) @@ -42,114 +43,6 @@ vector_agg_rescan(CustomScanState *node) ExecReScan(linitial(node->custom_ps)); } -typedef struct -{ - void (*agg_init)(Datum *agg_value, bool *agg_isnull); - void (*agg_vector)(ArrowArray *vector, uint64 *filter, Datum *agg_value, bool *agg_isnull); - void (*agg_const)(Datum 
constvalue, bool constisnull, int n, Datum *agg_value, - bool *agg_isnull); -} VectorAggregate; - -static void -int4_sum_init(Datum *agg_value, bool *agg_isnull) -{ - *agg_value = Int64GetDatum(0); - *agg_isnull = true; -} - -static void -int4_sum_vector(ArrowArray *vector, uint64 *filter, Datum *agg_value, bool *agg_isnull) -{ - Assert(vector != NULL); - Assert(vector->length > 0); - - /* - * We accumulate the sum as int64, so we can sum INT_MAX = 2^31 - 1 - * at least 2^31 times without incurring an overflow of the int64 - * accumulator. The same is true for negative numbers. The - * compressed batch size is currently capped at 1000 rows, but even - * if it's changed in the future, it's unlikely that we support - * batches larger than 65536 rows, not to mention 2^31. Therefore, - * we don't need to check for overflows within the loop, which would - * slow down the calculation. - */ - Assert(vector->length <= INT_MAX); - - int64 batch_sum = 0; - - /* - * This loop is not unrolled automatically, so do it manually as usual. - * The value buffer is padded to an even multiple of 64 bytes, i.e. to - * 64 / 4 = 16 elements. The bitmap is an even multiple of 64 elements. - * The number of elements in the inner loop must be less than both these - * values so that we don't go out of bounds. The particular value was - * chosen because it gives some speedup, and the larger values blow up - * the generated code with no performance benefit (checked on clang 16). - */ -#define INNER_LOOP_SIZE 4 - const int outer_boundary = pad_to_multiple(INNER_LOOP_SIZE, vector->length); - for (int outer = 0; outer < outer_boundary; outer += INNER_LOOP_SIZE) - { - for (int inner = 0; inner < INNER_LOOP_SIZE; inner++) - { - const int row = outer + inner; - const int32 arrow_value = ((int32 *) vector->buffers[1])[row]; - const bool passes_filter = filter ? 
arrow_row_is_valid(filter, row) : true; - batch_sum += passes_filter * arrow_value * arrow_row_is_valid(vector->buffers[0], row); - } - } -#undef INNER_LOOP_SIZE - - int64 tmp = DatumGetInt64(*agg_value); - if (unlikely(pg_add_s64_overflow(tmp, batch_sum, &tmp))) - { - ereport(ERROR, - (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("bigint out of range"))); - } - *agg_value = Int64GetDatum(tmp); - - *agg_isnull = false; -} - -static void -int4_sum_const(Datum constvalue, bool constisnull, int n, Datum *agg_value, bool *agg_isnull) -{ - Assert(n > 0); - - if (constisnull) - { - return; - } - - int32 intvalue = DatumGetInt32(constvalue); - int64 batch_sum = 0; - - /* We have at least one value */ - *agg_isnull = false; - - /* Multiply the number of tuples with the actual value */ - if (unlikely(pg_mul_s64_overflow(intvalue, n, &batch_sum))) - { - ereport(ERROR, - (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("bigint out of range"))); - } - - /* Add the value to our sum */ - int64 tmp = DatumGetInt64(*agg_value); - if (unlikely(pg_add_s64_overflow(tmp, batch_sum, &tmp))) - { - ereport(ERROR, - (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("bigint out of range"))); - } - *agg_value = Int64GetDatum(tmp); -} - -static VectorAggregate int4_sum_agg = { - .agg_init = int4_sum_init, - .agg_const = int4_sum_const, - .agg_vector = int4_sum_vector, -}; - static TupleTableSlot * vector_agg_exec(CustomScanState *vector_agg_state) { @@ -212,7 +105,7 @@ vector_agg_exec(CustomScanState *vector_agg_state) Assert(aggregated_slot->tts_tupleDescriptor->natts == 1); Assert(aggref->aggfnoid == F_SUM_INT4); - VectorAggregate *agg = &int4_sum_agg; + VectorAggregate *agg = get_vector_aggregate(aggref->aggfnoid); agg->agg_init(&aggregated_slot->tts_values[0], &aggregated_slot->tts_isnull[0]); ExecClearTuple(aggregated_slot); diff --git a/tsl/src/nodes/vector_agg/functions.c b/tsl/src/nodes/vector_agg/functions.c new file mode 100644 index 00000000000..879ce9d4222 --- /dev/null +++ b/tsl/src/nodes/vector_agg/functions.c @@ -0,0 +1,130 @@ +/* + * This file and its contents are licensed under the Timescale License. + * Please see the included NOTICE for copyright information and + * LICENSE-TIMESCALE for a copy of the license. + */ + +#include + +#include + +#include +#include + +#include "functions.h" + +/* + * Vectorized implementation of int4_sum. + */ + +static void +int4_sum_init(Datum *agg_value, bool *agg_isnull) +{ + *agg_value = Int64GetDatum(0); + *agg_isnull = true; +} + +static void +int4_sum_vector(ArrowArray *vector, uint64 *filter, Datum *agg_value, bool *agg_isnull) +{ + Assert(vector != NULL); + Assert(vector->length > 0); + + /* + * We accumulate the sum as int64, so we can sum INT_MAX = 2^31 - 1 + * at least 2^31 times without incurring an overflow of the int64 + * accumulator. The same is true for negative numbers. The + * compressed batch size is currently capped at 1000 rows, but even + * if it's changed in the future, it's unlikely that we support + * batches larger than 65536 rows, not to mention 2^31. Therefore, + * we don't need to check for overflows within the loop, which would + * slow down the calculation. + */ + Assert(vector->length <= INT_MAX); + + int64 batch_sum = 0; + + /* + * This loop is not unrolled automatically, so do it manually as usual. + * The value buffer is padded to an even multiple of 64 bytes, i.e. to + * 64 / 4 = 16 elements. The bitmap is an even multiple of 64 elements. 
+ * The number of elements in the inner loop must be less than both these + * values so that we don't go out of bounds. The particular value was + * chosen because it gives some speedup, and the larger values blow up + * the generated code with no performance benefit (checked on clang 16). + */ +#define INNER_LOOP_SIZE 4 + const int outer_boundary = pad_to_multiple(INNER_LOOP_SIZE, vector->length); + for (int outer = 0; outer < outer_boundary; outer += INNER_LOOP_SIZE) + { + for (int inner = 0; inner < INNER_LOOP_SIZE; inner++) + { + const int row = outer + inner; + const int32 arrow_value = ((int32 *) vector->buffers[1])[row]; + const bool passes_filter = filter ? arrow_row_is_valid(filter, row) : true; + batch_sum += passes_filter * arrow_value * arrow_row_is_valid(vector->buffers[0], row); + } + } +#undef INNER_LOOP_SIZE + + int64 tmp = DatumGetInt64(*agg_value); + if (unlikely(pg_add_s64_overflow(tmp, batch_sum, &tmp))) + { + ereport(ERROR, + (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("bigint out of range"))); + } + *agg_value = Int64GetDatum(tmp); + + *agg_isnull = false; +} + +static void +int4_sum_const(Datum constvalue, bool constisnull, int n, Datum *agg_value, bool *agg_isnull) +{ + Assert(n > 0); + + if (constisnull) + { + return; + } + + int32 intvalue = DatumGetInt32(constvalue); + int64 batch_sum = 0; + + /* We have at least one value */ + *agg_isnull = false; + + /* Multiply the number of tuples with the actual value */ + if (unlikely(pg_mul_s64_overflow(intvalue, n, &batch_sum))) + { + ereport(ERROR, + (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("bigint out of range"))); + } + + /* Add the value to our sum */ + int64 tmp = DatumGetInt64(*agg_value); + if (unlikely(pg_add_s64_overflow(tmp, batch_sum, &tmp))) + { + ereport(ERROR, + (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("bigint out of range"))); + } + *agg_value = Int64GetDatum(tmp); +} + +static VectorAggregate int4_sum_agg = { + .agg_init = int4_sum_init, + .agg_const = int4_sum_const, + .agg_vector = int4_sum_vector, +}; + +VectorAggregate * +get_vector_aggregate(Oid aggfnoid) +{ + switch (aggfnoid) + { + case F_SUM_INT4: + return &int4_sum_agg; + default: + return NULL; + } +} diff --git a/tsl/src/nodes/vector_agg/functions.h b/tsl/src/nodes/vector_agg/functions.h new file mode 100644 index 00000000000..5f31fbf5960 --- /dev/null +++ b/tsl/src/nodes/vector_agg/functions.h @@ -0,0 +1,27 @@ +/* + * This file and its contents are licensed under the Timescale License. + * Please see the included NOTICE for copyright information and + * LICENSE-TIMESCALE for a copy of the license. + */ + +#pragma once + +#include + +/* + * Function table for a vectorized implementation of an aggregate function. + */ +typedef struct +{ + /* Initialize the aggregate function state pointed to by agg_value and agg_isnull. */ + void (*agg_init)(Datum *agg_value, bool *agg_isnull); + + /* Aggregate a given arrow array. */ + void (*agg_vector)(ArrowArray *vector, uint64 *filter, Datum *agg_value, bool *agg_isnull); + + /* Aggregate a constant (like segmentby or column with default value). 
*/ + void (*agg_const)(Datum constvalue, bool constisnull, int n, Datum *agg_value, + bool *agg_isnull); +} VectorAggregate; + +VectorAggregate *get_vector_aggregate(Oid aggfnoid); diff --git a/tsl/src/nodes/vector_agg/plan.c b/tsl/src/nodes/vector_agg/plan.c index 2e50f965208..fe5fb219ac2 100644 --- a/tsl/src/nodes/vector_agg/plan.c +++ b/tsl/src/nodes/vector_agg/plan.c @@ -17,6 +17,7 @@ #include "plan.h" #include "exec.h" +#include "functions.h" #include "utils.h" #include "nodes/decompress_chunk/planner.h" @@ -260,9 +261,8 @@ try_insert_vector_agg_node(Plan *plan) return plan; } - if (aggref->aggfnoid != F_SUM_INT4) + if (get_vector_aggregate(aggref->aggfnoid) == NULL) { - /* We only support sum(int4) at the moment. */ return plan; } From cdf0b58ac5864c38eea7cf8773c55b6c79528b13 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Wed, 3 Apr 2024 19:33:22 +0200 Subject: [PATCH 34/81] adjust chunk sizes to avoid sorting in parallel append --- tsl/test/expected/vectorized_aggregation.out | 365 ++++++++----------- tsl/test/sql/vectorized_aggregation.sql | 4 +- 2 files changed, 165 insertions(+), 204 deletions(-) diff --git a/tsl/test/expected/vectorized_aggregation.out b/tsl/test/expected/vectorized_aggregation.out index 040f4a0a6ce..87a9df6a150 100644 --- a/tsl/test/expected/vectorized_aggregation.out +++ b/tsl/test/expected/vectorized_aggregation.out @@ -2421,7 +2421,7 @@ value2 AS segment_by_value2, value1 AS int_value, value1 AS float_value FROM -generate_series('1980-01-01 00:00:00-00', '1980-03-01 00:00:00-00', INTERVAL '1 day') AS g1(time), +generate_series('1980-01-03 00:00:00-00', '1980-03-04 00:00:00-00', INTERVAL '1 day') AS g1(time), generate_series(-10, 25, 1) AS g2(value1), generate_series(-30, 20, 1) AS g3(value2) ORDER BY time; @@ -2429,7 +2429,7 @@ ORDER BY time; SELECT sum(segment_by_value1), sum(segment_by_value2) FROM testtable2; sum | sum --------+--------- - 839970 | -559980 + 853740 | -569160 (1 row) SELECT compress_chunk(ch) FROM show_chunks('testtable2') ch; @@ -2444,9 +2444,9 @@ SELECT compress_chunk(ch) FROM show_chunks('testtable2') ch; _timescaledb_internal._hyper_3_107_chunk _timescaledb_internal._hyper_3_108_chunk _timescaledb_internal._hyper_3_109_chunk - _timescaledb_internal._hyper_3_110_chunk -(10 rows) +(9 rows) +ANALYZE testtable2; :EXPLAIN SELECT sum(segment_by_value1) FROM testtable2; QUERY PLAN @@ -2461,68 +2461,62 @@ SELECT sum(segment_by_value1) FROM testtable2; Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk Output: _hyper_3_101_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk - Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_110_chunk + Output: compress_hyper_4_110_chunk._ts_meta_count, compress_hyper_4_110_chunk._ts_meta_sequence_num, compress_hyper_4_110_chunk.segment_by_value1, compress_hyper_4_110_chunk.segment_by_value2, compress_hyper_4_110_chunk._ts_meta_min_1, compress_hyper_4_110_chunk._ts_meta_max_1, compress_hyper_4_110_chunk."time", 
compress_hyper_4_110_chunk.int_value, compress_hyper_4_110_chunk.float_value -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk Output: _hyper_3_102_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk - Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk + Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk Output: _hyper_3_103_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk - Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk + Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_104_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk Output: _hyper_3_104_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk - Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk + Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value -> Custom Scan (VectorAgg) Output: 
(PARTIAL sum(_hyper_3_105_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk Output: _hyper_3_105_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk - Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk + Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk Output: _hyper_3_106_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk - Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk + Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk Output: _hyper_3_107_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk - Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk + Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on 
_timescaledb_internal._hyper_3_108_chunk Output: _hyper_3_108_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk - Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk + Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk Output: _hyper_3_109_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_119_chunk - Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_110_chunk.segment_by_value1)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk - Output: _hyper_3_110_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_120_chunk - Output: compress_hyper_4_120_chunk._ts_meta_count, compress_hyper_4_120_chunk._ts_meta_sequence_num, compress_hyper_4_120_chunk.segment_by_value1, compress_hyper_4_120_chunk.segment_by_value2, compress_hyper_4_120_chunk._ts_meta_min_1, compress_hyper_4_120_chunk._ts_meta_max_1, compress_hyper_4_120_chunk."time", compress_hyper_4_120_chunk.int_value, compress_hyper_4_120_chunk.float_value -(66 rows) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk + Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value +(60 rows) SELECT sum(segment_by_value1) FROM testtable2; sum -------- - 839970 + 853740 (1 row) :EXPLAIN @@ -2539,68 +2533,62 @@ SELECT sum(segment_by_value2) FROM testtable2; Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value2)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk Output: _hyper_3_101_chunk.segment_by_value2 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk - Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, 
compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_110_chunk + Output: compress_hyper_4_110_chunk._ts_meta_count, compress_hyper_4_110_chunk._ts_meta_sequence_num, compress_hyper_4_110_chunk.segment_by_value1, compress_hyper_4_110_chunk.segment_by_value2, compress_hyper_4_110_chunk._ts_meta_min_1, compress_hyper_4_110_chunk._ts_meta_max_1, compress_hyper_4_110_chunk."time", compress_hyper_4_110_chunk.int_value, compress_hyper_4_110_chunk.float_value -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value2)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk Output: _hyper_3_102_chunk.segment_by_value2 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk - Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk + Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value2)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk Output: _hyper_3_103_chunk.segment_by_value2 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk - Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk + Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_104_chunk.segment_by_value2)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk Output: _hyper_3_104_chunk.segment_by_value2 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk - Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, 
compress_hyper_4_114_chunk.float_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk + Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value2)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk Output: _hyper_3_105_chunk.segment_by_value2 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk - Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk + Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value2)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk Output: _hyper_3_106_chunk.segment_by_value2 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk - Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk + Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value2)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk Output: _hyper_3_107_chunk.segment_by_value2 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk - Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk + 
Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value2)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk Output: _hyper_3_108_chunk.segment_by_value2 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk - Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk + Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value2)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk Output: _hyper_3_109_chunk.segment_by_value2 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_119_chunk - Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_110_chunk.segment_by_value2)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk - Output: _hyper_3_110_chunk.segment_by_value2 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_120_chunk - Output: compress_hyper_4_120_chunk._ts_meta_count, compress_hyper_4_120_chunk._ts_meta_sequence_num, compress_hyper_4_120_chunk.segment_by_value1, compress_hyper_4_120_chunk.segment_by_value2, compress_hyper_4_120_chunk._ts_meta_min_1, compress_hyper_4_120_chunk._ts_meta_max_1, compress_hyper_4_120_chunk."time", compress_hyper_4_120_chunk.int_value, compress_hyper_4_120_chunk.float_value -(66 rows) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk + Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value +(60 rows) SELECT sum(segment_by_value2) FROM testtable2; sum --------- - -559980 + -569160 (1 row) -- Vectorization possible - filter on segment_by @@ -2609,82 +2597,75 @@ 
SELECT sum(segment_by_value1) FROM testtable2 WHERE segment_by_value1 > 0; QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- Finalize Aggregate - Output: sum(_hyper_3_102_chunk.segment_by_value1) + Output: sum(_hyper_3_101_chunk.segment_by_value1) -> Gather - Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) + Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) Workers Planned: 2 -> Parallel Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk + Output: _hyper_3_101_chunk.segment_by_value1 + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_110_chunk + Output: compress_hyper_4_110_chunk._ts_meta_count, compress_hyper_4_110_chunk._ts_meta_sequence_num, compress_hyper_4_110_chunk.segment_by_value1, compress_hyper_4_110_chunk.segment_by_value2, compress_hyper_4_110_chunk._ts_meta_min_1, compress_hyper_4_110_chunk._ts_meta_max_1, compress_hyper_4_110_chunk."time", compress_hyper_4_110_chunk.int_value, compress_hyper_4_110_chunk.float_value + Filter: (compress_hyper_4_110_chunk.segment_by_value1 > 0) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk Output: _hyper_3_102_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk - Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value - Filter: (compress_hyper_4_112_chunk.segment_by_value1 > 0) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk + Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value + Filter: (compress_hyper_4_111_chunk.segment_by_value1 > 0) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk Output: _hyper_3_103_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk - Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value - Filter: (compress_hyper_4_113_chunk.segment_by_value1 > 
0) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk + Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value + Filter: (compress_hyper_4_112_chunk.segment_by_value1 > 0) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_104_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk Output: _hyper_3_104_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk - Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value - Filter: (compress_hyper_4_114_chunk.segment_by_value1 > 0) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk + Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value + Filter: (compress_hyper_4_113_chunk.segment_by_value1 > 0) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk Output: _hyper_3_105_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk - Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value - Filter: (compress_hyper_4_115_chunk.segment_by_value1 > 0) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk + Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value + Filter: (compress_hyper_4_114_chunk.segment_by_value1 > 0) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk Output: _hyper_3_106_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk - Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, 
compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value - Filter: (compress_hyper_4_116_chunk.segment_by_value1 > 0) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk + Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value + Filter: (compress_hyper_4_115_chunk.segment_by_value1 > 0) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk Output: _hyper_3_107_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk - Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value - Filter: (compress_hyper_4_117_chunk.segment_by_value1 > 0) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk + Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value + Filter: (compress_hyper_4_116_chunk.segment_by_value1 > 0) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk Output: _hyper_3_108_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk - Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value - Filter: (compress_hyper_4_118_chunk.segment_by_value1 > 0) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk + Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value + Filter: (compress_hyper_4_117_chunk.segment_by_value1 > 0) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk Output: _hyper_3_109_chunk.segment_by_value1 - -> Parallel Seq Scan 
on _timescaledb_internal.compress_hyper_4_119_chunk - Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value - Filter: (compress_hyper_4_119_chunk.segment_by_value1 > 0) - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_110_chunk.segment_by_value1)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk - Output: _hyper_3_110_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_120_chunk - Output: compress_hyper_4_120_chunk._ts_meta_count, compress_hyper_4_120_chunk._ts_meta_sequence_num, compress_hyper_4_120_chunk.segment_by_value1, compress_hyper_4_120_chunk.segment_by_value2, compress_hyper_4_120_chunk._ts_meta_min_1, compress_hyper_4_120_chunk._ts_meta_max_1, compress_hyper_4_120_chunk."time", compress_hyper_4_120_chunk.int_value, compress_hyper_4_120_chunk.float_value - Filter: (compress_hyper_4_120_chunk.segment_by_value1 > 0) - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk - Output: _hyper_3_101_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk - Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value - Filter: (compress_hyper_4_111_chunk.segment_by_value1 > 0) -(76 rows) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk + Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value + Filter: (compress_hyper_4_118_chunk.segment_by_value1 > 0) +(69 rows) :EXPLAIN SELECT sum(segment_by_value1) FROM testtable2 WHERE segment_by_value1 > 0 AND segment_by_value2 > 0; @@ -2700,73 +2681,66 @@ SELECT sum(segment_by_value1) FROM testtable2 WHERE segment_by_value1 > 0 AND se Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk Output: _hyper_3_101_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk - Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value - Filter: ((compress_hyper_4_111_chunk.segment_by_value1 > 0) AND (compress_hyper_4_111_chunk.segment_by_value2 > 0)) + -> Parallel Seq Scan 
on _timescaledb_internal.compress_hyper_4_110_chunk + Output: compress_hyper_4_110_chunk._ts_meta_count, compress_hyper_4_110_chunk._ts_meta_sequence_num, compress_hyper_4_110_chunk.segment_by_value1, compress_hyper_4_110_chunk.segment_by_value2, compress_hyper_4_110_chunk._ts_meta_min_1, compress_hyper_4_110_chunk._ts_meta_max_1, compress_hyper_4_110_chunk."time", compress_hyper_4_110_chunk.int_value, compress_hyper_4_110_chunk.float_value + Filter: ((compress_hyper_4_110_chunk.segment_by_value1 > 0) AND (compress_hyper_4_110_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk Output: _hyper_3_102_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk - Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value - Filter: ((compress_hyper_4_112_chunk.segment_by_value1 > 0) AND (compress_hyper_4_112_chunk.segment_by_value2 > 0)) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk + Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value + Filter: ((compress_hyper_4_111_chunk.segment_by_value1 > 0) AND (compress_hyper_4_111_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk Output: _hyper_3_103_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk - Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value - Filter: ((compress_hyper_4_113_chunk.segment_by_value1 > 0) AND (compress_hyper_4_113_chunk.segment_by_value2 > 0)) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk + Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value + Filter: ((compress_hyper_4_112_chunk.segment_by_value1 > 0) AND (compress_hyper_4_112_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_104_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk Output: _hyper_3_104_chunk.segment_by_value1 - -> Parallel Seq 
Scan on _timescaledb_internal.compress_hyper_4_114_chunk - Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value - Filter: ((compress_hyper_4_114_chunk.segment_by_value1 > 0) AND (compress_hyper_4_114_chunk.segment_by_value2 > 0)) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk + Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value + Filter: ((compress_hyper_4_113_chunk.segment_by_value1 > 0) AND (compress_hyper_4_113_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk Output: _hyper_3_105_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk - Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value - Filter: ((compress_hyper_4_115_chunk.segment_by_value1 > 0) AND (compress_hyper_4_115_chunk.segment_by_value2 > 0)) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk + Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value + Filter: ((compress_hyper_4_114_chunk.segment_by_value1 > 0) AND (compress_hyper_4_114_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk Output: _hyper_3_106_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk - Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value - Filter: ((compress_hyper_4_116_chunk.segment_by_value1 > 0) AND (compress_hyper_4_116_chunk.segment_by_value2 > 0)) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk + Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, 
compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value + Filter: ((compress_hyper_4_115_chunk.segment_by_value1 > 0) AND (compress_hyper_4_115_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk Output: _hyper_3_107_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk - Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value - Filter: ((compress_hyper_4_117_chunk.segment_by_value1 > 0) AND (compress_hyper_4_117_chunk.segment_by_value2 > 0)) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk + Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value + Filter: ((compress_hyper_4_116_chunk.segment_by_value1 > 0) AND (compress_hyper_4_116_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk Output: _hyper_3_108_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk - Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value - Filter: ((compress_hyper_4_118_chunk.segment_by_value1 > 0) AND (compress_hyper_4_118_chunk.segment_by_value2 > 0)) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk + Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value + Filter: ((compress_hyper_4_117_chunk.segment_by_value1 > 0) AND (compress_hyper_4_117_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk Output: _hyper_3_109_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_119_chunk - Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, 
compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value - Filter: ((compress_hyper_4_119_chunk.segment_by_value1 > 0) AND (compress_hyper_4_119_chunk.segment_by_value2 > 0)) - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_110_chunk.segment_by_value1)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk - Output: _hyper_3_110_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_120_chunk - Output: compress_hyper_4_120_chunk._ts_meta_count, compress_hyper_4_120_chunk._ts_meta_sequence_num, compress_hyper_4_120_chunk.segment_by_value1, compress_hyper_4_120_chunk.segment_by_value2, compress_hyper_4_120_chunk._ts_meta_min_1, compress_hyper_4_120_chunk._ts_meta_max_1, compress_hyper_4_120_chunk."time", compress_hyper_4_120_chunk.int_value, compress_hyper_4_120_chunk.float_value - Filter: ((compress_hyper_4_120_chunk.segment_by_value1 > 0) AND (compress_hyper_4_120_chunk.segment_by_value2 > 0)) -(76 rows) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk + Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value + Filter: ((compress_hyper_4_118_chunk.segment_by_value1 > 0) AND (compress_hyper_4_118_chunk.segment_by_value2 > 0)) +(69 rows) :EXPLAIN SELECT sum(segment_by_value1) FROM testtable2 WHERE segment_by_value1 > 0 AND segment_by_value2 > 0 AND 2>1; @@ -2782,73 +2756,66 @@ SELECT sum(segment_by_value1) FROM testtable2 WHERE segment_by_value1 > 0 AND se Output: (PARTIAL sum(_hyper_3_101_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk Output: _hyper_3_101_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk - Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value - Filter: ((compress_hyper_4_111_chunk.segment_by_value1 > 0) AND (compress_hyper_4_111_chunk.segment_by_value2 > 0)) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_110_chunk + Output: compress_hyper_4_110_chunk._ts_meta_count, compress_hyper_4_110_chunk._ts_meta_sequence_num, compress_hyper_4_110_chunk.segment_by_value1, compress_hyper_4_110_chunk.segment_by_value2, compress_hyper_4_110_chunk._ts_meta_min_1, compress_hyper_4_110_chunk._ts_meta_max_1, compress_hyper_4_110_chunk."time", compress_hyper_4_110_chunk.int_value, compress_hyper_4_110_chunk.float_value + Filter: ((compress_hyper_4_110_chunk.segment_by_value1 > 0) AND (compress_hyper_4_110_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on 
_timescaledb_internal._hyper_3_102_chunk Output: _hyper_3_102_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk - Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value - Filter: ((compress_hyper_4_112_chunk.segment_by_value1 > 0) AND (compress_hyper_4_112_chunk.segment_by_value2 > 0)) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_111_chunk + Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value + Filter: ((compress_hyper_4_111_chunk.segment_by_value1 > 0) AND (compress_hyper_4_111_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk Output: _hyper_3_103_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk - Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value - Filter: ((compress_hyper_4_113_chunk.segment_by_value1 > 0) AND (compress_hyper_4_113_chunk.segment_by_value2 > 0)) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_112_chunk + Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value + Filter: ((compress_hyper_4_112_chunk.segment_by_value1 > 0) AND (compress_hyper_4_112_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_104_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk Output: _hyper_3_104_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk - Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value - Filter: ((compress_hyper_4_114_chunk.segment_by_value1 > 0) AND (compress_hyper_4_114_chunk.segment_by_value2 > 0)) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_113_chunk + Output: 
compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value + Filter: ((compress_hyper_4_113_chunk.segment_by_value1 > 0) AND (compress_hyper_4_113_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk Output: _hyper_3_105_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk - Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value - Filter: ((compress_hyper_4_115_chunk.segment_by_value1 > 0) AND (compress_hyper_4_115_chunk.segment_by_value2 > 0)) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_114_chunk + Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value + Filter: ((compress_hyper_4_114_chunk.segment_by_value1 > 0) AND (compress_hyper_4_114_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk Output: _hyper_3_106_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk - Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value - Filter: ((compress_hyper_4_116_chunk.segment_by_value1 > 0) AND (compress_hyper_4_116_chunk.segment_by_value2 > 0)) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_115_chunk + Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value + Filter: ((compress_hyper_4_115_chunk.segment_by_value1 > 0) AND (compress_hyper_4_115_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk Output: _hyper_3_107_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk - 
Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value - Filter: ((compress_hyper_4_117_chunk.segment_by_value1 > 0) AND (compress_hyper_4_117_chunk.segment_by_value2 > 0)) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_116_chunk + Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value + Filter: ((compress_hyper_4_116_chunk.segment_by_value1 > 0) AND (compress_hyper_4_116_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk Output: _hyper_3_108_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk - Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value - Filter: ((compress_hyper_4_118_chunk.segment_by_value1 > 0) AND (compress_hyper_4_118_chunk.segment_by_value2 > 0)) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_117_chunk + Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value + Filter: ((compress_hyper_4_117_chunk.segment_by_value1 > 0) AND (compress_hyper_4_117_chunk.segment_by_value2 > 0)) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk Output: _hyper_3_109_chunk.segment_by_value1 - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_119_chunk - Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value - Filter: ((compress_hyper_4_119_chunk.segment_by_value1 > 0) AND (compress_hyper_4_119_chunk.segment_by_value2 > 0)) - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_110_chunk.segment_by_value1)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk - Output: _hyper_3_110_chunk.segment_by_value1 - -> Parallel Seq Scan on 
_timescaledb_internal.compress_hyper_4_120_chunk - Output: compress_hyper_4_120_chunk._ts_meta_count, compress_hyper_4_120_chunk._ts_meta_sequence_num, compress_hyper_4_120_chunk.segment_by_value1, compress_hyper_4_120_chunk.segment_by_value2, compress_hyper_4_120_chunk._ts_meta_min_1, compress_hyper_4_120_chunk._ts_meta_max_1, compress_hyper_4_120_chunk."time", compress_hyper_4_120_chunk.int_value, compress_hyper_4_120_chunk.float_value - Filter: ((compress_hyper_4_120_chunk.segment_by_value1 > 0) AND (compress_hyper_4_120_chunk.segment_by_value2 > 0)) -(76 rows) + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_4_118_chunk + Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value + Filter: ((compress_hyper_4_118_chunk.segment_by_value1 > 0) AND (compress_hyper_4_118_chunk.segment_by_value2 > 0)) +(69 rows) -- Vectorization not possible filter on segment_by and compressed value -- Disable parallel worker to get deterministic query plans on i386 @@ -2865,81 +2832,73 @@ SELECT sum(segment_by_value1) FROM testtable2 WHERE segment_by_value1 > 1000 AND -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_101_chunk Output: _hyper_3_101_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_101_chunk.int_value > 1000) - -> Index Scan using compress_hyper_4_111_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_111_chunk - Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value - Index Cond: (compress_hyper_4_111_chunk.segment_by_value1 > 1000) + -> Index Scan using compress_hyper_4_110_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_110_chunk + Output: compress_hyper_4_110_chunk._ts_meta_count, compress_hyper_4_110_chunk._ts_meta_sequence_num, compress_hyper_4_110_chunk.segment_by_value1, compress_hyper_4_110_chunk.segment_by_value2, compress_hyper_4_110_chunk._ts_meta_min_1, compress_hyper_4_110_chunk._ts_meta_max_1, compress_hyper_4_110_chunk."time", compress_hyper_4_110_chunk.int_value, compress_hyper_4_110_chunk.float_value + Index Cond: (compress_hyper_4_110_chunk.segment_by_value1 > 1000) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_102_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_102_chunk Output: _hyper_3_102_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_102_chunk.int_value > 1000) - -> Index Scan using compress_hyper_4_112_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_112_chunk - Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, 
compress_hyper_4_112_chunk.float_value - Index Cond: (compress_hyper_4_112_chunk.segment_by_value1 > 1000) + -> Index Scan using compress_hyper_4_111_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_111_chunk + Output: compress_hyper_4_111_chunk._ts_meta_count, compress_hyper_4_111_chunk._ts_meta_sequence_num, compress_hyper_4_111_chunk.segment_by_value1, compress_hyper_4_111_chunk.segment_by_value2, compress_hyper_4_111_chunk._ts_meta_min_1, compress_hyper_4_111_chunk._ts_meta_max_1, compress_hyper_4_111_chunk."time", compress_hyper_4_111_chunk.int_value, compress_hyper_4_111_chunk.float_value + Index Cond: (compress_hyper_4_111_chunk.segment_by_value1 > 1000) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_103_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_103_chunk Output: _hyper_3_103_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_103_chunk.int_value > 1000) - -> Index Scan using compress_hyper_4_113_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_113_chunk - Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, compress_hyper_4_113_chunk.float_value - Index Cond: (compress_hyper_4_113_chunk.segment_by_value1 > 1000) + -> Index Scan using compress_hyper_4_112_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_112_chunk + Output: compress_hyper_4_112_chunk._ts_meta_count, compress_hyper_4_112_chunk._ts_meta_sequence_num, compress_hyper_4_112_chunk.segment_by_value1, compress_hyper_4_112_chunk.segment_by_value2, compress_hyper_4_112_chunk._ts_meta_min_1, compress_hyper_4_112_chunk._ts_meta_max_1, compress_hyper_4_112_chunk."time", compress_hyper_4_112_chunk.int_value, compress_hyper_4_112_chunk.float_value + Index Cond: (compress_hyper_4_112_chunk.segment_by_value1 > 1000) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_104_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_104_chunk Output: _hyper_3_104_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_104_chunk.int_value > 1000) - -> Index Scan using compress_hyper_4_114_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_114_chunk - Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value - Index Cond: (compress_hyper_4_114_chunk.segment_by_value1 > 1000) + -> Index Scan using compress_hyper_4_113_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_113_chunk + Output: compress_hyper_4_113_chunk._ts_meta_count, compress_hyper_4_113_chunk._ts_meta_sequence_num, compress_hyper_4_113_chunk.segment_by_value1, compress_hyper_4_113_chunk.segment_by_value2, compress_hyper_4_113_chunk._ts_meta_min_1, compress_hyper_4_113_chunk._ts_meta_max_1, compress_hyper_4_113_chunk."time", compress_hyper_4_113_chunk.int_value, 
compress_hyper_4_113_chunk.float_value + Index Cond: (compress_hyper_4_113_chunk.segment_by_value1 > 1000) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_105_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_105_chunk Output: _hyper_3_105_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_105_chunk.int_value > 1000) - -> Index Scan using compress_hyper_4_115_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_115_chunk - Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value - Index Cond: (compress_hyper_4_115_chunk.segment_by_value1 > 1000) + -> Index Scan using compress_hyper_4_114_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_114_chunk + Output: compress_hyper_4_114_chunk._ts_meta_count, compress_hyper_4_114_chunk._ts_meta_sequence_num, compress_hyper_4_114_chunk.segment_by_value1, compress_hyper_4_114_chunk.segment_by_value2, compress_hyper_4_114_chunk._ts_meta_min_1, compress_hyper_4_114_chunk._ts_meta_max_1, compress_hyper_4_114_chunk."time", compress_hyper_4_114_chunk.int_value, compress_hyper_4_114_chunk.float_value + Index Cond: (compress_hyper_4_114_chunk.segment_by_value1 > 1000) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_106_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_106_chunk Output: _hyper_3_106_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_106_chunk.int_value > 1000) - -> Index Scan using compress_hyper_4_116_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_116_chunk - Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value - Index Cond: (compress_hyper_4_116_chunk.segment_by_value1 > 1000) + -> Index Scan using compress_hyper_4_115_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_115_chunk + Output: compress_hyper_4_115_chunk._ts_meta_count, compress_hyper_4_115_chunk._ts_meta_sequence_num, compress_hyper_4_115_chunk.segment_by_value1, compress_hyper_4_115_chunk.segment_by_value2, compress_hyper_4_115_chunk._ts_meta_min_1, compress_hyper_4_115_chunk._ts_meta_max_1, compress_hyper_4_115_chunk."time", compress_hyper_4_115_chunk.int_value, compress_hyper_4_115_chunk.float_value + Index Cond: (compress_hyper_4_115_chunk.segment_by_value1 > 1000) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_107_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_107_chunk Output: _hyper_3_107_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_107_chunk.int_value > 1000) - -> Index Scan using compress_hyper_4_117_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_117_chunk - Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, 
compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value - Index Cond: (compress_hyper_4_117_chunk.segment_by_value1 > 1000) + -> Index Scan using compress_hyper_4_116_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_116_chunk + Output: compress_hyper_4_116_chunk._ts_meta_count, compress_hyper_4_116_chunk._ts_meta_sequence_num, compress_hyper_4_116_chunk.segment_by_value1, compress_hyper_4_116_chunk.segment_by_value2, compress_hyper_4_116_chunk._ts_meta_min_1, compress_hyper_4_116_chunk._ts_meta_max_1, compress_hyper_4_116_chunk."time", compress_hyper_4_116_chunk.int_value, compress_hyper_4_116_chunk.float_value + Index Cond: (compress_hyper_4_116_chunk.segment_by_value1 > 1000) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_108_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_108_chunk Output: _hyper_3_108_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_108_chunk.int_value > 1000) - -> Index Scan using compress_hyper_4_118_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_118_chunk - Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value - Index Cond: (compress_hyper_4_118_chunk.segment_by_value1 > 1000) + -> Index Scan using compress_hyper_4_117_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_117_chunk + Output: compress_hyper_4_117_chunk._ts_meta_count, compress_hyper_4_117_chunk._ts_meta_sequence_num, compress_hyper_4_117_chunk.segment_by_value1, compress_hyper_4_117_chunk.segment_by_value2, compress_hyper_4_117_chunk._ts_meta_min_1, compress_hyper_4_117_chunk._ts_meta_max_1, compress_hyper_4_117_chunk."time", compress_hyper_4_117_chunk.int_value, compress_hyper_4_117_chunk.float_value + Index Cond: (compress_hyper_4_117_chunk.segment_by_value1 > 1000) -> Custom Scan (VectorAgg) Output: (PARTIAL sum(_hyper_3_109_chunk.segment_by_value1)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_109_chunk Output: _hyper_3_109_chunk.segment_by_value1 Vectorized Filter: (_hyper_3_109_chunk.int_value > 1000) - -> Index Scan using compress_hyper_4_119_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_119_chunk - Output: compress_hyper_4_119_chunk._ts_meta_count, compress_hyper_4_119_chunk._ts_meta_sequence_num, compress_hyper_4_119_chunk.segment_by_value1, compress_hyper_4_119_chunk.segment_by_value2, compress_hyper_4_119_chunk._ts_meta_min_1, compress_hyper_4_119_chunk._ts_meta_max_1, compress_hyper_4_119_chunk."time", compress_hyper_4_119_chunk.int_value, compress_hyper_4_119_chunk.float_value - Index Cond: (compress_hyper_4_119_chunk.segment_by_value1 > 1000) - -> Custom Scan (VectorAgg) - Output: (PARTIAL sum(_hyper_3_110_chunk.segment_by_value1)) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_3_110_chunk - Output: _hyper_3_110_chunk.segment_by_value1 - Vectorized Filter: (_hyper_3_110_chunk.int_value 
> 1000) - -> Index Scan using compress_hyper_4_120_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_120_chunk - Output: compress_hyper_4_120_chunk._ts_meta_count, compress_hyper_4_120_chunk._ts_meta_sequence_num, compress_hyper_4_120_chunk.segment_by_value1, compress_hyper_4_120_chunk.segment_by_value2, compress_hyper_4_120_chunk._ts_meta_min_1, compress_hyper_4_120_chunk._ts_meta_max_1, compress_hyper_4_120_chunk."time", compress_hyper_4_120_chunk.int_value, compress_hyper_4_120_chunk.float_value - Index Cond: (compress_hyper_4_120_chunk.segment_by_value1 > 1000) -(83 rows) + -> Index Scan using compress_hyper_4_118_chunk_segment_by_value1_segment_by_val_idx on _timescaledb_internal.compress_hyper_4_118_chunk + Output: compress_hyper_4_118_chunk._ts_meta_count, compress_hyper_4_118_chunk._ts_meta_sequence_num, compress_hyper_4_118_chunk.segment_by_value1, compress_hyper_4_118_chunk.segment_by_value2, compress_hyper_4_118_chunk._ts_meta_min_1, compress_hyper_4_118_chunk._ts_meta_max_1, compress_hyper_4_118_chunk."time", compress_hyper_4_118_chunk.int_value, compress_hyper_4_118_chunk.float_value + Index Cond: (compress_hyper_4_118_chunk.segment_by_value1 > 1000) +(75 rows) RESET max_parallel_workers_per_gather; diff --git a/tsl/test/sql/vectorized_aggregation.sql b/tsl/test/sql/vectorized_aggregation.sql index b8cc7847f41..6ab98169f87 100644 --- a/tsl/test/sql/vectorized_aggregation.sql +++ b/tsl/test/sql/vectorized_aggregation.sql @@ -349,7 +349,7 @@ value2 AS segment_by_value2, value1 AS int_value, value1 AS float_value FROM -generate_series('1980-01-01 00:00:00-00', '1980-03-01 00:00:00-00', INTERVAL '1 day') AS g1(time), +generate_series('1980-01-03 00:00:00-00', '1980-03-04 00:00:00-00', INTERVAL '1 day') AS g1(time), generate_series(-10, 25, 1) AS g2(value1), generate_series(-30, 20, 1) AS g3(value2) ORDER BY time; @@ -359,6 +359,8 @@ SELECT sum(segment_by_value1), sum(segment_by_value2) FROM testtable2; SELECT compress_chunk(ch) FROM show_chunks('testtable2') ch; +ANALYZE testtable2; + :EXPLAIN SELECT sum(segment_by_value1) FROM testtable2; From bb3a2e279a6a9bbc91e40ffd3ccb49056aa69934 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 4 Apr 2024 13:20:50 +0200 Subject: [PATCH 35/81] compat --- tsl/src/nodes/vector_agg/functions.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tsl/src/nodes/vector_agg/functions.c b/tsl/src/nodes/vector_agg/functions.c index 879ce9d4222..5a2a0a41aa5 100644 --- a/tsl/src/nodes/vector_agg/functions.c +++ b/tsl/src/nodes/vector_agg/functions.c @@ -13,6 +13,8 @@ #include "functions.h" +#include "compat/compat.h" + /* * Vectorized implementation of int4_sum. */ From f5bbfb06a488c47cd41cc4208472301502259bfd Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 4 Apr 2024 14:41:32 +0200 Subject: [PATCH 36/81] benchmark vectorized agg in separate node (2024-04-04 no. 1) From 3b6d263babb077cff1f75e216716a9b75eef11ec Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 4 Apr 2024 17:10:24 +0200 Subject: [PATCH 37/81] benchmark vectorized agg in separate node (2024-04-04 no. 2) From d93ef39e6b7817c51871045b18bd46e17dc5f2a0 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 4 Apr 2024 17:20:02 +0200 Subject: [PATCH 38/81] benchmark vectorized agg in separate node (2024-04-04 no. 
3) From 76dc6dceecc2427df4d8a7820f3739050e29ac85 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 4 Apr 2024 19:56:18 +0200 Subject: [PATCH 39/81] fix runtime chunk exclusion with vectorized aggregation --- src/nodes/chunk_append/planner.c | 32 +++++++++++++++++--- tsl/test/expected/decompress_vector_qual.out | 8 ++--- tsl/test/expected/vector_agg_default.out | 15 +++++++++ tsl/test/sql/decompress_vector_qual.sql | 8 ++--- tsl/test/sql/vector_agg_default.sql | 6 ++++ 5 files changed, 57 insertions(+), 12 deletions(-) diff --git a/src/nodes/chunk_append/planner.c b/src/nodes/chunk_append/planner.c index 8d00a47439e..b3ca10e5d15 100644 --- a/src/nodes/chunk_append/planner.c +++ b/src/nodes/chunk_append/planner.c @@ -404,11 +404,35 @@ ts_chunk_append_get_scan_plan(Plan *plan) return (Scan *) plan; break; case T_CustomScan: - if (castNode(CustomScan, plan)->scan.scanrelid > 0) + { + CustomScan *custom = castNode(CustomScan, plan); + if (custom->scan.scanrelid > 0) + { + /* + * The custom plan node is a scan itself. This handles the + * DecompressChunk node. + */ return (Scan *) plan; - else - return NULL; - break; + } + + if (strcmp(custom->methods->CustomName, "VectorAgg") == 0) + { + /* + * This is a vectorized aggregation node, we have to recurse + * into its child, similar to the normal aggregation node. + * + * Unfortunately we have to hardcode the node name here, because + * we can't depend on the TSL library. + */ + return ts_chunk_append_get_scan_plan(linitial(custom->custom_plans)); + } + + /* + * This is some other unknown custom scan node, we can't recurse + * into it. + */ + return NULL; + } case T_Agg: if (plan->lefttree != NULL) { diff --git a/tsl/test/expected/decompress_vector_qual.out b/tsl/test/expected/decompress_vector_qual.out index 8e77da74fc6..d62b2d4f935 100644 --- a/tsl/test/expected/decompress_vector_qual.out +++ b/tsl/test/expected/decompress_vector_qual.out @@ -2,7 +2,7 @@ -- Please see the included NOTICE for copyright information and -- LICENSE-TIMESCALE for a copy of the license. \c :TEST_DBNAME :ROLE_SUPERUSER -create function stable_identity(x anyelement) returns anyelement as $$ select x $$ language sql stable; +create function stable_abs(x int4) returns int4 as 'int4abs' language internal stable; create table vectorqual(metric1 int8, ts timestamp, metric2 int8, device int8); select create_hypertable('vectorqual', 'ts'); WARNING: column type "timestamp without time zone" used for "ts" does not follow best practices @@ -206,7 +206,7 @@ execute p(33); deallocate p; -- Also try query parameter in combination with a stable function. -prepare p(int4) as select count(*) from vectorqual where metric3 = stable_identity($1); +prepare p(int4) as select count(*) from vectorqual where metric3 = stable_abs($1); execute p(33); count ------- @@ -387,7 +387,7 @@ select count(*) from vectorqual where metric3 !!! 666 and (metric3 !!! 777 or me 5 (1 row) -select count(*) from vectorqual where metric3 !!! 666 and (metric3 !!! 777 or metric3 !!! stable_identity(888)); +select count(*) from vectorqual where metric3 !!! 666 and (metric3 !!! 777 or metric3 !!! 
stable_abs(888)); count ------- 5 @@ -465,7 +465,7 @@ select count(*) from vectorqual where metric3 = 777 or metric4 is not null; 4 (1 row) -select count(*) from vectorqual where metric3 = stable_identity(777) or metric4 is null; +select count(*) from vectorqual where metric3 = stable_abs(777) or metric4 is null; count ------- 3 diff --git a/tsl/test/expected/vector_agg_default.out b/tsl/test/expected/vector_agg_default.out index 55f5bb4344c..0ab244037b8 100644 --- a/tsl/test/expected/vector_agg_default.out +++ b/tsl/test/expected/vector_agg_default.out @@ -1,6 +1,8 @@ -- This file and its contents are licensed under the Timescale License. -- Please see the included NOTICE for copyright information and -- LICENSE-TIMESCALE for a copy of the license. +\c :TEST_DBNAME :ROLE_SUPERUSER +create function stable_abs(x int4) returns int4 as 'int4abs' language internal stable; create table t(a int, b int); select create_hypertable('t', 'a', chunk_time_interval => 1000); NOTICE: adding not-null constraint to column "a" @@ -98,6 +100,19 @@ explain (costs off) select sum(c) from t where b in (0, 1, 3); -> Parallel Seq Scan on compress_hyper_2_4_chunk (12 rows) +-- The runtime chunk exclusion should work. +explain (costs off) select sum(c) from t where a < stable_abs(1000); + QUERY PLAN +--------------------------------------------------------------------- + Finalize Aggregate + -> Custom Scan (ChunkAppend) on t + Chunks excluded during startup: 1 + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk + Vectorized Filter: (a < stable_abs(1000)) + -> Seq Scan on compress_hyper_2_2_chunk +(7 rows) + select decompress_chunk(show_chunks('t')); decompress_chunk ---------------------------------------- diff --git a/tsl/test/sql/decompress_vector_qual.sql b/tsl/test/sql/decompress_vector_qual.sql index 074c546282e..1ec27e2979f 100644 --- a/tsl/test/sql/decompress_vector_qual.sql +++ b/tsl/test/sql/decompress_vector_qual.sql @@ -4,7 +4,7 @@ \c :TEST_DBNAME :ROLE_SUPERUSER -create function stable_identity(x anyelement) returns anyelement as $$ select x $$ language sql stable; +create function stable_abs(x int4) returns int4 as 'int4abs' language internal stable; create table vectorqual(metric1 int8, ts timestamp, metric2 int8, device int8); select create_hypertable('vectorqual', 'ts'); @@ -89,7 +89,7 @@ execute p(33); deallocate p; -- Also try query parameter in combination with a stable function. -prepare p(int4) as select count(*) from vectorqual where metric3 = stable_identity($1); +prepare p(int4) as select count(*) from vectorqual where metric3 = stable_abs($1); execute p(33); deallocate p; @@ -164,7 +164,7 @@ select count(*) from vectorqual where metric3 !!! 777; select count(*) from vectorqual where metric3 !!! any(array[777, 888]); select count(*) from vectorqual where metric3 !!! 777 or metric3 !!! 888; select count(*) from vectorqual where metric3 !!! 666 and (metric3 !!! 777 or metric3 !!! 888); -select count(*) from vectorqual where metric3 !!! 666 and (metric3 !!! 777 or metric3 !!! stable_identity(888)); +select count(*) from vectorqual where metric3 !!! 666 and (metric3 !!! 777 or metric3 !!! stable_abs(888)); set timescaledb.debug_require_vector_qual to 'forbid'; select count(*) from vectorqual where not metric3 !!! 
777; @@ -187,7 +187,7 @@ set timescaledb.debug_require_vector_qual to 'only'; select count(*) from vectorqual where metric4 is null; select count(*) from vectorqual where metric4 is not null; select count(*) from vectorqual where metric3 = 777 or metric4 is not null; -select count(*) from vectorqual where metric3 = stable_identity(777) or metric4 is null; +select count(*) from vectorqual where metric3 = stable_abs(777) or metric4 is null; -- Can't vectorize conditions on system columns. Have to check this on a single diff --git a/tsl/test/sql/vector_agg_default.sql b/tsl/test/sql/vector_agg_default.sql index c5841325c2b..628d24cbf2f 100644 --- a/tsl/test/sql/vector_agg_default.sql +++ b/tsl/test/sql/vector_agg_default.sql @@ -2,6 +2,9 @@ -- Please see the included NOTICE for copyright information and -- LICENSE-TIMESCALE for a copy of the license. +\c :TEST_DBNAME :ROLE_SUPERUSER +create function stable_abs(x int4) returns int4 as 'int4abs' language internal stable; + create table t(a int, b int); select create_hypertable('t', 'a', chunk_time_interval => 1000); @@ -24,6 +27,9 @@ select sum(c) from t where b > 10; explain (costs off) select sum(c) from t where b in (0, 1, 3); +-- The runtime chunk exclusion should work. +explain (costs off) select sum(c) from t where a < stable_abs(1000); + select decompress_chunk(show_chunks('t')); select sum(c) from t; From cd3e862b0e47fe80e3f219e3a0d5bc1f7817f941 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 4 Apr 2024 21:41:54 +0200 Subject: [PATCH 40/81] Don't copy compressed slot to compressed batch struct There is overhead associated with copying the heap tuple and (un)pinning the respective heap buffers, which becomes apparent in vectorized aggregation. 
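
As a rough standalone C sketch (not TimescaleDB code; the struct and
function names below are made up for illustration), the shape of the
change is to stop keeping a per-batch copy of the incoming tuple and
instead thread the caller's tuple through by pointer:

    #include <stdio.h>

    typedef struct Tuple { int values[8]; } Tuple;

    typedef struct BatchState {
        /* Before: the state owned its own copy of the compressed tuple,
         * filled in with the moral equivalent of ExecCopySlot(). */
        int rows_processed;
    } BatchState;

    /* After: the caller's tuple is passed down as a parameter, so no
     * per-batch copy (and no extra buffer pinning) is required. */
    static void process_batch(BatchState *state, const Tuple *compressed)
    {
        for (int i = 0; i < 8; i++)
            state->rows_processed += compressed->values[i];
    }

    int main(void)
    {
        BatchState state = { 0 };
        Tuple t = { { 1, 2, 3, 4, 5, 6, 7, 8 } };
        process_batch(&state, &t);
        printf("rows processed: %d\n", state.rows_processed);
        return 0;
    }

In the real patch this corresponds to dropping batch_state->compressed_slot
and passing the compressed TupleTableSlot as an argument to the batch
decompression functions instead.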
--- .../nodes/decompress_chunk/compressed_batch.c | 149 +++++++++++------- .../nodes/decompress_chunk/compressed_batch.h | 6 - .../decompress_chunk/decompress_context.h | 15 -- 3 files changed, 89 insertions(+), 81 deletions(-) diff --git a/tsl/src/nodes/decompress_chunk/compressed_batch.c b/tsl/src/nodes/decompress_chunk/compressed_batch.c index 0eb9be2e035..63f778d8336 100644 --- a/tsl/src/nodes/decompress_chunk/compressed_batch.c +++ b/tsl/src/nodes/decompress_chunk/compressed_batch.c @@ -156,7 +156,8 @@ get_max_text_datum_size(ArrowArray *text_array) } static void -decompress_column(DecompressContext *dcontext, DecompressBatchState *batch_state, int i) +decompress_column(DecompressContext *dcontext, DecompressBatchState *batch_state, int i, + TupleTableSlot *compressed_slot) { CompressionColumnDescription *column_description = &dcontext->template_columns[i]; CompressedColumnValues *column_values = &batch_state->compressed_columns[i]; @@ -168,9 +169,7 @@ decompress_column(DecompressContext *dcontext, DecompressBatchState *batch_state Assert(value_bytes != 0); bool isnull; - Datum value = slot_getattr(batch_state->compressed_slot, - column_description->compressed_scan_attno, - &isnull); + Datum value = slot_getattr(compressed_slot, column_description->compressed_scan_attno, &isnull); if (isnull) { @@ -330,8 +329,8 @@ translate_bitmap_from_dictionary(const ArrowArray *arrow, const uint64 *dict_res } static void -compute_plain_qual(DecompressContext *dcontext, DecompressBatchState *batch_state, Node *qual, - uint64 *restrict result) +compute_plain_qual(DecompressContext *dcontext, DecompressBatchState *batch_state, + TupleTableSlot *compressed_slot, Node *qual, uint64 *restrict result) { /* * Some predicates can be evaluated to a Const at run time. @@ -423,7 +422,7 @@ compute_plain_qual(DecompressContext *dcontext, DecompressBatchState *batch_stat * skip decompressing some columns if the entire batch doesn't pass * the quals. 
*/ - decompress_column(dcontext, batch_state, column_index); + decompress_column(dcontext, batch_state, column_index, compressed_slot); Assert(column_values->decompression_type != DT_Invalid); } @@ -566,16 +565,16 @@ compute_plain_qual(DecompressContext *dcontext, DecompressBatchState *batch_stat } static void compute_one_qual(DecompressContext *dcontext, DecompressBatchState *batch_state, - Node *qual, uint64 *restrict result); + TupleTableSlot *compressed_slot, Node *qual, uint64 *restrict result); static void compute_qual_conjunction(DecompressContext *dcontext, DecompressBatchState *batch_state, - List *quals, uint64 *restrict result) + TupleTableSlot *compressed_slot, List *quals, uint64 *restrict result) { ListCell *lc; foreach (lc, quals) { - compute_one_qual(dcontext, batch_state, lfirst(lc), result); + compute_one_qual(dcontext, batch_state, compressed_slot, lfirst(lc), result); if (get_vector_qual_summary(result, batch_state->total_batch_rows) == NoRowsPass) { /* @@ -589,7 +588,7 @@ compute_qual_conjunction(DecompressContext *dcontext, DecompressBatchState *batc static void compute_qual_disjunction(DecompressContext *dcontext, DecompressBatchState *batch_state, - List *quals, uint64 *restrict result) + TupleTableSlot *compressed_slot, List *quals, uint64 *restrict result) { const size_t n_rows = batch_state->total_batch_rows; const size_t n_result_words = (n_rows + 63) / 64; @@ -608,7 +607,7 @@ compute_qual_disjunction(DecompressContext *dcontext, DecompressBatchState *batc { one_qual_result[i] = (uint64) -1; } - compute_one_qual(dcontext, batch_state, lfirst(lc), one_qual_result); + compute_one_qual(dcontext, batch_state, compressed_slot, lfirst(lc), one_qual_result); for (size_t i = 0; i < n_result_words; i++) { or_result[i] |= one_qual_result[i]; @@ -631,19 +630,19 @@ compute_qual_disjunction(DecompressContext *dcontext, DecompressBatchState *batc } static void -compute_one_qual(DecompressContext *dcontext, DecompressBatchState *batch_state, Node *qual, - uint64 *restrict result) +compute_one_qual(DecompressContext *dcontext, DecompressBatchState *batch_state, + TupleTableSlot *compressed_slot, Node *qual, uint64 *restrict result) { if (!IsA(qual, BoolExpr)) { - compute_plain_qual(dcontext, batch_state, qual, result); + compute_plain_qual(dcontext, batch_state, compressed_slot, qual, result); return; } BoolExpr *boolexpr = castNode(BoolExpr, qual); if (boolexpr->boolop == AND_EXPR) { - compute_qual_conjunction(dcontext, batch_state, boolexpr->args, result); + compute_qual_conjunction(dcontext, batch_state, compressed_slot, boolexpr->args, result); return; } @@ -652,7 +651,7 @@ compute_one_qual(DecompressContext *dcontext, DecompressBatchState *batch_state, * NOT and consider it non-vectorizable at planning time. So only OR is left. */ Ensure(boolexpr->boolop == OR_EXPR, "expected OR"); - compute_qual_disjunction(dcontext, batch_state, boolexpr->args, result); + compute_qual_disjunction(dcontext, batch_state, compressed_slot, boolexpr->args, result); } /* @@ -661,7 +660,8 @@ compute_one_qual(DecompressContext *dcontext, DecompressBatchState *batch_state, * optimizations. */ static VectorQualSummary -compute_vector_quals(DecompressContext *dcontext, DecompressBatchState *batch_state) +compute_vector_quals(DecompressContext *dcontext, DecompressBatchState *batch_state, + TupleTableSlot *compressed_slot) { /* * Allocate the bitmap that will hold the vectorized qual results. 
We will @@ -688,6 +688,7 @@ compute_vector_quals(DecompressContext *dcontext, DecompressBatchState *batch_st */ compute_qual_conjunction(dcontext, batch_state, + compressed_slot, dcontext->vectorized_quals_constified, batch_state->vector_qual_result); @@ -709,7 +710,6 @@ compressed_batch_discard_tuples(DecompressBatchState *batch_state) if (batch_state->per_batch_context != NULL) { - ExecClearTuple(batch_state->compressed_slot); ExecClearTuple(&batch_state->decompressed_scan_slot_data.base); MemoryContextReset(batch_state->per_batch_context); } @@ -720,7 +720,6 @@ compressed_batch_discard_tuples(DecompressBatchState *batch_state) */ Assert(IsA(&batch_state->decompressed_scan_slot_data, Invalid)); Assert(batch_state->decompressed_scan_slot_data.base.tts_ops == NULL); - Assert(batch_state->compressed_slot == NULL); } } @@ -730,24 +729,12 @@ compressed_batch_discard_tuples(DecompressBatchState *batch_state) * relatively expensive. */ static void -compressed_batch_lazy_init(DecompressContext *dcontext, DecompressBatchState *batch_state, - TupleTableSlot *compressed_slot) +compressed_batch_lazy_init(DecompressContext *dcontext, DecompressBatchState *batch_state) { /* Init memory context */ batch_state->per_batch_context = create_per_batch_mctx(dcontext); Assert(batch_state->per_batch_context != NULL); - Assert(batch_state->compressed_slot == NULL); - - /* Create a non ref-counted copy of the compressed tuple descriptor */ - if (dcontext->compressed_slot_tdesc == NULL) - dcontext->compressed_slot_tdesc = - CreateTupleDescCopyConstr(compressed_slot->tts_tupleDescriptor); - Assert(dcontext->compressed_slot_tdesc->tdrefcount == -1); - - batch_state->compressed_slot = - MakeSingleTupleTableSlot(dcontext->compressed_slot_tdesc, compressed_slot->tts_ops); - /* Get a reference to the output TupleTableSlot */ TupleTableSlot *decompressed_slot = dcontext->decompressed_slot; @@ -771,11 +758,19 @@ compressed_batch_lazy_init(DecompressContext *dcontext, DecompressBatchState *ba slot->tts_mcxt = CurrentMemoryContext; slot->tts_nvalid = 0; - slot->tts_values = palloc(MAXALIGN(slot->tts_tupleDescriptor->natts * sizeof(Datum)) + - MAXALIGN(slot->tts_tupleDescriptor->natts * sizeof(bool))); + slot->tts_values = palloc0(MAXALIGN(slot->tts_tupleDescriptor->natts * sizeof(Datum)) + + MAXALIGN(slot->tts_tupleDescriptor->natts * sizeof(bool))); slot->tts_isnull = (bool *) ((char *) slot->tts_values) + MAXALIGN(slot->tts_tupleDescriptor->natts * sizeof(Datum)); + /* + * Have to initially set nulls to true, because this is the uncompressed chunk + * tuple, and some of its columns might be not even decompressed. The tuple + * slot functions will get confused by them, because they expect a non-null + * value for attributes not marked as null. + */ + memset(slot->tts_isnull, true, slot->tts_tupleDescriptor->natts * sizeof(bool)); + /* * DecompressChunk produces virtual tuple slots. 
*/ @@ -788,7 +783,8 @@ compressed_batch_lazy_init(DecompressContext *dcontext, DecompressBatchState *ba */ void compressed_batch_set_compressed_tuple(DecompressContext *dcontext, - DecompressBatchState *batch_state, TupleTableSlot *subslot) + DecompressBatchState *batch_state, + TupleTableSlot *compressed_slot) { Assert(TupIsNull(compressed_batch_current_tuple(batch_state))); @@ -798,23 +794,10 @@ compressed_batch_set_compressed_tuple(DecompressContext *dcontext, */ if (batch_state->per_batch_context == NULL) { - compressed_batch_lazy_init(dcontext, batch_state, subslot); - } - else - { - Assert(batch_state->compressed_slot != NULL); + compressed_batch_lazy_init(dcontext, batch_state); } - - /* Ensure that all fields are empty. Calling ExecClearTuple is not enough - * because some attributes might not be populated (e.g., due to a dropped - * column) and these attributes need to be set to null. */ TupleTableSlot *decompressed_tuple = compressed_batch_current_tuple(batch_state); Assert(decompressed_tuple != NULL); - ExecStoreAllNullTuple(decompressed_tuple); - ExecClearTuple(decompressed_tuple); - - ExecCopySlot(batch_state->compressed_slot, subslot); - Assert(!TupIsNull(batch_state->compressed_slot)); batch_state->total_batch_rows = 0; batch_state->next_batch_row = 0; @@ -849,15 +832,33 @@ compressed_batch_set_compressed_tuple(DecompressContext *dcontext, */ AttrNumber attr = AttrNumberGetAttrOffset(column_description->output_attno); decompressed_tuple->tts_values[attr] = - slot_getattr(batch_state->compressed_slot, + slot_getattr(compressed_slot, column_description->compressed_scan_attno, &decompressed_tuple->tts_isnull[attr]); + + // fprintf(stderr, "segmentby column [%d]: value %p, null %d\n", + // attr, (void*) decompressed_tuple->tts_values[attr], + // decompressed_tuple->tts_isnull[attr]); + + /* + * Note that if it's not a by-value type, we should copy it into + * the slot context. + */ + if (!get_typbyval(column_description->typid) && + DatumGetPointer(decompressed_tuple->tts_values[attr]) != NULL) + { + MemoryContext old = MemoryContextSwitchTo(decompressed_tuple->tts_mcxt); + decompressed_tuple->tts_values[attr] = PointerGetDatum(pg_detoast_datum_copy( + (struct varlena *) decompressed_tuple->tts_values[attr])); + MemoryContextSwitchTo(old); + } + break; } case COUNT_COLUMN: { bool isnull; - Datum value = slot_getattr(batch_state->compressed_slot, + Datum value = slot_getattr(compressed_slot, column_description->compressed_scan_attno, &isnull); /* count column should never be NULL */ @@ -885,9 +886,10 @@ compressed_batch_set_compressed_tuple(DecompressContext *dcontext, } } - VectorQualSummary vector_qual_summary = dcontext->vectorized_quals_constified != NIL ? - compute_vector_quals(dcontext, batch_state) : - AllRowsPass; + VectorQualSummary vector_qual_summary = + dcontext->vectorized_quals_constified != NIL ? 
+ compute_vector_quals(dcontext, batch_state, compressed_slot) : + AllRowsPass; if (vector_qual_summary == NoRowsPass && !dcontext->batch_sorted_merge) { /* @@ -917,7 +919,7 @@ compressed_batch_set_compressed_tuple(DecompressContext *dcontext, CompressedColumnValues *column_values = &batch_state->compressed_columns[i]; if (column_values->decompression_type == DT_Invalid) { - decompress_column(dcontext, batch_state, i); + decompress_column(dcontext, batch_state, i, compressed_slot); Assert(column_values->decompression_type != DT_Invalid); } } @@ -931,6 +933,9 @@ compressed_batch_set_compressed_tuple(DecompressContext *dcontext, batch_state->vector_qual_result = NULL; } } + + /* FIXME this is for testing, better done by the caller. */ + ExecClearTuple(compressed_slot); } static void @@ -960,6 +965,8 @@ make_next_tuple(DecompressBatchState *batch_state, uint16 arrow_row, int num_com Assert(batch_state->total_batch_rows > 0); Assert(batch_state->next_batch_row < batch_state->total_batch_rows); + // fprintf(stderr, "make next tuple [%d]\n", batch_state->next_batch_row); + for (int i = 0; i < num_compressed_columns; i++) { CompressedColumnValues *column_values = &batch_state->compressed_columns[i]; @@ -975,6 +982,10 @@ make_next_tuple(DecompressBatchState *batch_state, uint16 arrow_row, int num_com *column_values->output_isnull = result.is_null; *column_values->output_value = result.val; + + // fprintf(stderr, "iterator column #%d: value %p, null %d\n", + // i, (void*) *column_values->output_value, + // *column_values->output_isnull); } else if (column_values->decompression_type > SIZEOF_DATUM) { @@ -989,6 +1000,10 @@ make_next_tuple(DecompressBatchState *batch_state, uint16 arrow_row, int num_com *column_values->output_value = PointerGetDatum(&src[value_bytes * arrow_row]); *column_values->output_isnull = !arrow_row_is_valid(column_values->buffers[0], arrow_row); + + // fprintf(stderr, "by-ref column #%d: value %p, null %d\n", + // i, (void*) *column_values->output_value, + // *column_values->output_isnull); } else if (column_values->decompression_type > 0) { @@ -1006,12 +1021,20 @@ make_next_tuple(DecompressBatchState *batch_state, uint16 arrow_row, int num_com memcpy(column_values->output_value, &src[value_bytes * arrow_row], SIZEOF_DATUM); *column_values->output_isnull = !arrow_row_is_valid(column_values->buffers[0], arrow_row); + + // fprintf(stderr, "by-val column #%d: value %p, null %d\n", + // i, (void*) *column_values->output_value, + // *column_values->output_isnull); } else if (column_values->decompression_type == DT_ArrowText) { store_text_datum(column_values, arrow_row); *column_values->output_isnull = !arrow_row_is_valid(column_values->buffers[0], arrow_row); + + // fprintf(stderr, "arrow text column #%d: value %p, null %d\n", + // i, (void*) *column_values->output_value, + // *column_values->output_isnull); } else if (column_values->decompression_type == DT_ArrowTextDict) { @@ -1019,11 +1042,19 @@ make_next_tuple(DecompressBatchState *batch_state, uint16 arrow_row, int num_com store_text_datum(column_values, index); *column_values->output_isnull = !arrow_row_is_valid(column_values->buffers[0], arrow_row); + + // fprintf(stderr, "arrow text dict column #%d: value %p, null %d\n", + // i, (void*) *column_values->output_value, + // *column_values->output_isnull); } else { /* A compressed column with default value, do nothing. 
*/ Assert(column_values->decompression_type == DT_Default); + + // fprintf(stderr, "default column #%d: value %p, null %d\n", + // i, (void*) *column_values->output_value, + // *column_values->output_isnull); } } @@ -1225,16 +1256,14 @@ compressed_batch_destroy(DecompressBatchState *batch_state) batch_state->per_batch_context = NULL; } - if (batch_state->compressed_slot != NULL) + if (batch_state->decompressed_scan_slot_data.base.tts_values != NULL) { /* * Can be separately NULL in the current simplified prototype for * vectorized aggregation, but ideally it should change together with * per-batch context. */ - ExecDropSingleTupleTableSlot(batch_state->compressed_slot); - batch_state->compressed_slot = NULL; - pfree(batch_state->decompressed_scan_slot_data.base.tts_values); + batch_state->decompressed_scan_slot_data.base.tts_values = NULL; } } diff --git a/tsl/src/nodes/decompress_chunk/compressed_batch.h b/tsl/src/nodes/decompress_chunk/compressed_batch.h index bbde12a7119..96bd5721d24 100644 --- a/tsl/src/nodes/decompress_chunk/compressed_batch.h +++ b/tsl/src/nodes/decompress_chunk/compressed_batch.h @@ -82,12 +82,6 @@ typedef struct DecompressBatchState */ VirtualTupleTableSlot decompressed_scan_slot_data; - /* - * Compressed target slot. We have to keep a local copy when doing batch - * sorted merge, because the segmentby column values might reference the - * original tuple, and a batch outlives its source tuple. - */ - TupleTableSlot *compressed_slot; uint16 total_batch_rows; uint16 next_batch_row; MemoryContext per_batch_context; diff --git a/tsl/src/nodes/decompress_chunk/decompress_context.h b/tsl/src/nodes/decompress_chunk/decompress_context.h index 7670163b0e2..7d16e5ac069 100644 --- a/tsl/src/nodes/decompress_chunk/decompress_context.h +++ b/tsl/src/nodes/decompress_chunk/decompress_context.h @@ -63,21 +63,6 @@ typedef struct DecompressContext TupleTableSlot *decompressed_slot; - /* - * Make non-refcounted copies of the tupdesc for reuse across all batch states - * and avoid spending CPU in ResourceOwner when creating a big number of table - * slots. This happens because each new slot pins its tuple descriptor using - * PinTupleDesc, and for reference-counting tuples this involves adding a new - * reference to ResourceOwner, which is not very efficient for a large number of - * references. - * - * We don't have to do this for the decompressed slot tuple descriptor, - * because there we use custom tuple slot (de)initialization functions, which - * don't use reference counting and just use a raw pointer to the tuple - * descriptor. 
- */ - TupleDesc compressed_slot_tdesc; - PlanState *ps; /* Set for filtering and instrumentation */ Detoaster detoaster; From 6693d9ff58ce4d1d8b8b6a5349bc4e516c9c2fd2 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 4 Apr 2024 21:52:36 +0200 Subject: [PATCH 41/81] Always copy out when detoasting as well --- tsl/src/compression/compression.c | 8 ++++---- .../nodes/decompress_chunk/compressed_batch.c | 15 +++++++------- tsl/src/nodes/decompress_chunk/detoaster.c | 20 ++++++++++++++----- tsl/src/nodes/decompress_chunk/detoaster.h | 4 ++-- tsl/src/nodes/decompress_chunk/exec.c | 9 ++++----- 5 files changed, 33 insertions(+), 23 deletions(-) diff --git a/tsl/src/compression/compression.c b/tsl/src/compression/compression.c index 86e09ac00fb..920bbbb276b 100644 --- a/tsl/src/compression/compression.c +++ b/tsl/src/compression/compression.c @@ -1652,10 +1652,10 @@ decompress_batch(RowDecompressor *decompressor) /* Normal compressed column. */ Datum compressed_datum = PointerGetDatum( - detoaster_detoast_attr((struct varlena *) DatumGetPointer( - decompressor->compressed_datums[input_column]), - &decompressor->detoaster, - CurrentMemoryContext)); + detoaster_detoast_attr_copy((struct varlena *) DatumGetPointer( + decompressor->compressed_datums[input_column]), + &decompressor->detoaster, + CurrentMemoryContext)); CompressedDataHeader *header = get_compressed_data_header(compressed_datum); column_info->iterator = definitions[header->compression_algorithm] diff --git a/tsl/src/nodes/decompress_chunk/compressed_batch.c b/tsl/src/nodes/decompress_chunk/compressed_batch.c index 63f778d8336..ecff996dc93 100644 --- a/tsl/src/nodes/decompress_chunk/compressed_batch.c +++ b/tsl/src/nodes/decompress_chunk/compressed_batch.c @@ -187,9 +187,9 @@ decompress_column(DecompressContext *dcontext, DecompressBatchState *batch_state } /* Detoast the compressed datum. */ - value = PointerGetDatum(detoaster_detoast_attr((struct varlena *) DatumGetPointer(value), - &dcontext->detoaster, - batch_state->per_batch_context)); + value = PointerGetDatum(detoaster_detoast_attr_copy((struct varlena *) DatumGetPointer(value), + &dcontext->detoaster, + batch_state->per_batch_context)); /* Decompress the entire batch if it is supported. */ CompressedDataHeader *header = (CompressedDataHeader *) value; @@ -847,10 +847,11 @@ compressed_batch_set_compressed_tuple(DecompressContext *dcontext, if (!get_typbyval(column_description->typid) && DatumGetPointer(decompressed_tuple->tts_values[attr]) != NULL) { - MemoryContext old = MemoryContextSwitchTo(decompressed_tuple->tts_mcxt); - decompressed_tuple->tts_values[attr] = PointerGetDatum(pg_detoast_datum_copy( - (struct varlena *) decompressed_tuple->tts_values[attr])); - MemoryContextSwitchTo(old); + decompressed_tuple->tts_values[attr] = PointerGetDatum( + detoaster_detoast_attr_copy((struct varlena *) + decompressed_tuple->tts_values[attr], + &dcontext->detoaster, + batch_state->per_batch_context)); } break; diff --git a/tsl/src/nodes/decompress_chunk/detoaster.c b/tsl/src/nodes/decompress_chunk/detoaster.c index b04d3369937..981998ce362 100644 --- a/tsl/src/nodes/decompress_chunk/detoaster.c +++ b/tsl/src/nodes/decompress_chunk/detoaster.c @@ -217,7 +217,7 @@ ts_fetch_toast(Detoaster *detoaster, struct varatt_external *toast_pointer, stru /* * The memory context is used to store intermediate data, and is supposed to - * live over the calls to detoaster_detoast_attr(). + * live over the calls to detoaster_detoast_attr_copy(). 
* That function itself can be called in a short-lived memory context. */ void @@ -338,15 +338,25 @@ ts_toast_decompress_datum(struct varlena *attr) /* * Modification of Postgres' detoast_attr() where we use the stateful Detoaster - * and skip some cases that don't occur for the toasted compressed data. + * and skip some cases that don't occur for the toasted compressed data. Even if + * the data is inline and no detoasting is needed, copies it into the destination + * memory context. */ struct varlena * -detoaster_detoast_attr(struct varlena *attr, Detoaster *detoaster, MemoryContext dest_mctx) +detoaster_detoast_attr_copy(struct varlena *attr, Detoaster *detoaster, MemoryContext dest_mctx) { if (!VARATT_IS_EXTENDED(attr)) { - /* Nothing to do here. */ - return attr; + /* + * This case is unlikely because the compressed data is almost always + * toasted and not inline, but we still have to copy the data into the + * destination memory context. The source compressed tuple may have + * independent unknown lifetime. + */ + Size len = VARSIZE(attr); + struct varlena *result = (struct varlena *) MemoryContextAlloc(dest_mctx, len); + memcpy(result, attr, len); + return result; } if (VARATT_IS_EXTERNAL_ONDISK(attr)) diff --git a/tsl/src/nodes/decompress_chunk/detoaster.h b/tsl/src/nodes/decompress_chunk/detoaster.h index b27ff6ffa5f..90c78d54590 100644 --- a/tsl/src/nodes/decompress_chunk/detoaster.h +++ b/tsl/src/nodes/decompress_chunk/detoaster.h @@ -27,5 +27,5 @@ typedef struct Detoaster void detoaster_init(Detoaster *detoaster, MemoryContext mctx); void detoaster_close(Detoaster *detoaster); -struct varlena *detoaster_detoast_attr(struct varlena *attr, Detoaster *detoaster, - MemoryContext dest_mctx); +struct varlena *detoaster_detoast_attr_copy(struct varlena *attr, Detoaster *detoaster, + MemoryContext dest_mctx); diff --git a/tsl/src/nodes/decompress_chunk/exec.c b/tsl/src/nodes/decompress_chunk/exec.c index 238b52f927b..09ffa2ce492 100644 --- a/tsl/src/nodes/decompress_chunk/exec.c +++ b/tsl/src/nodes/decompress_chunk/exec.c @@ -543,11 +543,10 @@ perform_vectorized_sum_int4(DecompressChunkState *chunk_state, Aggref *aggref) /* We have at least one value */ decompressed_scan_slot->tts_isnull[0] = false; - CompressedDataHeader *header = - (CompressedDataHeader *) detoaster_detoast_attr((struct varlena *) DatumGetPointer( - value), - &dcontext->detoaster, - CurrentMemoryContext); + CompressedDataHeader *header = (CompressedDataHeader *) + detoaster_detoast_attr_copy((struct varlena *) DatumGetPointer(value), + &dcontext->detoaster, + CurrentMemoryContext); ArrowArray *arrow = NULL; From 991711a1121b23a10113abb353fc52170cf404ca Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 4 Apr 2024 22:22:51 +0200 Subject: [PATCH 42/81] cache the byvalue flag --- tsl/src/nodes/decompress_chunk/compressed_batch.c | 5 +---- tsl/src/nodes/decompress_chunk/decompress_context.h | 3 ++- tsl/src/nodes/decompress_chunk/exec.c | 2 +- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/tsl/src/nodes/decompress_chunk/compressed_batch.c b/tsl/src/nodes/decompress_chunk/compressed_batch.c index ecff996dc93..5271ff3ceb4 100644 --- a/tsl/src/nodes/decompress_chunk/compressed_batch.c +++ b/tsl/src/nodes/decompress_chunk/compressed_batch.c @@ -844,7 +844,7 @@ compressed_batch_set_compressed_tuple(DecompressContext *dcontext, * Note that if it's not a by-value type, we should copy it into * the slot context. 
*/ - if (!get_typbyval(column_description->typid) && + if (!column_description->by_value && DatumGetPointer(decompressed_tuple->tts_values[attr]) != NULL) { decompressed_tuple->tts_values[attr] = PointerGetDatum( @@ -934,9 +934,6 @@ compressed_batch_set_compressed_tuple(DecompressContext *dcontext, batch_state->vector_qual_result = NULL; } } - - /* FIXME this is for testing, better done by the caller. */ - ExecClearTuple(compressed_slot); } static void diff --git a/tsl/src/nodes/decompress_chunk/decompress_context.h b/tsl/src/nodes/decompress_chunk/decompress_context.h index 7d16e5ac069..0d084ace1ff 100644 --- a/tsl/src/nodes/decompress_chunk/decompress_context.h +++ b/tsl/src/nodes/decompress_chunk/decompress_context.h @@ -27,7 +27,8 @@ typedef struct CompressionColumnDescription { CompressionColumnType type; Oid typid; - int value_bytes; + int16 value_bytes; + bool by_value; /* * Attno of the decompressed column in the output of DecompressChunk node. diff --git a/tsl/src/nodes/decompress_chunk/exec.c b/tsl/src/nodes/decompress_chunk/exec.c index 09ffa2ce492..e9bebc5bb06 100644 --- a/tsl/src/nodes/decompress_chunk/exec.c +++ b/tsl/src/nodes/decompress_chunk/exec.c @@ -305,7 +305,7 @@ decompress_chunk_begin(CustomScanState *node, EState *estate, int eflags) TupleDescAttr(desc, AttrNumberGetAttrOffset(column.output_attno)); column.typid = attribute->atttypid; - column.value_bytes = get_typlen(column.typid); + get_typlenbyval(column.typid, &column.value_bytes, &column.by_value); } if (list_nth_int(chunk_state->is_segmentby_column, compressed_index)) From cad424e90a9cdfd2bfa6484e9f267bdfc7d2d682 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 4 Apr 2024 22:28:52 +0200 Subject: [PATCH 43/81] benchmark vectorized agg all together (2024-04-04 no. 4) From e34ad75933c58ac59e8b8f554a633c607df043d2 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 4 Apr 2024 22:50:19 +0200 Subject: [PATCH 44/81] fixed-length by-reference types --- .../nodes/decompress_chunk/compressed_batch.c | 25 ++++++++++++++----- 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/tsl/src/nodes/decompress_chunk/compressed_batch.c b/tsl/src/nodes/decompress_chunk/compressed_batch.c index 5271ff3ceb4..2e83bf8b3b1 100644 --- a/tsl/src/nodes/decompress_chunk/compressed_batch.c +++ b/tsl/src/nodes/decompress_chunk/compressed_batch.c @@ -847,13 +847,26 @@ compressed_batch_set_compressed_tuple(DecompressContext *dcontext, if (!column_description->by_value && DatumGetPointer(decompressed_tuple->tts_values[attr]) != NULL) { - decompressed_tuple->tts_values[attr] = PointerGetDatum( - detoaster_detoast_attr_copy((struct varlena *) - decompressed_tuple->tts_values[attr], - &dcontext->detoaster, - batch_state->per_batch_context)); + if (column_description->value_bytes < 0) + { + /* This is a varlena type. */ + decompressed_tuple->tts_values[attr] = PointerGetDatum( + detoaster_detoast_attr_copy((struct varlena *) + decompressed_tuple->tts_values[attr], + &dcontext->detoaster, + batch_state->per_batch_context)); + } + else + { + /* This is a fixed-length by-reference type. 
*/ + void *tmp = MemoryContextAlloc(batch_state->per_batch_context, + column_description->value_bytes); + memcpy(tmp, + DatumGetPointer(decompressed_tuple->tts_values[attr]), + column_description->value_bytes); + decompressed_tuple->tts_values[attr] = PointerGetDatum(tmp); + } } - break; } case COUNT_COLUMN: From fe855596feedf0cbcfdef8f08b05ce3d84d8e479 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 4 Apr 2024 22:58:06 +0200 Subject: [PATCH 45/81] benchmark no compressed copy (2024-04-04 no. 5) From 119220cf0aae5e4d59b8b0ea35c78678fdbe3374 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 4 Apr 2024 23:00:00 +0200 Subject: [PATCH 46/81] cleanup --- tsl/src/nodes/decompress_chunk/compressed_batch.c | 8 ++++---- tsl/src/nodes/decompress_chunk/compressed_batch.h | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/tsl/src/nodes/decompress_chunk/compressed_batch.c b/tsl/src/nodes/decompress_chunk/compressed_batch.c index 2e83bf8b3b1..91cb4ce429e 100644 --- a/tsl/src/nodes/decompress_chunk/compressed_batch.c +++ b/tsl/src/nodes/decompress_chunk/compressed_batch.c @@ -156,8 +156,8 @@ get_max_text_datum_size(ArrowArray *text_array) } static void -decompress_column(DecompressContext *dcontext, DecompressBatchState *batch_state, int i, - TupleTableSlot *compressed_slot) +decompress_column(DecompressContext *dcontext, DecompressBatchState *batch_state, + TupleTableSlot *compressed_slot, int i) { CompressionColumnDescription *column_description = &dcontext->template_columns[i]; CompressedColumnValues *column_values = &batch_state->compressed_columns[i]; @@ -422,7 +422,7 @@ compute_plain_qual(DecompressContext *dcontext, DecompressBatchState *batch_stat * skip decompressing some columns if the entire batch doesn't pass * the quals. 
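
The copy strategy above falls out of the cached type length: varlena values (typlen == -1) go through the detoaster copy, other by-reference values are copied byte-wise, and by-value Datums need no copy at all. A standalone sketch under those assumptions; the helper name is hypothetical, and cstring (typlen == -2) is ignored for brevity:

#include "postgres.h"
#include "fmgr.h"
#include "utils/memutils.h"

/* Sketch: copy a Datum into the target memory context according to its type length. */
static Datum
copy_datum_to_context(Datum value, int16 typlen, bool typbyval, MemoryContext mctx)
{
	if (typbyval || DatumGetPointer(value) == NULL)
		return value; /* pass-by-value or a NULL pointer: nothing to copy */

	if (typlen == -1)
	{
		/* Varlena: detoast if needed and copy into the target context. */
		MemoryContext old = MemoryContextSwitchTo(mctx);
		Datum copy = PointerGetDatum(PG_DETOAST_DATUM_COPY(value));
		MemoryContextSwitchTo(old);
		return copy;
	}

	/* Fixed-length by-reference type (typlen > 0): copy the raw bytes. */
	void *tmp = MemoryContextAlloc(mctx, typlen);
	memcpy(tmp, DatumGetPointer(value), typlen);
	return PointerGetDatum(tmp);
}
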
*/ - decompress_column(dcontext, batch_state, column_index, compressed_slot); + decompress_column(dcontext, batch_state, compressed_slot, column_index); Assert(column_values->decompression_type != DT_Invalid); } @@ -933,7 +933,7 @@ compressed_batch_set_compressed_tuple(DecompressContext *dcontext, CompressedColumnValues *column_values = &batch_state->compressed_columns[i]; if (column_values->decompression_type == DT_Invalid) { - decompress_column(dcontext, batch_state, i, compressed_slot); + decompress_column(dcontext, batch_state, compressed_slot, i); Assert(column_values->decompression_type != DT_Invalid); } } diff --git a/tsl/src/nodes/decompress_chunk/compressed_batch.h b/tsl/src/nodes/decompress_chunk/compressed_batch.h index 96bd5721d24..486f3e9c637 100644 --- a/tsl/src/nodes/decompress_chunk/compressed_batch.h +++ b/tsl/src/nodes/decompress_chunk/compressed_batch.h @@ -98,7 +98,7 @@ typedef struct DecompressBatchState extern void compressed_batch_set_compressed_tuple(DecompressContext *dcontext, DecompressBatchState *batch_state, - TupleTableSlot *subslot); + TupleTableSlot *compressed_slot); extern void compressed_batch_advance(DecompressContext *dcontext, DecompressBatchState *batch_state); From 16e539024c37f59d3d637b2a0c4bb47e8c74ba18 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Fri, 5 Apr 2024 13:45:15 +0200 Subject: [PATCH 47/81] boop the CI From ae4151afe2f3f37507f52a800c4ecc817e8155b5 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Fri, 5 Apr 2024 14:58:09 +0200 Subject: [PATCH 48/81] review fixes --- src/nodes/chunk_append/planner.c | 3 ++- src/nodes/vector_agg.h | 13 +++++++++++++ tsl/src/nodes/vector_agg/exec.c | 12 ++++-------- tsl/src/nodes/vector_agg/exec.h | 2 ++ tsl/src/nodes/vector_agg/functions.c | 15 +++++++++++---- tsl/src/nodes/vector_agg/plan.c | 2 +- 6 files changed, 33 insertions(+), 14 deletions(-) create mode 100644 src/nodes/vector_agg.h diff --git a/src/nodes/chunk_append/planner.c b/src/nodes/chunk_append/planner.c index b3ca10e5d15..500b25c245a 100644 --- a/src/nodes/chunk_append/planner.c +++ b/src/nodes/chunk_append/planner.c @@ -23,6 +23,7 @@ #include "nodes/chunk_append/chunk_append.h" #include "nodes/chunk_append/transform.h" #include "nodes/hypertable_modify.h" +#include "nodes/vector_agg.h" #include "import/planner.h" #include "guc.h" @@ -415,7 +416,7 @@ ts_chunk_append_get_scan_plan(Plan *plan) return (Scan *) plan; } - if (strcmp(custom->methods->CustomName, "VectorAgg") == 0) + if (strcmp(custom->methods->CustomName, VECTOR_AGG_NODE_NAME) == 0) { /* * This is a vectorized aggregation node, we have to recurse diff --git a/src/nodes/vector_agg.h b/src/nodes/vector_agg.h new file mode 100644 index 00000000000..e9da1f8a21f --- /dev/null +++ b/src/nodes/vector_agg.h @@ -0,0 +1,13 @@ +/* + * This file and its contents are licensed under the Apache License 2.0. + * Please see the included NOTICE for copyright information and + * LICENSE-APACHE for a copy of the license. + */ +#pragma once + +/* + * This file defines the node name of Vector Aggregation custom node, to be + * used in the Apache part of the Timescale extension. The node itself is in the + * the TSL part. 
+ */ +#define VECTOR_AGG_NODE_NAME "VectorAgg" diff --git a/tsl/src/nodes/vector_agg/exec.c b/tsl/src/nodes/vector_agg/exec.c index eb0045fc096..19d4e9c7236 100644 --- a/tsl/src/nodes/vector_agg/exec.c +++ b/tsl/src/nodes/vector_agg/exec.c @@ -70,9 +70,6 @@ vector_agg_exec(CustomScanState *vector_agg_state) Assert(IsA(tlentry->expr, Aggref)); Aggref *aggref = castNode(Aggref, tlentry->expr); - /* Partial result is a int8 */ - Assert(aggref->aggtranstype == INT8OID); - Assert(list_length(aggref->args) == 1); /* The aggregate should be a partial aggregate */ @@ -104,8 +101,8 @@ vector_agg_exec(CustomScanState *vector_agg_state) TupleTableSlot *aggregated_slot = vector_agg_state->ss.ps.ps_ResultTupleSlot; Assert(aggregated_slot->tts_tupleDescriptor->natts == 1); - Assert(aggref->aggfnoid == F_SUM_INT4); VectorAggregate *agg = get_vector_aggregate(aggref->aggfnoid); + Assert(agg != NULL); agg->agg_init(&aggregated_slot->tts_values[0], &aggregated_slot->tts_isnull[0]); ExecClearTuple(aggregated_slot); @@ -173,8 +170,7 @@ vector_agg_exec(CustomScanState *vector_agg_state) } compressed_batch_discard_tuples(batch_state); - /* Use Int64GetDatum to store the result since a 64-bit value is not pass-by-value on 32-bit - * systems */ + ExecStoreVirtualTuple(aggregated_slot); return aggregated_slot; @@ -183,11 +179,11 @@ vector_agg_exec(CustomScanState *vector_agg_state) static void vector_agg_explain(CustomScanState *node, List *ancestors, ExplainState *es) { - /* noop? */ + /* No additional output is needed. */ } static struct CustomExecMethods exec_methods = { - .CustomName = "VectorAgg", + .CustomName = VECTOR_AGG_NODE_NAME, .BeginCustomScan = vector_agg_begin, .ExecCustomScan = vector_agg_exec, .EndCustomScan = vector_agg_end, diff --git a/tsl/src/nodes/vector_agg/exec.h b/tsl/src/nodes/vector_agg/exec.h index 61b0837b076..fb64dca6512 100644 --- a/tsl/src/nodes/vector_agg/exec.h +++ b/tsl/src/nodes/vector_agg/exec.h @@ -4,6 +4,8 @@ * LICENSE-TIMESCALE for a copy of the license. */ +#pragma once + #include #include diff --git a/tsl/src/nodes/vector_agg/functions.c b/tsl/src/nodes/vector_agg/functions.c index 5a2a0a41aa5..fa9a185c25b 100644 --- a/tsl/src/nodes/vector_agg/functions.c +++ b/tsl/src/nodes/vector_agg/functions.c @@ -75,8 +75,12 @@ int4_sum_vector(ArrowArray *vector, uint64 *filter, Datum *agg_value, bool *agg_ ereport(ERROR, (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("bigint out of range"))); } - *agg_value = Int64GetDatum(tmp); + /* + * Use Int64GetDatum to store the result since a 64-bit value is not + * pass-by-value on 32-bit systems. + */ + *agg_value = Int64GetDatum(tmp); *agg_isnull = false; } @@ -93,9 +97,6 @@ int4_sum_const(Datum constvalue, bool constisnull, int n, Datum *agg_value, bool int32 intvalue = DatumGetInt32(constvalue); int64 batch_sum = 0; - /* We have at least one value */ - *agg_isnull = false; - /* Multiply the number of tuples with the actual value */ if (unlikely(pg_mul_s64_overflow(intvalue, n, &batch_sum))) { @@ -110,7 +111,13 @@ int4_sum_const(Datum constvalue, bool constisnull, int n, Datum *agg_value, bool ereport(ERROR, (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("bigint out of range"))); } + + /* + * Use Int64GetDatum to store the result since a 64-bit value is not + * pass-by-value on 32-bit systems. 
+ */ *agg_value = Int64GetDatum(tmp); + *agg_isnull = false; } static VectorAggregate int4_sum_agg = { diff --git a/tsl/src/nodes/vector_agg/plan.c b/tsl/src/nodes/vector_agg/plan.c index fe5fb219ac2..832c66ce4c3 100644 --- a/tsl/src/nodes/vector_agg/plan.c +++ b/tsl/src/nodes/vector_agg/plan.c @@ -21,7 +21,7 @@ #include "utils.h" #include "nodes/decompress_chunk/planner.h" -static struct CustomScanMethods scan_methods = { .CustomName = "VectorAgg", +static struct CustomScanMethods scan_methods = { .CustomName = VECTOR_AGG_NODE_NAME, .CreateCustomScanState = vector_agg_state_create }; void From f27c162490ec33810be3d77c72b672a9cd0bf0a7 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Fri, 5 Apr 2024 15:11:28 +0200 Subject: [PATCH 49/81] some coverage --- tsl/src/nodes/vector_agg/exec.c | 5 ++-- tsl/src/nodes/vector_agg/plan.c | 3 +- tsl/test/expected/vector_agg_default.out | 38 ++++++++++++++++++++++++ tsl/test/sql/vector_agg_default.sql | 14 +++++++++ 4 files changed, 57 insertions(+), 3 deletions(-) diff --git a/tsl/src/nodes/vector_agg/exec.c b/tsl/src/nodes/vector_agg/exec.c index 19d4e9c7236..a212826caad 100644 --- a/tsl/src/nodes/vector_agg/exec.c +++ b/tsl/src/nodes/vector_agg/exec.c @@ -15,10 +15,11 @@ #include "exec.h" #include "compression/arrow_c_data_interface.h" +#include "functions.h" +#include "guc.h" #include "nodes/decompress_chunk/compressed_batch.h" #include "nodes/decompress_chunk/exec.h" -#include "guc.h" -#include "functions.h" +#include "nodes/vector_agg.h" static void vector_agg_begin(CustomScanState *node, EState *estate, int eflags) diff --git a/tsl/src/nodes/vector_agg/plan.c b/tsl/src/nodes/vector_agg/plan.c index 832c66ce4c3..a11ff4f0f44 100644 --- a/tsl/src/nodes/vector_agg/plan.c +++ b/tsl/src/nodes/vector_agg/plan.c @@ -18,8 +18,9 @@ #include "exec.h" #include "functions.h" -#include "utils.h" #include "nodes/decompress_chunk/planner.h" +#include "nodes/vector_agg.h" +#include "utils.h" static struct CustomScanMethods scan_methods = { .CustomName = VECTOR_AGG_NODE_NAME, .CreateCustomScanState = vector_agg_state_create }; diff --git a/tsl/test/expected/vector_agg_default.out b/tsl/test/expected/vector_agg_default.out index 0ab244037b8..ed711ebea4a 100644 --- a/tsl/test/expected/vector_agg_default.out +++ b/tsl/test/expected/vector_agg_default.out @@ -32,6 +32,8 @@ NOTICE: chunk "_hyper_1_1_chunk" is already compressed _timescaledb_internal._hyper_1_3_chunk (2 rows) +-- Just the most basic vectorized aggregation query on a table with default +-- compressed column. explain (costs off) select sum(c) from t; QUERY PLAN ----------------------------------------------------------------------------- @@ -53,6 +55,7 @@ select sum(c) from t; 17982 (1 row) +-- Vectorized aggregation should work with vectorized filters. select sum(c) from t where b >= 0; sum ------- @@ -113,6 +116,41 @@ explain (costs off) select sum(c) from t where a < stable_abs(1000); -> Seq Scan on compress_hyper_2_2_chunk (7 rows) +-- Some negative cases. 
+explain (costs off) select sum(c) from t group by grouping sets ((), (a)); + QUERY PLAN +----------------------------------------------------------------------------------- + MixedAggregate + Hash Key: _hyper_1_1_chunk.a + Group Key: () + -> Append + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk + -> Sort + Sort Key: compress_hyper_2_2_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_2_2_chunk + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk + -> Sort + Sort Key: compress_hyper_2_4_chunk._ts_meta_sequence_num DESC + -> Seq Scan on compress_hyper_2_4_chunk +(12 rows) + +explain (costs off) select sum(c) from t having sum(c) > 0; + QUERY PLAN +----------------------------------------------------------------------------- + Finalize Aggregate + Filter: (sum(_hyper_1_1_chunk.c) > 0) + -> Gather + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk + -> Parallel Seq Scan on compress_hyper_2_2_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk + -> Parallel Seq Scan on compress_hyper_2_4_chunk +(11 rows) + +-- As a reference, the result on decompressed table. select decompress_chunk(show_chunks('t')); decompress_chunk ---------------------------------------- diff --git a/tsl/test/sql/vector_agg_default.sql b/tsl/test/sql/vector_agg_default.sql index 628d24cbf2f..5cab70b0b75 100644 --- a/tsl/test/sql/vector_agg_default.sql +++ b/tsl/test/sql/vector_agg_default.sql @@ -16,9 +16,14 @@ alter table t add column c int default 7; insert into t select x, x % 5, 11 from generate_series(1001, 1999) x; select compress_chunk(show_chunks('t')); + +-- Just the most basic vectorized aggregation query on a table with default +-- compressed column. explain (costs off) select sum(c) from t; select sum(c) from t; + +-- Vectorized aggregation should work with vectorized filters. select sum(c) from t where b >= 0; select sum(c) from t where b = 0; select sum(c) from t where b in (0, 1); @@ -27,9 +32,18 @@ select sum(c) from t where b > 10; explain (costs off) select sum(c) from t where b in (0, 1, 3); + -- The runtime chunk exclusion should work. explain (costs off) select sum(c) from t where a < stable_abs(1000); + +-- Some negative cases. +explain (costs off) select sum(c) from t group by grouping sets ((), (a)); + +explain (costs off) select sum(c) from t having sum(c) > 0; + + +-- As a reference, the result on decompressed table. 
select decompress_chunk(show_chunks('t')); select sum(c) from t; From 8f000451e7f526e53f469209aa7503f28a9fcce2 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Fri, 5 Apr 2024 15:12:49 +0200 Subject: [PATCH 50/81] assert --- tsl/src/nodes/vector_agg/plan.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tsl/src/nodes/vector_agg/plan.c b/tsl/src/nodes/vector_agg/plan.c index a11ff4f0f44..000940501bb 100644 --- a/tsl/src/nodes/vector_agg/plan.c +++ b/tsl/src/nodes/vector_agg/plan.c @@ -249,10 +249,7 @@ try_insert_vector_agg_node(Plan *plan) } Node *expr_node = (Node *) castNode(TargetEntry, linitial(agg->plan.targetlist))->expr; - if (!IsA(expr_node, Aggref)) - { - return plan; - } + Assert(IsA(expr_node, Aggref)); Aggref *aggref = castNode(Aggref, expr_node); From 0695543b93d13c14d736c409fb27b114377b0f3a Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Fri, 5 Apr 2024 17:29:43 +0200 Subject: [PATCH 51/81] copy more fields from agg plan --- tsl/src/nodes/decompress_chunk/exec.c | 2 ++ tsl/src/nodes/vector_agg/plan.c | 14 ++++++++++++++ 2 files changed, 16 insertions(+) diff --git a/tsl/src/nodes/decompress_chunk/exec.c b/tsl/src/nodes/decompress_chunk/exec.c index db5035c284d..82b98482dc1 100644 --- a/tsl/src/nodes/decompress_chunk/exec.c +++ b/tsl/src/nodes/decompress_chunk/exec.c @@ -18,6 +18,8 @@ #include #include +#include + #include "compat/compat.h" #include "compression/array.h" #include "compression/arrow_c_data_interface.h" diff --git a/tsl/src/nodes/vector_agg/plan.c b/tsl/src/nodes/vector_agg/plan.c index 000940501bb..5f415a983f0 100644 --- a/tsl/src/nodes/vector_agg/plan.c +++ b/tsl/src/nodes/vector_agg/plan.c @@ -131,6 +131,20 @@ vector_agg_plan_create(Agg *agg, CustomScan *decompress_chunk) custom->scan.plan.startup_cost = agg->plan.startup_cost; custom->scan.plan.total_cost = agg->plan.total_cost; + custom->scan.plan.parallel_aware = false; + custom->scan.plan.parallel_safe = decompress_chunk->scan.plan.parallel_safe; + + custom->scan.plan.async_capable = false; + + custom->scan.plan.plan_node_id = agg->plan.plan_node_id; + + Assert(agg->plan.qual == NIL); + + custom->scan.plan.initPlan = agg->plan.initPlan; + + custom->scan.plan.extParam = bms_copy(agg->plan.extParam); + custom->scan.plan.allParam = bms_copy(agg->plan.allParam); + return (Plan *) custom; } From 0b9709fa2673a9a0ef9899b746eb8dd242f8afb3 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Fri, 5 Apr 2024 17:58:12 +0200 Subject: [PATCH 52/81] fix build on 13 --- tsl/src/nodes/vector_agg/plan.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tsl/src/nodes/vector_agg/plan.c b/tsl/src/nodes/vector_agg/plan.c index 5f415a983f0..28514b47dfa 100644 --- a/tsl/src/nodes/vector_agg/plan.c +++ b/tsl/src/nodes/vector_agg/plan.c @@ -134,7 +134,9 @@ vector_agg_plan_create(Agg *agg, CustomScan *decompress_chunk) custom->scan.plan.parallel_aware = false; custom->scan.plan.parallel_safe = decompress_chunk->scan.plan.parallel_safe; +#if PG14_GE custom->scan.plan.async_capable = false; +#endif custom->scan.plan.plan_node_id = agg->plan.plan_node_id; From 0646f6c236a6cd9f24e13263b8134736ba3468bc Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Fri, 5 Apr 2024 20:15:42 +0200 Subject: [PATCH 53/81] drafts --- tsl/src/nodes/vector_agg/exec.c | 82 +++++++++++++++++++++++++--- tsl/src/nodes/vector_agg/exec.h | 23 
+++++++- tsl/src/nodes/vector_agg/functions.c | 11 +++- tsl/src/nodes/vector_agg/functions.h | 7 ++- 4 files changed, 111 insertions(+), 12 deletions(-) diff --git a/tsl/src/nodes/vector_agg/exec.c b/tsl/src/nodes/vector_agg/exec.c index a212826caad..ade6994d8bf 100644 --- a/tsl/src/nodes/vector_agg/exec.c +++ b/tsl/src/nodes/vector_agg/exec.c @@ -27,6 +27,60 @@ vector_agg_begin(CustomScanState *node, EState *estate, int eflags) CustomScan *cscan = castNode(CustomScan, node->ss.ps.plan); node->custom_ps = lappend(node->custom_ps, ExecInitNode(linitial(cscan->custom_plans), estate, eflags)); + + VectorAggState *vector_agg_state = (VectorAggState *) node; + vector_agg_state->input_ended = false; + + DecompressChunkState *decompress_state = + (DecompressChunkState *) linitial(vector_agg_state->custom.custom_ps); + + List *aggregated_tlist = + castNode(CustomScan, vector_agg_state->custom.ss.ps.plan)->custom_scan_tlist; + ListCell *lc; + foreach (lc, aggregated_tlist) + { + /* Determine which kind of vectorized aggregation we should perform */ + TargetEntry *tlentry = (TargetEntry *) lfirst(lc); + Assert(IsA(tlentry->expr, Aggref)); + Aggref *aggref = castNode(Aggref, tlentry->expr); + + Assert(list_length(aggref->args) == 1); + + /* The aggregate should be a partial aggregate */ + Assert(aggref->aggsplit == AGGSPLIT_INITIAL_SERIAL); + + Var *var = castNode(Var, castNode(TargetEntry, linitial(aggref->args))->expr); + + DecompressContext *dcontext = &decompress_state->decompress_context; + + CompressionColumnDescription *value_column_description = NULL; + for (int i = 0; i < dcontext->num_total_columns; i++) + { + CompressionColumnDescription *current_column = &dcontext->template_columns[i]; + if (current_column->output_attno == var->varattno) + { + value_column_description = current_column; + break; + } + } + Ensure(value_column_description != NULL, "aggregated compressed column not found"); + + Assert(value_column_description->type == COMPRESSED_COLUMN || + value_column_description->type == SEGMENTBY_COLUMN); + + VectorAggDef *def = palloc(sizeof(VectorAggDef)); + VectorAggFunctions *func = get_vector_aggregate(aggref->aggfnoid); + Assert(func != NULL); + def->func = func; + def->column = value_column_description - dcontext->template_columns; + + vector_agg_state->agg_defs = lappend(vector_agg_state->agg_defs, def); + + vector_agg_state->agg_state_row_bytes += MAXALIGN(func->state_bytes); + } + + vector_agg_state->num_agg_state_rows = 1; + vector_agg_state->agg_states = palloc(vector_agg_state->agg_state_row_bytes); } static void @@ -42,13 +96,25 @@ vector_agg_rescan(CustomScanState *node) UpdateChangedParamSet(linitial(node->custom_ps), node->ss.ps.chgParam); ExecReScan(linitial(node->custom_ps)); + + VectorAggState *state = (VectorAggState *) node; + state->input_ended = false; } static TupleTableSlot * -vector_agg_exec(CustomScanState *vector_agg_state) +vector_agg_exec(CustomScanState *node) { + /* + * Early exit if the input has ended. + */ + VectorAggState *vector_agg_state = (VectorAggState *) node; + if (vector_agg_state->input_ended) + { + return NULL; + } + DecompressChunkState *decompress_state = - (DecompressChunkState *) linitial(vector_agg_state->custom_ps); + (DecompressChunkState *) linitial(vector_agg_state->custom.custom_ps); /* * The aggregated targetlist with Aggrefs is in the custom scan targetlist @@ -59,7 +125,8 @@ vector_agg_exec(CustomScanState *vector_agg_state) * The output targetlist, in turn, consists of just the INDEX_VAR references * into the custom_scan_tlist. 
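
The agg_states row above is sized by summing MAXALIGN'ed per-function state sizes, so each aggregate's state sits at a fixed offset inside one allocation. A sketch of how the states could be walked and initialized from the agg_defs list built above; this is only an illustration, the patch at this point exercises a single aggregate via linitial:

/* Walk the packed state row and initialize each aggregate's state. */
char *state = (char *) vector_agg_state->agg_states;
ListCell *lc;
foreach (lc, vector_agg_state->agg_defs)
{
	VectorAggDef *def = (VectorAggDef *) lfirst(lc);
	def->func->agg_init(state);
	state += MAXALIGN(def->func->state_bytes);
}
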
*/ - List *aggregated_tlist = castNode(CustomScan, vector_agg_state->ss.ps.plan)->custom_scan_tlist; + List *aggregated_tlist = + castNode(CustomScan, vector_agg_state->custom.ss.ps.plan)->custom_scan_tlist; Assert(list_length(aggregated_tlist) == 1); /* Checked by planner */ @@ -99,10 +166,10 @@ vector_agg_exec(CustomScanState *vector_agg_state) DecompressBatchState *batch_state = batch_array_get_at(&batch_queue->batch_array, 0); /* Get a reference the the output TupleTableSlot */ - TupleTableSlot *aggregated_slot = vector_agg_state->ss.ps.ps_ResultTupleSlot; + TupleTableSlot *aggregated_slot = vector_agg_state->custom.ss.ps.ps_ResultTupleSlot; Assert(aggregated_slot->tts_tupleDescriptor->natts == 1); - VectorAggregate *agg = get_vector_aggregate(aggref->aggfnoid); + VectorAggFunctions *agg = get_vector_aggregate(aggref->aggfnoid); Assert(agg != NULL); agg->agg_init(&aggregated_slot->tts_values[0], &aggregated_slot->tts_isnull[0]); @@ -120,6 +187,7 @@ vector_agg_exec(CustomScanState *vector_agg_state) if (TupIsNull(compressed_slot)) { /* All values are processed. */ + vector_agg_state->input_ended = true; return NULL; } @@ -195,7 +263,7 @@ static struct CustomExecMethods exec_methods = { Node * vector_agg_state_create(CustomScan *cscan) { - CustomScanState *state = makeNode(CustomScanState); - state->methods = &exec_methods; + VectorAggState *state = (VectorAggState *) newNode(sizeof(VectorAggState), T_CustomScanState); + state->custom.methods = &exec_methods; return (Node *) state; } diff --git a/tsl/src/nodes/vector_agg/exec.h b/tsl/src/nodes/vector_agg/exec.h index fb64dca6512..a8cc52125d0 100644 --- a/tsl/src/nodes/vector_agg/exec.h +++ b/tsl/src/nodes/vector_agg/exec.h @@ -10,9 +10,30 @@ #include -typedef struct VectorAggState +#include "functions.h" + +typedef struct +{ + VectorAggFunctions *func; + int column; +} VectorAggDef; + +typedef struct { CustomScanState custom; + + List *agg_defs; + + int agg_state_row_bytes; + int num_agg_state_rows; + void *agg_states; + + /* + * We can't call the underlying scan after it has ended, or it will be + * restarted. This is the behavior of Postgres heap scans. So we have to + * track whether it has ended to avoid this. + */ + bool input_ended; } VectorAggState; extern Node *vector_agg_state_create(CustomScan *cscan); diff --git a/tsl/src/nodes/vector_agg/functions.c b/tsl/src/nodes/vector_agg/functions.c index fa9a185c25b..c030f6fe419 100644 --- a/tsl/src/nodes/vector_agg/functions.c +++ b/tsl/src/nodes/vector_agg/functions.c @@ -120,13 +120,20 @@ int4_sum_const(Datum constvalue, bool constisnull, int n, Datum *agg_value, bool *agg_isnull = false; } -static VectorAggregate int4_sum_agg = { +typedef struct +{ + int64 result; + bool is_null; +} Int4SumState; + +static VectorAggFunctions int4_sum_agg = { + .state_bytes = sizeof(Int4SumState), .agg_init = int4_sum_init, .agg_const = int4_sum_const, .agg_vector = int4_sum_vector, }; -VectorAggregate * +VectorAggFunctions * get_vector_aggregate(Oid aggfnoid) { switch (aggfnoid) diff --git a/tsl/src/nodes/vector_agg/functions.h b/tsl/src/nodes/vector_agg/functions.h index 5f31fbf5960..a2dc005ed5f 100644 --- a/tsl/src/nodes/vector_agg/functions.h +++ b/tsl/src/nodes/vector_agg/functions.h @@ -13,6 +13,9 @@ */ typedef struct { + /* Size of the aggregate function state. */ + size_t state_bytes; + /* Initialize the aggregate function state pointed to by agg_value and agg_isnull. 
*/ void (*agg_init)(Datum *agg_value, bool *agg_isnull); @@ -22,6 +25,6 @@ typedef struct /* Aggregate a constant (like segmentby or column with default value). */ void (*agg_const)(Datum constvalue, bool constisnull, int n, Datum *agg_value, bool *agg_isnull); -} VectorAggregate; +} VectorAggFunctions; -VectorAggregate *get_vector_aggregate(Oid aggfnoid); +VectorAggFunctions *get_vector_aggregate(Oid aggfnoid); From fa0c3b21d1e75ba76113847c99fdeb973ba88f12 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Sat, 6 Apr 2024 14:03:08 +0200 Subject: [PATCH 54/81] use dedicated function state --- tsl/src/nodes/vector_agg/exec.c | 18 +++++----- tsl/src/nodes/vector_agg/functions.c | 53 ++++++++++++++-------------- tsl/src/nodes/vector_agg/functions.h | 10 +++--- 3 files changed, 41 insertions(+), 40 deletions(-) diff --git a/tsl/src/nodes/vector_agg/exec.c b/tsl/src/nodes/vector_agg/exec.c index ade6994d8bf..d0a937caef6 100644 --- a/tsl/src/nodes/vector_agg/exec.c +++ b/tsl/src/nodes/vector_agg/exec.c @@ -172,7 +172,7 @@ vector_agg_exec(CustomScanState *node) VectorAggFunctions *agg = get_vector_aggregate(aggref->aggfnoid); Assert(agg != NULL); - agg->agg_init(&aggregated_slot->tts_values[0], &aggregated_slot->tts_isnull[0]); + agg->agg_init(vector_agg_state->agg_states); ExecClearTuple(aggregated_slot); /* @@ -224,22 +224,22 @@ vector_agg_exec(CustomScanState *node) } int offs = AttrNumberGetAttrOffset(value_column_description->output_attno); - agg->agg_const(batch_state->decompressed_scan_slot_data.base.tts_values[offs], + agg->agg_const(vector_agg_state->agg_states, + batch_state->decompressed_scan_slot_data.base.tts_values[offs], batch_state->decompressed_scan_slot_data.base.tts_isnull[offs], - n, - &aggregated_slot->tts_values[0], - &aggregated_slot->tts_isnull[0]); + n); } else { - agg->agg_vector(arrow, - batch_state->vector_qual_result, - &aggregated_slot->tts_values[0], - &aggregated_slot->tts_isnull[0]); + agg->agg_vector(vector_agg_state->agg_states, arrow, batch_state->vector_qual_result); } compressed_batch_discard_tuples(batch_state); + agg->agg_emit(vector_agg_state->agg_states, + &aggregated_slot->tts_values[0], + &aggregated_slot->tts_isnull[0]); + ExecStoreVirtualTuple(aggregated_slot); return aggregated_slot; diff --git a/tsl/src/nodes/vector_agg/functions.c b/tsl/src/nodes/vector_agg/functions.c index c030f6fe419..70ef775b6cd 100644 --- a/tsl/src/nodes/vector_agg/functions.c +++ b/tsl/src/nodes/vector_agg/functions.c @@ -18,17 +18,25 @@ /* * Vectorized implementation of int4_sum. 
*/ +typedef struct +{ + int64 result; + bool isnull; +} Int4SumState; static void -int4_sum_init(Datum *agg_value, bool *agg_isnull) +int4_sum_init(void *agg_state) { - *agg_value = Int64GetDatum(0); - *agg_isnull = true; + Int4SumState *state = (Int4SumState *) agg_state; + state->result = 0; + state->isnull = true; } static void -int4_sum_vector(ArrowArray *vector, uint64 *filter, Datum *agg_value, bool *agg_isnull) +int4_sum_vector(void *agg_state, ArrowArray *vector, uint64 *filter) { + Int4SumState *state = (Int4SumState *) agg_state; + Assert(vector != NULL); Assert(vector->length > 0); @@ -69,25 +77,19 @@ int4_sum_vector(ArrowArray *vector, uint64 *filter, Datum *agg_value, bool *agg_ } #undef INNER_LOOP_SIZE - int64 tmp = DatumGetInt64(*agg_value); - if (unlikely(pg_add_s64_overflow(tmp, batch_sum, &tmp))) + if (unlikely(pg_add_s64_overflow(state->result, batch_sum, &state->result))) { ereport(ERROR, (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("bigint out of range"))); } - /* - * Use Int64GetDatum to store the result since a 64-bit value is not - * pass-by-value on 32-bit systems. - */ - *agg_value = Int64GetDatum(tmp); - *agg_isnull = false; + state->isnull = false; } static void -int4_sum_const(Datum constvalue, bool constisnull, int n, Datum *agg_value, bool *agg_isnull) +int4_sum_const(void *agg_state, Datum constvalue, bool constisnull, int n) { - Assert(n > 0); + Int4SumState *state = (Int4SumState *) agg_state; if (constisnull) { @@ -98,6 +100,7 @@ int4_sum_const(Datum constvalue, bool constisnull, int n, Datum *agg_value, bool int64 batch_sum = 0; /* Multiply the number of tuples with the actual value */ + Assert(n > 0); if (unlikely(pg_mul_s64_overflow(intvalue, n, &batch_sum))) { ereport(ERROR, @@ -105,32 +108,28 @@ int4_sum_const(Datum constvalue, bool constisnull, int n, Datum *agg_value, bool } /* Add the value to our sum */ - int64 tmp = DatumGetInt64(*agg_value); - if (unlikely(pg_add_s64_overflow(tmp, batch_sum, &tmp))) + if (unlikely(pg_add_s64_overflow(state->result, batch_sum, &state->result))) { ereport(ERROR, (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), errmsg("bigint out of range"))); } - - /* - * Use Int64GetDatum to store the result since a 64-bit value is not - * pass-by-value on 32-bit systems. - */ - *agg_value = Int64GetDatum(tmp); - *agg_isnull = false; + state->isnull = false; } -typedef struct +static void +int4_sum_emit(void *agg_state, Datum *out_result, bool *out_isnull) { - int64 result; - bool is_null; -} Int4SumState; + Int4SumState *state = (Int4SumState *) agg_state; + *out_result = Int64GetDatum(state->result); + *out_isnull = state->isnull; +} static VectorAggFunctions int4_sum_agg = { .state_bytes = sizeof(Int4SumState), .agg_init = int4_sum_init, .agg_const = int4_sum_const, .agg_vector = int4_sum_vector, + .agg_emit = int4_sum_emit, }; VectorAggFunctions * diff --git a/tsl/src/nodes/vector_agg/functions.h b/tsl/src/nodes/vector_agg/functions.h index a2dc005ed5f..e92dd5a5639 100644 --- a/tsl/src/nodes/vector_agg/functions.h +++ b/tsl/src/nodes/vector_agg/functions.h @@ -17,14 +17,16 @@ typedef struct size_t state_bytes; /* Initialize the aggregate function state pointed to by agg_value and agg_isnull. */ - void (*agg_init)(Datum *agg_value, bool *agg_isnull); + void (*agg_init)(void *agg_state); /* Aggregate a given arrow array. 
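
Both the filter argument and the Arrow validity buffer are bitmaps with one bit per row, and a NULL pointer means that all rows pass. A sketch of the per-row check the vectorized functions rely on; the real helper is arrow_row_is_valid, and LSB-first 64-bit words are assumed here:

/* Sketch: a row contributes to the aggregate if it passes the quals and is not null. */
static inline bool
row_is_aggregated(const uint64 *filter, const uint64 *validity, size_t row)
{
	const uint64 mask = UINT64CONST(1) << (row % 64);
	const bool passes_quals = (filter == NULL) || ((filter[row / 64] & mask) != 0);
	const bool not_null = (validity == NULL) || ((validity[row / 64] & mask) != 0);
	return passes_quals && not_null;
}
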
*/ - void (*agg_vector)(ArrowArray *vector, uint64 *filter, Datum *agg_value, bool *agg_isnull); + void (*agg_vector)(void *agg_state, ArrowArray *vector, uint64 *filter); /* Aggregate a constant (like segmentby or column with default value). */ - void (*agg_const)(Datum constvalue, bool constisnull, int n, Datum *agg_value, - bool *agg_isnull); + void (*agg_const)(void *agg_state, Datum constvalue, bool constisnull, int n); + + /* Emit a parital result. */ + void (*agg_emit)(void *agg_state, Datum *out_result, bool *out_isnull); } VectorAggFunctions; VectorAggFunctions *get_vector_aggregate(Oid aggfnoid); From 9ffc75cdff0915033a7db5fc04a52a2387228809 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Sat, 6 Apr 2024 18:58:14 +0200 Subject: [PATCH 55/81] use the agg definition --- tsl/src/nodes/vector_agg/exec.c | 76 ++++++++++----------------------- 1 file changed, 22 insertions(+), 54 deletions(-) diff --git a/tsl/src/nodes/vector_agg/exec.c b/tsl/src/nodes/vector_agg/exec.c index d0a937caef6..b6ffa7bc5fe 100644 --- a/tsl/src/nodes/vector_agg/exec.c +++ b/tsl/src/nodes/vector_agg/exec.c @@ -34,6 +34,15 @@ vector_agg_begin(CustomScanState *node, EState *estate, int eflags) DecompressChunkState *decompress_state = (DecompressChunkState *) linitial(vector_agg_state->custom.custom_ps); + /* + * The aggregated targetlist with Aggrefs is in the custom scan targetlist + * of the custom scan node that is performing the vectorized aggregation. + * We do this to avoid projections at this node, because the postgres + * projection functions complain when they see an Aggref in a custom + * node output targetlist. + * The output targetlist, in turn, consists of just the INDEX_VAR references + * into the custom_scan_tlist. + */ List *aggregated_tlist = castNode(CustomScan, vector_agg_state->custom.ss.ps.plan)->custom_scan_tlist; ListCell *lc; @@ -116,48 +125,10 @@ vector_agg_exec(CustomScanState *node) DecompressChunkState *decompress_state = (DecompressChunkState *) linitial(vector_agg_state->custom.custom_ps); - /* - * The aggregated targetlist with Aggrefs is in the custom scan targetlist - * of the custom scan node that is performing the vectorized aggregation. - * We do this to avoid projections at this node, because the postgres - * projection functions complain when they see an Aggref in a custom - * node output targetlist. - * The output targetlist, in turn, consists of just the INDEX_VAR references - * into the custom_scan_tlist. 
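
The VectorAggFunctions callbacks above are all that a new vectorized aggregate has to supply. A hypothetical count-style implementation, not part of the patch, sketched to show how the per-aggregate state and the callbacks fit together:

typedef struct
{
	int64 count;
} CountState;

static void
count_init(void *agg_state)
{
	((CountState *) agg_state)->count = 0;
}

static void
count_vector(void *agg_state, ArrowArray *vector, uint64 *filter)
{
	CountState *state = (CountState *) agg_state;
	for (int row = 0; row < vector->length; row++)
	{
		/* Count rows that pass the vectorized quals and are not null. */
		if (arrow_row_is_valid(filter, row) &&
			arrow_row_is_valid(vector->buffers[0], row))
			state->count++;
	}
}

static void
count_const(void *agg_state, Datum constvalue, bool constisnull, int n)
{
	/* A segmentby or default-value column: n identical rows at once. */
	if (!constisnull)
		((CountState *) agg_state)->count += n;
}

static void
count_emit(void *agg_state, Datum *out_result, bool *out_isnull)
{
	*out_result = Int64GetDatum(((CountState *) agg_state)->count);
	*out_isnull = false;
}

static VectorAggFunctions count_agg = {
	.state_bytes = sizeof(CountState),
	.agg_init = count_init,
	.agg_vector = count_vector,
	.agg_const = count_const,
	.agg_emit = count_emit,
};
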
- */ - List *aggregated_tlist = - castNode(CustomScan, vector_agg_state->custom.ss.ps.plan)->custom_scan_tlist; - Assert(list_length(aggregated_tlist) == 1); - - /* Checked by planner */ - Assert(ts_guc_enable_vectorized_aggregation); - Assert(ts_guc_enable_bulk_decompression); - - /* Determine which kind of vectorized aggregation we should perform */ - TargetEntry *tlentry = (TargetEntry *) linitial(aggregated_tlist); - Assert(IsA(tlentry->expr, Aggref)); - Aggref *aggref = castNode(Aggref, tlentry->expr); - - Assert(list_length(aggref->args) == 1); - - /* The aggregate should be a partial aggregate */ - Assert(aggref->aggsplit == AGGSPLIT_INITIAL_SERIAL); - - Var *var = castNode(Var, castNode(TargetEntry, linitial(aggref->args))->expr); - + VectorAggDef *def = (VectorAggDef *) linitial(vector_agg_state->agg_defs); DecompressContext *dcontext = &decompress_state->decompress_context; - - CompressionColumnDescription *value_column_description = NULL; - for (int i = 0; i < dcontext->num_total_columns; i++) - { - CompressionColumnDescription *current_column = &dcontext->template_columns[i]; - if (current_column->output_attno == var->varattno) - { - value_column_description = current_column; - break; - } - } - Ensure(value_column_description != NULL, "aggregated compressed column not found"); + CompressionColumnDescription *value_column_description = + &dcontext->template_columns[def->column]; Assert(value_column_description->type == COMPRESSED_COLUMN || value_column_description->type == SEGMENTBY_COLUMN); @@ -169,10 +140,7 @@ vector_agg_exec(CustomScanState *node) TupleTableSlot *aggregated_slot = vector_agg_state->custom.ss.ps.ps_ResultTupleSlot; Assert(aggregated_slot->tts_tupleDescriptor->natts == 1); - VectorAggFunctions *agg = get_vector_aggregate(aggref->aggfnoid); - Assert(agg != NULL); - - agg->agg_init(vector_agg_state->agg_states); + def->func->agg_init(vector_agg_state->agg_states); ExecClearTuple(aggregated_slot); /* @@ -200,7 +168,7 @@ vector_agg_exec(CustomScanState *node) Assert(dcontext->enable_bulk_decompression); Assert(value_column_description->bulk_decompression_supported); CompressedColumnValues *values = - &batch_state->compressed_columns[value_column_description - dcontext->template_columns]; + &batch_state->compressed_columns[def->column]; Assert(values->decompression_type != DT_Invalid); arrow = values->arrow; } @@ -224,21 +192,21 @@ vector_agg_exec(CustomScanState *node) } int offs = AttrNumberGetAttrOffset(value_column_description->output_attno); - agg->agg_const(vector_agg_state->agg_states, - batch_state->decompressed_scan_slot_data.base.tts_values[offs], - batch_state->decompressed_scan_slot_data.base.tts_isnull[offs], - n); + def->func->agg_const(vector_agg_state->agg_states, + batch_state->decompressed_scan_slot_data.base.tts_values[offs], + batch_state->decompressed_scan_slot_data.base.tts_isnull[offs], + n); } else { - agg->agg_vector(vector_agg_state->agg_states, arrow, batch_state->vector_qual_result); + def->func->agg_vector(vector_agg_state->agg_states, arrow, batch_state->vector_qual_result); } compressed_batch_discard_tuples(batch_state); - agg->agg_emit(vector_agg_state->agg_states, - &aggregated_slot->tts_values[0], - &aggregated_slot->tts_isnull[0]); + def->func->agg_emit(vector_agg_state->agg_states, + &aggregated_slot->tts_values[0], + &aggregated_slot->tts_isnull[0]); ExecStoreVirtualTuple(aggregated_slot); From d4716c0074f558aa866881d5591424feee74f050 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> 
Date: Tue, 9 Apr 2024 13:03:18 +0200 Subject: [PATCH 56/81] remove debug prints --- .../nodes/decompress_chunk/compressed_batch.c | 30 ------------------- 1 file changed, 30 deletions(-) diff --git a/tsl/src/nodes/decompress_chunk/compressed_batch.c b/tsl/src/nodes/decompress_chunk/compressed_batch.c index 91cb4ce429e..c25c156a1fd 100644 --- a/tsl/src/nodes/decompress_chunk/compressed_batch.c +++ b/tsl/src/nodes/decompress_chunk/compressed_batch.c @@ -836,10 +836,6 @@ compressed_batch_set_compressed_tuple(DecompressContext *dcontext, column_description->compressed_scan_attno, &decompressed_tuple->tts_isnull[attr]); - // fprintf(stderr, "segmentby column [%d]: value %p, null %d\n", - // attr, (void*) decompressed_tuple->tts_values[attr], - // decompressed_tuple->tts_isnull[attr]); - /* * Note that if it's not a by-value type, we should copy it into * the slot context. @@ -976,8 +972,6 @@ make_next_tuple(DecompressBatchState *batch_state, uint16 arrow_row, int num_com Assert(batch_state->total_batch_rows > 0); Assert(batch_state->next_batch_row < batch_state->total_batch_rows); - // fprintf(stderr, "make next tuple [%d]\n", batch_state->next_batch_row); - for (int i = 0; i < num_compressed_columns; i++) { CompressedColumnValues *column_values = &batch_state->compressed_columns[i]; @@ -993,10 +987,6 @@ make_next_tuple(DecompressBatchState *batch_state, uint16 arrow_row, int num_com *column_values->output_isnull = result.is_null; *column_values->output_value = result.val; - - // fprintf(stderr, "iterator column #%d: value %p, null %d\n", - // i, (void*) *column_values->output_value, - // *column_values->output_isnull); } else if (column_values->decompression_type > SIZEOF_DATUM) { @@ -1011,10 +1001,6 @@ make_next_tuple(DecompressBatchState *batch_state, uint16 arrow_row, int num_com *column_values->output_value = PointerGetDatum(&src[value_bytes * arrow_row]); *column_values->output_isnull = !arrow_row_is_valid(column_values->buffers[0], arrow_row); - - // fprintf(stderr, "by-ref column #%d: value %p, null %d\n", - // i, (void*) *column_values->output_value, - // *column_values->output_isnull); } else if (column_values->decompression_type > 0) { @@ -1032,20 +1018,12 @@ make_next_tuple(DecompressBatchState *batch_state, uint16 arrow_row, int num_com memcpy(column_values->output_value, &src[value_bytes * arrow_row], SIZEOF_DATUM); *column_values->output_isnull = !arrow_row_is_valid(column_values->buffers[0], arrow_row); - - // fprintf(stderr, "by-val column #%d: value %p, null %d\n", - // i, (void*) *column_values->output_value, - // *column_values->output_isnull); } else if (column_values->decompression_type == DT_ArrowText) { store_text_datum(column_values, arrow_row); *column_values->output_isnull = !arrow_row_is_valid(column_values->buffers[0], arrow_row); - - // fprintf(stderr, "arrow text column #%d: value %p, null %d\n", - // i, (void*) *column_values->output_value, - // *column_values->output_isnull); } else if (column_values->decompression_type == DT_ArrowTextDict) { @@ -1053,19 +1031,11 @@ make_next_tuple(DecompressBatchState *batch_state, uint16 arrow_row, int num_com store_text_datum(column_values, index); *column_values->output_isnull = !arrow_row_is_valid(column_values->buffers[0], arrow_row); - - // fprintf(stderr, "arrow text dict column #%d: value %p, null %d\n", - // i, (void*) *column_values->output_value, - // *column_values->output_isnull); } else { /* A compressed column with default value, do nothing. 
*/ Assert(column_values->decompression_type == DT_Default); - - // fprintf(stderr, "default column #%d: value %p, null %d\n", - // i, (void*) *column_values->output_value, - // *column_values->output_isnull); } } From be91171052f4e572cf59d020758c35ceaa93e4ee Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Tue, 9 Apr 2024 13:29:40 +0200 Subject: [PATCH 57/81] rename table --- tsl/test/expected/vector_agg_default.out | 56 ++++++++++++------------ tsl/test/sql/vector_agg_default.sql | 44 +++++++++---------- 2 files changed, 50 insertions(+), 50 deletions(-) diff --git a/tsl/test/expected/vector_agg_default.out b/tsl/test/expected/vector_agg_default.out index ed711ebea4a..58e8185f7e9 100644 --- a/tsl/test/expected/vector_agg_default.out +++ b/tsl/test/expected/vector_agg_default.out @@ -3,28 +3,28 @@ -- LICENSE-TIMESCALE for a copy of the license. \c :TEST_DBNAME :ROLE_SUPERUSER create function stable_abs(x int4) returns int4 as 'int4abs' language internal stable; -create table t(a int, b int); -select create_hypertable('t', 'a', chunk_time_interval => 1000); +create table dvagg(a int, b int); +select create_hypertable('dvagg', 'a', chunk_time_interval => 1000); NOTICE: adding not-null constraint to column "a" - create_hypertable -------------------- - (1,public,t,t) + create_hypertable +-------------------- + (1,public,dvagg,t) (1 row) -insert into t select x, x % 5 from generate_series(1, 999) x; -alter table t set (timescaledb.compress); +insert into dvagg select x, x % 5 from generate_series(1, 999) x; +alter table dvagg set (timescaledb.compress); WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes -NOTICE: default segment by for hypertable "t" is set to "" -NOTICE: default order by for hypertable "t" is set to "a DESC" -select compress_chunk(show_chunks('t')); +NOTICE: default segment by for hypertable "dvagg" is set to "" +NOTICE: default order by for hypertable "dvagg" is set to "a DESC" +select compress_chunk(show_chunks('dvagg')); compress_chunk ---------------------------------------- _timescaledb_internal._hyper_1_1_chunk (1 row) -alter table t add column c int default 7; -insert into t select x, x % 5, 11 from generate_series(1001, 1999) x; -select compress_chunk(show_chunks('t')); +alter table dvagg add column c int default 7; +insert into dvagg select x, x % 5, 11 from generate_series(1001, 1999) x; +select compress_chunk(show_chunks('dvagg')); NOTICE: chunk "_hyper_1_1_chunk" is already compressed compress_chunk ---------------------------------------- @@ -34,7 +34,7 @@ NOTICE: chunk "_hyper_1_1_chunk" is already compressed -- Just the most basic vectorized aggregation query on a table with default -- compressed column. -explain (costs off) select sum(c) from t; +explain (costs off) select sum(c) from dvagg; QUERY PLAN ----------------------------------------------------------------------------- Finalize Aggregate @@ -49,44 +49,44 @@ explain (costs off) select sum(c) from t; -> Parallel Seq Scan on compress_hyper_2_4_chunk (10 rows) -select sum(c) from t; +select sum(c) from dvagg; sum ------- 17982 (1 row) -- Vectorized aggregation should work with vectorized filters. 
-select sum(c) from t where b >= 0; +select sum(c) from dvagg where b >= 0; sum ------- 17982 (1 row) -select sum(c) from t where b = 0; +select sum(c) from dvagg where b = 0; sum ------ 3582 (1 row) -select sum(c) from t where b in (0, 1); +select sum(c) from dvagg where b in (0, 1); sum ------ 7182 (1 row) -select sum(c) from t where b in (0, 1, 3); +select sum(c) from dvagg where b in (0, 1, 3); sum ------- 10782 (1 row) -select sum(c) from t where b > 10; +select sum(c) from dvagg where b > 10; sum ----- (1 row) -explain (costs off) select sum(c) from t where b in (0, 1, 3); +explain (costs off) select sum(c) from dvagg where b in (0, 1, 3); QUERY PLAN ------------------------------------------------------------------------------- Finalize Aggregate @@ -104,11 +104,11 @@ explain (costs off) select sum(c) from t where b in (0, 1, 3); (12 rows) -- The runtime chunk exclusion should work. -explain (costs off) select sum(c) from t where a < stable_abs(1000); +explain (costs off) select sum(c) from dvagg where a < stable_abs(1000); QUERY PLAN --------------------------------------------------------------------- Finalize Aggregate - -> Custom Scan (ChunkAppend) on t + -> Custom Scan (ChunkAppend) on dvagg Chunks excluded during startup: 1 -> Custom Scan (VectorAgg) -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk @@ -117,7 +117,7 @@ explain (costs off) select sum(c) from t where a < stable_abs(1000); (7 rows) -- Some negative cases. -explain (costs off) select sum(c) from t group by grouping sets ((), (a)); +explain (costs off) select sum(c) from dvagg group by grouping sets ((), (a)); QUERY PLAN ----------------------------------------------------------------------------------- MixedAggregate @@ -134,7 +134,7 @@ explain (costs off) select sum(c) from t group by grouping sets ((), (a)); -> Seq Scan on compress_hyper_2_4_chunk (12 rows) -explain (costs off) select sum(c) from t having sum(c) > 0; +explain (costs off) select sum(c) from dvagg having sum(c) > 0; QUERY PLAN ----------------------------------------------------------------------------- Finalize Aggregate @@ -151,17 +151,17 @@ explain (costs off) select sum(c) from t having sum(c) > 0; (11 rows) -- As a reference, the result on decompressed table. 
-select decompress_chunk(show_chunks('t')); +select decompress_chunk(show_chunks('dvagg')); decompress_chunk ---------------------------------------- _timescaledb_internal._hyper_1_1_chunk _timescaledb_internal._hyper_1_3_chunk (2 rows) -select sum(c) from t; +select sum(c) from dvagg; sum ------- 17982 (1 row) -drop table t; +drop table dvagg; diff --git a/tsl/test/sql/vector_agg_default.sql b/tsl/test/sql/vector_agg_default.sql index 5cab70b0b75..71b8ea7ac2a 100644 --- a/tsl/test/sql/vector_agg_default.sql +++ b/tsl/test/sql/vector_agg_default.sql @@ -5,46 +5,46 @@ \c :TEST_DBNAME :ROLE_SUPERUSER create function stable_abs(x int4) returns int4 as 'int4abs' language internal stable; -create table t(a int, b int); -select create_hypertable('t', 'a', chunk_time_interval => 1000); +create table dvagg(a int, b int); +select create_hypertable('dvagg', 'a', chunk_time_interval => 1000); -insert into t select x, x % 5 from generate_series(1, 999) x; -alter table t set (timescaledb.compress); -select compress_chunk(show_chunks('t')); +insert into dvagg select x, x % 5 from generate_series(1, 999) x; +alter table dvagg set (timescaledb.compress); +select compress_chunk(show_chunks('dvagg')); -alter table t add column c int default 7; -insert into t select x, x % 5, 11 from generate_series(1001, 1999) x; -select compress_chunk(show_chunks('t')); +alter table dvagg add column c int default 7; +insert into dvagg select x, x % 5, 11 from generate_series(1001, 1999) x; +select compress_chunk(show_chunks('dvagg')); -- Just the most basic vectorized aggregation query on a table with default -- compressed column. -explain (costs off) select sum(c) from t; -select sum(c) from t; +explain (costs off) select sum(c) from dvagg; +select sum(c) from dvagg; -- Vectorized aggregation should work with vectorized filters. -select sum(c) from t where b >= 0; -select sum(c) from t where b = 0; -select sum(c) from t where b in (0, 1); -select sum(c) from t where b in (0, 1, 3); -select sum(c) from t where b > 10; +select sum(c) from dvagg where b >= 0; +select sum(c) from dvagg where b = 0; +select sum(c) from dvagg where b in (0, 1); +select sum(c) from dvagg where b in (0, 1, 3); +select sum(c) from dvagg where b > 10; -explain (costs off) select sum(c) from t where b in (0, 1, 3); +explain (costs off) select sum(c) from dvagg where b in (0, 1, 3); -- The runtime chunk exclusion should work. -explain (costs off) select sum(c) from t where a < stable_abs(1000); +explain (costs off) select sum(c) from dvagg where a < stable_abs(1000); -- Some negative cases. -explain (costs off) select sum(c) from t group by grouping sets ((), (a)); +explain (costs off) select sum(c) from dvagg group by grouping sets ((), (a)); -explain (costs off) select sum(c) from t having sum(c) > 0; +explain (costs off) select sum(c) from dvagg having sum(c) > 0; -- As a reference, the result on decompressed table. 
-select decompress_chunk(show_chunks('t')); -select sum(c) from t; +select decompress_chunk(show_chunks('dvagg')); +select sum(c) from dvagg; -drop table t; +drop table dvagg; From 6da0650f728aeb1091f62ad272a1f590ed85bbd5 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Wed, 10 Apr 2024 09:26:12 +0200 Subject: [PATCH 58/81] return the inner loop --- tsl/src/nodes/vector_agg/exec.c | 120 +++++++++++++++++--------------- 1 file changed, 63 insertions(+), 57 deletions(-) diff --git a/tsl/src/nodes/vector_agg/exec.c b/tsl/src/nodes/vector_agg/exec.c index b6ffa7bc5fe..e3ea8edcb26 100644 --- a/tsl/src/nodes/vector_agg/exec.c +++ b/tsl/src/nodes/vector_agg/exec.c @@ -122,16 +122,13 @@ vector_agg_exec(CustomScanState *node) return NULL; } + VectorAggDef *def = (VectorAggDef *) linitial(vector_agg_state->agg_defs); + def->func->agg_init(vector_agg_state->agg_states); + DecompressChunkState *decompress_state = (DecompressChunkState *) linitial(vector_agg_state->custom.custom_ps); - VectorAggDef *def = (VectorAggDef *) linitial(vector_agg_state->agg_defs); DecompressContext *dcontext = &decompress_state->decompress_context; - CompressionColumnDescription *value_column_description = - &dcontext->template_columns[def->column]; - - Assert(value_column_description->type == COMPRESSED_COLUMN || - value_column_description->type == SEGMENTBY_COLUMN); BatchQueue *batch_queue = decompress_state->batch_queue; DecompressBatchState *batch_state = batch_array_get_at(&batch_queue->batch_array, 0); @@ -139,71 +136,80 @@ vector_agg_exec(CustomScanState *node) /* Get a reference the the output TupleTableSlot */ TupleTableSlot *aggregated_slot = vector_agg_state->custom.ss.ps.ps_ResultTupleSlot; Assert(aggregated_slot->tts_tupleDescriptor->natts == 1); - - def->func->agg_init(vector_agg_state->agg_states); ExecClearTuple(aggregated_slot); - /* - * Have to skip the batches that are fully filtered out. This condition also - * handles the batch that was consumed on the previous step. - */ - while (batch_state->next_batch_row >= batch_state->total_batch_rows) + for (;;) { - TupleTableSlot *compressed_slot = - ExecProcNode(linitial(decompress_state->csstate.custom_ps)); + /* + * Have to skip the batches that are fully filtered out. This condition also + * handles the batch that was consumed on the previous step. + */ + while (batch_state->next_batch_row >= batch_state->total_batch_rows) + { + TupleTableSlot *compressed_slot = + ExecProcNode(linitial(decompress_state->csstate.custom_ps)); - if (TupIsNull(compressed_slot)) + if (TupIsNull(compressed_slot)) + { + /* All values are processed. */ + vector_agg_state->input_ended = true; + break; + } + + compressed_batch_set_compressed_tuple(dcontext, batch_state, compressed_slot); + } + + if (vector_agg_state->input_ended) { - /* All values are processed. 
*/ - vector_agg_state->input_ended = true; - return NULL; + break; } - compressed_batch_set_compressed_tuple(dcontext, batch_state, compressed_slot); - } + ArrowArray *arrow = NULL; + CompressionColumnDescription *value_column_description = + &dcontext->template_columns[def->column]; + if (value_column_description->type == COMPRESSED_COLUMN) + { + Assert(dcontext->enable_bulk_decompression); + Assert(value_column_description->bulk_decompression_supported); + CompressedColumnValues *values = &batch_state->compressed_columns[def->column]; + Assert(values->decompression_type != DT_Invalid); + arrow = values->arrow; + } + else + { + Assert(value_column_description->type == SEGMENTBY_COLUMN); + } - ArrowArray *arrow = NULL; - if (value_column_description->type == COMPRESSED_COLUMN) - { - Assert(dcontext->enable_bulk_decompression); - Assert(value_column_description->bulk_decompression_supported); - CompressedColumnValues *values = - &batch_state->compressed_columns[def->column]; - Assert(values->decompression_type != DT_Invalid); - arrow = values->arrow; - } - else - { - Assert(value_column_description->type == SEGMENTBY_COLUMN); - } + if (arrow == NULL) + { + /* + * To calculate the sum for a segment by value or default compressed + * column value, we need to multiply this value with the number of + * passing decompressed tuples in this batch. + */ + int n = batch_state->total_batch_rows; + if (batch_state->vector_qual_result) + { + n = arrow_num_valid(batch_state->vector_qual_result, n); + Assert(n > 0); + } - if (arrow == NULL) - { - /* - * To calculate the sum for a segment by value or default compressed - * column value, we need to multiply this value with the number of - * passing decompressed tuples in this batch. - */ - int n = batch_state->total_batch_rows; - if (batch_state->vector_qual_result) + int offs = AttrNumberGetAttrOffset(value_column_description->output_attno); + def->func->agg_const(vector_agg_state->agg_states, + batch_state->decompressed_scan_slot_data.base.tts_values[offs], + batch_state->decompressed_scan_slot_data.base.tts_isnull[offs], + n); + } + else { - n = arrow_num_valid(batch_state->vector_qual_result, n); - Assert(n > 0); + def->func->agg_vector(vector_agg_state->agg_states, + arrow, + batch_state->vector_qual_result); } - int offs = AttrNumberGetAttrOffset(value_column_description->output_attno); - def->func->agg_const(vector_agg_state->agg_states, - batch_state->decompressed_scan_slot_data.base.tts_values[offs], - batch_state->decompressed_scan_slot_data.base.tts_isnull[offs], - n); - } - else - { - def->func->agg_vector(vector_agg_state->agg_states, arrow, batch_state->vector_qual_result); + compressed_batch_discard_tuples(batch_state); } - compressed_batch_discard_tuples(batch_state); - def->func->agg_emit(vector_agg_state->agg_states, &aggregated_slot->tts_values[0], &aggregated_slot->tts_isnull[0]); From f7d70a706d41f1a71ad4b8925be4ec8f3bab91d1 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Wed, 10 Apr 2024 10:39:22 +0200 Subject: [PATCH 59/81] vectorized aggregation for count(*) --- tsl/src/nodes/vector_agg/exec.c | 124 +++++++++++-------- tsl/src/nodes/vector_agg/functions.c | 36 ++++++ tsl/src/nodes/vector_agg/plan.c | 68 ++++++---- tsl/test/expected/compression.out | 2 +- tsl/test/expected/vector_agg_default.out | 33 +++++ tsl/test/expected/vectorized_aggregation.out | 85 ++++++++++++- tsl/test/sql/vector_agg_default.sql | 11 ++ tsl/test/sql/vectorized_aggregation.sql | 8 +- 8 files changed, 286 
insertions(+), 81 deletions(-) diff --git a/tsl/src/nodes/vector_agg/exec.c b/tsl/src/nodes/vector_agg/exec.c index e3ea8edcb26..6955648c4ae 100644 --- a/tsl/src/nodes/vector_agg/exec.c +++ b/tsl/src/nodes/vector_agg/exec.c @@ -53,35 +53,43 @@ vector_agg_begin(CustomScanState *node, EState *estate, int eflags) Assert(IsA(tlentry->expr, Aggref)); Aggref *aggref = castNode(Aggref, tlentry->expr); - Assert(list_length(aggref->args) == 1); + VectorAggDef *def = palloc0(sizeof(VectorAggDef)); + VectorAggFunctions *func = get_vector_aggregate(aggref->aggfnoid); + Assert(func != NULL); + def->func = func; + + if (list_length(aggref->args) > 0) + { + Assert(list_length(aggref->args) == 1); - /* The aggregate should be a partial aggregate */ - Assert(aggref->aggsplit == AGGSPLIT_INITIAL_SERIAL); + /* The aggregate should be a partial aggregate */ + Assert(aggref->aggsplit == AGGSPLIT_INITIAL_SERIAL); - Var *var = castNode(Var, castNode(TargetEntry, linitial(aggref->args))->expr); + Var *var = castNode(Var, castNode(TargetEntry, linitial(aggref->args))->expr); - DecompressContext *dcontext = &decompress_state->decompress_context; + DecompressContext *dcontext = &decompress_state->decompress_context; - CompressionColumnDescription *value_column_description = NULL; - for (int i = 0; i < dcontext->num_total_columns; i++) - { - CompressionColumnDescription *current_column = &dcontext->template_columns[i]; - if (current_column->output_attno == var->varattno) + CompressionColumnDescription *value_column_description = NULL; + for (int i = 0; i < dcontext->num_total_columns; i++) { - value_column_description = current_column; - break; + CompressionColumnDescription *current_column = &dcontext->template_columns[i]; + if (current_column->output_attno == var->varattno) + { + value_column_description = current_column; + break; + } } - } - Ensure(value_column_description != NULL, "aggregated compressed column not found"); + Ensure(value_column_description != NULL, "aggregated compressed column not found"); - Assert(value_column_description->type == COMPRESSED_COLUMN || - value_column_description->type == SEGMENTBY_COLUMN); + Assert(value_column_description->type == COMPRESSED_COLUMN || + value_column_description->type == SEGMENTBY_COLUMN); - VectorAggDef *def = palloc(sizeof(VectorAggDef)); - VectorAggFunctions *func = get_vector_aggregate(aggref->aggfnoid); - Assert(func != NULL); - def->func = func; - def->column = value_column_description - dcontext->template_columns; + def->column = value_column_description - dcontext->template_columns; + } + else + { + def->column = -1; + } vector_agg_state->agg_defs = lappend(vector_agg_state->agg_defs, def); @@ -164,47 +172,57 @@ vector_agg_exec(CustomScanState *node) break; } - ArrowArray *arrow = NULL; - CompressionColumnDescription *value_column_description = - &dcontext->template_columns[def->column]; - if (value_column_description->type == COMPRESSED_COLUMN) - { - Assert(dcontext->enable_bulk_decompression); - Assert(value_column_description->bulk_decompression_supported); - CompressedColumnValues *values = &batch_state->compressed_columns[def->column]; - Assert(values->decompression_type != DT_Invalid); - arrow = values->arrow; - } - else + /* + * To calculate the sum for a segment by value or default compressed + * column value, we need to multiply this value with the number of + * passing decompressed tuples in this batch. 
+ */ + int n = batch_state->total_batch_rows; + if (batch_state->vector_qual_result) { - Assert(value_column_description->type == SEGMENTBY_COLUMN); + n = arrow_num_valid(batch_state->vector_qual_result, n); + Assert(n > 0); } - if (arrow == NULL) + if (def->column >= 0) { - /* - * To calculate the sum for a segment by value or default compressed - * column value, we need to multiply this value with the number of - * passing decompressed tuples in this batch. - */ - int n = batch_state->total_batch_rows; - if (batch_state->vector_qual_result) + ArrowArray *arrow = NULL; + CompressionColumnDescription *value_column_description = + &dcontext->template_columns[def->column]; + if (value_column_description->type == COMPRESSED_COLUMN) { - n = arrow_num_valid(batch_state->vector_qual_result, n); - Assert(n > 0); + Assert(dcontext->enable_bulk_decompression); + Assert(value_column_description->bulk_decompression_supported); + CompressedColumnValues *values = &batch_state->compressed_columns[def->column]; + Assert(values->decompression_type != DT_Invalid); + Assert(values->decompression_type != DT_Iterator); + arrow = values->arrow; + } + else + { + Assert(value_column_description->type == SEGMENTBY_COLUMN); + } + if (arrow == NULL) + { + const int offs = AttrNumberGetAttrOffset(value_column_description->output_attno); + const Datum value = batch_state->decompressed_scan_slot_data.base.tts_values[offs]; + const bool is_null = batch_state->decompressed_scan_slot_data.base.tts_isnull[offs]; + def->func->agg_const(vector_agg_state->agg_states, value, is_null, n); + } + else + { + def->func->agg_vector(vector_agg_state->agg_states, + arrow, + batch_state->vector_qual_result); } - - int offs = AttrNumberGetAttrOffset(value_column_description->output_attno); - def->func->agg_const(vector_agg_state->agg_states, - batch_state->decompressed_scan_slot_data.base.tts_values[offs], - batch_state->decompressed_scan_slot_data.base.tts_isnull[offs], - n); } else { - def->func->agg_vector(vector_agg_state->agg_states, - arrow, - batch_state->vector_qual_result); + /* + * We have only one function w/o arguments -- count(*). Unfortunately + * it has to have a special code path everywhere. 
+ */ + def->func->agg_const(vector_agg_state->agg_states, 0, true, n); } compressed_batch_discard_tuples(batch_state); diff --git a/tsl/src/nodes/vector_agg/functions.c b/tsl/src/nodes/vector_agg/functions.c index 70ef775b6cd..f2ff67ed1dc 100644 --- a/tsl/src/nodes/vector_agg/functions.c +++ b/tsl/src/nodes/vector_agg/functions.c @@ -132,6 +132,40 @@ static VectorAggFunctions int4_sum_agg = { .agg_emit = int4_sum_emit, }; +typedef struct +{ + int64 count; +} CountState; + +static void +count_init(void *agg_state) +{ + CountState *state = (CountState *) agg_state; + state->count = 0; +} + +static void +count_emit(void *agg_state, Datum *out_result, bool *out_isnull) +{ + CountState *state = (CountState *) agg_state; + *out_result = state->count; + *out_isnull = false; +} + +static void +count_star_const(void *agg_state, Datum constvalue, bool constisnull, int n) +{ + CountState *state = (CountState *) agg_state; + state->count += n; +} + +VectorAggFunctions count_star_agg = { + .state_bytes = sizeof(CountState), + .agg_init = count_init, + .agg_const = count_star_const, + .agg_emit = count_emit, +}; + VectorAggFunctions * get_vector_aggregate(Oid aggfnoid) { @@ -139,6 +173,8 @@ get_vector_aggregate(Oid aggfnoid) { case F_SUM_INT4: return &int4_sum_agg; + case F_COUNT_: + return &count_star_agg; default: return NULL; } diff --git a/tsl/src/nodes/vector_agg/plan.c b/tsl/src/nodes/vector_agg/plan.c index 28514b47dfa..24c43d03c7d 100644 --- a/tsl/src/nodes/vector_agg/plan.c +++ b/tsl/src/nodes/vector_agg/plan.c @@ -204,66 +204,67 @@ try_insert_vector_agg_node(Plan *plan) return plan; } - if (agg->plan.lefttree == NULL) + if (agg->numCols != 0) { - /* - * Not sure what this would mean, but check for it just to be on the - * safe side because we can effectively see any possible plan here. - */ + /* No GROUP BY support for now. */ return plan; } - if (!IsA(agg->plan.lefttree, CustomScan)) + if (agg->groupingSets != NIL) { - /* - * Should have a Custom Scan under aggregation. - */ + /* No GROUPING SETS support. */ return plan; } - CustomScan *custom = castNode(CustomScan, agg->plan.lefttree); - if (strcmp(custom->methods->CustomName, "DecompressChunk") != 0) + if (agg->plan.qual != NIL) { /* - * It should be our DecompressChunk node. + * No HAVING support. Probably we can't have it in this node in any case, + * because we only replace the partial aggregation nodes which can't + * check the HAVING clause. */ return plan; } - if (custom->scan.plan.qual != NIL) + if (list_length(agg->plan.targetlist) != 1) { - /* Can't do vectorized aggregation if we have Postgres quals. */ + /* We currently handle only one agg function per node. */ return plan; } - if (agg->numCols != 0) + if (agg->plan.lefttree == NULL) { - /* No GROUP BY support for now. */ + /* + * Not sure what this would mean, but check for it just to be on the + * safe side because we can effectively see any possible plan here. + */ return plan; } - if (agg->groupingSets != NIL) + if (!IsA(agg->plan.lefttree, CustomScan)) { - /* No GROUPING SETS support. */ + /* + * Should have a Custom Scan under aggregation. + */ return plan; } - if (agg->plan.qual != NIL) + CustomScan *custom = castNode(CustomScan, agg->plan.lefttree); + if (strcmp(custom->methods->CustomName, "DecompressChunk") != 0) { /* - * No HAVING support. Probably we can't have it in this node in any case, - * because we only replace the partial aggregation nodes which can't - * check the HAVING clause. + * It should be our DecompressChunk node. 
*/ return plan; } - if (list_length(agg->plan.targetlist) != 1) + if (custom->scan.plan.qual != NIL) { - /* We currently handle only one agg function per node. */ + /* Can't do vectorized aggregation if we have Postgres quals. */ return plan; } + /* Now check the aggregate function itself. */ Node *expr_node = (Node *) castNode(TargetEntry, linitial(agg->plan.targetlist))->expr; Assert(IsA(expr_node, Aggref)); @@ -277,15 +278,28 @@ try_insert_vector_agg_node(Plan *plan) if (get_vector_aggregate(aggref->aggfnoid) == NULL) { + /* + * We don't have a vectorized implementation for this particular + * aggregate function. + */ return plan; } + if (aggref->args == NIL) + { + /* This must be count(*), we can vectorize it. */ + return vector_agg_plan_create(agg, custom); + } + + /* The function must have one argument, check it. */ + Assert(list_length(aggref->args) == 1); TargetEntry *argument = castNode(TargetEntry, linitial(aggref->args)); if (!IsA(argument->expr, Var)) { /* Can aggregate only a bare decompressed column, not an expression. */ return plan; } + Var *aggregated_var = castNode(Var, argument->expr); /* @@ -328,7 +342,11 @@ try_insert_vector_agg_node(Plan *plan) const bool bulk_decompression_enabled_for_column = list_nth_int(bulk_decompression_column, compressed_column_index); - /* Bulk decompression can also be disabled globally. */ + /* + * Bulk decompression can be disabled for all columns in the DecompressChunk + * node settings, we can't do vectorized aggregation for compressed columns + * in that case. For segmentby columns it's still possible. + */ List *settings = linitial(custom->custom_private); const bool bulk_decompression_enabled_globally = list_nth_int(settings, DCS_EnableBulkDecompression); diff --git a/tsl/test/expected/compression.out b/tsl/test/expected/compression.out index bf3a83a4da9..42864f30190 100644 --- a/tsl/test/expected/compression.out +++ b/tsl/test/expected/compression.out @@ -577,7 +577,7 @@ EXPLAIN (COSTS OFF) EXECUTE prep_plan; ---------------------------------------------------------------------- Finalize Aggregate -> Append - -> Partial Aggregate + -> Custom Scan (VectorAgg) -> Custom Scan (DecompressChunk) on _hyper_7_16_chunk -> Seq Scan on compress_hyper_8_18_chunk -> Partial Aggregate diff --git a/tsl/test/expected/vector_agg_default.out b/tsl/test/expected/vector_agg_default.out index 58e8185f7e9..3c55bdef1b3 100644 --- a/tsl/test/expected/vector_agg_default.out +++ b/tsl/test/expected/vector_agg_default.out @@ -55,6 +55,8 @@ select sum(c) from dvagg; 17982 (1 row) +---- Uncomment to generate reference. +--set timescaledb.enable_vectorized_aggregation to off; -- Vectorized aggregation should work with vectorized filters. 
select sum(c) from dvagg where b >= 0; sum @@ -86,6 +88,36 @@ select sum(c) from dvagg where b > 10; (1 row) +select count(*) from dvagg where b >= 0; + count +------- + 1998 +(1 row) + +select count(*) from dvagg where b = 0; + count +------- + 398 +(1 row) + +select count(*) from dvagg where b in (0, 1); + count +------- + 798 +(1 row) + +select count(*) from dvagg where b in (0, 1, 3); + count +------- + 1198 +(1 row) + +select count(*) from dvagg where b > 10; + count +------- + 0 +(1 row) + explain (costs off) select sum(c) from dvagg where b in (0, 1, 3); QUERY PLAN ------------------------------------------------------------------------------- @@ -103,6 +135,7 @@ explain (costs off) select sum(c) from dvagg where b in (0, 1, 3); -> Parallel Seq Scan on compress_hyper_2_4_chunk (12 rows) +reset timescaledb.enable_vectorized_aggregation; -- The runtime chunk exclusion should work. explain (costs off) select sum(c) from dvagg where a < stable_abs(1000); QUERY PLAN diff --git a/tsl/test/expected/vectorized_aggregation.out b/tsl/test/expected/vectorized_aggregation.out index 87a9df6a150..0419f47675f 100644 --- a/tsl/test/expected/vectorized_aggregation.out +++ b/tsl/test/expected/vectorized_aggregation.out @@ -1293,7 +1293,8 @@ SELECT sum(int_value) FROM testtable; (70 rows) RESET timescaledb.enable_vectorized_aggregation; --- Vectorized aggregation NOT possible without bulk decompression +-- Vectorized aggregation without bullk decompression only possible for +-- segmentby columns. SET timescaledb.enable_bulk_decompression = OFF; :EXPLAIN SELECT sum(int_value) FROM testtable; @@ -1371,6 +1372,88 @@ SELECT sum(int_value) FROM testtable; Output: _hyper_1_1_chunk.int_value (70 rows) +:EXPLAIN +SELECT sum(segment_by_value) FROM testtable; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Finalize Aggregate + Output: sum(_hyper_1_1_chunk.segment_by_value) + -> Gather + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk + Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk + Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, 
compress_hyper_2_12_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + Output: _hyper_1_3_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk + Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_4_chunk.segment_by_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk + Output: _hyper_1_4_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk + Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_5_chunk.segment_by_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk + Output: _hyper_1_5_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk + Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_6_chunk.segment_by_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk + Output: _hyper_1_6_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk + Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_7_chunk.segment_by_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk + Output: _hyper_1_7_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk + Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_8_chunk.segment_by_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk + Output: _hyper_1_8_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk + Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, 
compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_9_chunk.segment_by_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk + Output: _hyper_1_9_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk + Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_10_chunk.segment_by_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk + Output: _hyper_1_10_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk + Output: compress_hyper_2_20_chunk._ts_meta_count, compress_hyper_2_20_chunk._ts_meta_sequence_num, compress_hyper_2_20_chunk.segment_by_value, compress_hyper_2_20_chunk._ts_meta_min_1, compress_hyper_2_20_chunk._ts_meta_max_1, compress_hyper_2_20_chunk."time", compress_hyper_2_20_chunk.int_value, compress_hyper_2_20_chunk.float_value + -> Partial Aggregate + Output: PARTIAL sum(_hyper_1_1_chunk.segment_by_value) + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.segment_by_value +(70 rows) + +SELECT sum(segment_by_value) FROM testtable; + sum +-------- + 304695 +(1 row) + RESET timescaledb.enable_bulk_decompression; -- Using the same sum function multiple times is supported by vectorization :EXPLAIN diff --git a/tsl/test/sql/vector_agg_default.sql b/tsl/test/sql/vector_agg_default.sql index 71b8ea7ac2a..b8824307dea 100644 --- a/tsl/test/sql/vector_agg_default.sql +++ b/tsl/test/sql/vector_agg_default.sql @@ -23,6 +23,9 @@ explain (costs off) select sum(c) from dvagg; select sum(c) from dvagg; +---- Uncomment to generate reference. +--set timescaledb.enable_vectorized_aggregation to off; + -- Vectorized aggregation should work with vectorized filters. select sum(c) from dvagg where b >= 0; select sum(c) from dvagg where b = 0; @@ -30,8 +33,16 @@ select sum(c) from dvagg where b in (0, 1); select sum(c) from dvagg where b in (0, 1, 3); select sum(c) from dvagg where b > 10; +select count(*) from dvagg where b >= 0; +select count(*) from dvagg where b = 0; +select count(*) from dvagg where b in (0, 1); +select count(*) from dvagg where b in (0, 1, 3); +select count(*) from dvagg where b > 10; + explain (costs off) select sum(c) from dvagg where b in (0, 1, 3); +reset timescaledb.enable_vectorized_aggregation; + -- The runtime chunk exclusion should work. explain (costs off) select sum(c) from dvagg where a < stable_abs(1000); diff --git a/tsl/test/sql/vectorized_aggregation.sql b/tsl/test/sql/vectorized_aggregation.sql index 6ab98169f87..d195be9fd62 100644 --- a/tsl/test/sql/vectorized_aggregation.sql +++ b/tsl/test/sql/vectorized_aggregation.sql @@ -132,12 +132,18 @@ SELECT sum(int_value) FROM testtable; RESET timescaledb.enable_vectorized_aggregation; --- Vectorized aggregation NOT possible without bulk decompression +-- Vectorized aggregation without bullk decompression only possible for +-- segmentby columns. 
SET timescaledb.enable_bulk_decompression = OFF; :EXPLAIN SELECT sum(int_value) FROM testtable; +:EXPLAIN +SELECT sum(segment_by_value) FROM testtable; + +SELECT sum(segment_by_value) FROM testtable; + RESET timescaledb.enable_bulk_decompression; -- Using the same sum function multiple times is supported by vectorization From f5aa9069804f804c4f728598bd444450600a600e Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Wed, 10 Apr 2024 12:10:01 +0200 Subject: [PATCH 60/81] Add segmentby columns to the list of data columns as well --- .../nodes/decompress_chunk/compressed_batch.c | 41 +++++++------ .../nodes/decompress_chunk/compressed_batch.h | 6 ++ .../decompress_chunk/decompress_context.h | 20 ++++++- tsl/src/nodes/decompress_chunk/exec.c | 22 +++---- tsl/src/nodes/vector_agg/exec.c | 11 ++-- .../expected/transparent_decompression-15.out | 60 +++++++++---------- 6 files changed, 92 insertions(+), 68 deletions(-) diff --git a/tsl/src/nodes/decompress_chunk/compressed_batch.c b/tsl/src/nodes/decompress_chunk/compressed_batch.c index c25c156a1fd..9970f6d0220 100644 --- a/tsl/src/nodes/decompress_chunk/compressed_batch.c +++ b/tsl/src/nodes/decompress_chunk/compressed_batch.c @@ -159,7 +159,7 @@ static void decompress_column(DecompressContext *dcontext, DecompressBatchState *batch_state, TupleTableSlot *compressed_slot, int i) { - CompressionColumnDescription *column_description = &dcontext->template_columns[i]; + CompressionColumnDescription *column_description = &dcontext->compressed_chunk_columns[i]; CompressedColumnValues *column_values = &batch_state->compressed_columns[i]; column_values->arrow = NULL; const AttrNumber attr = AttrNumberGetAttrOffset(column_description->output_attno); @@ -396,22 +396,21 @@ compute_plain_qual(DecompressContext *dcontext, DecompressBatchState *batch_stat Var *var = castNode(Var, linitial(args)); CompressionColumnDescription *column_description = NULL; int column_index = 0; - for (; column_index < dcontext->num_total_columns; column_index++) + for (; column_index < dcontext->num_data_columns; column_index++) { - column_description = &dcontext->template_columns[column_index]; + column_description = &dcontext->compressed_chunk_columns[column_index]; if (column_description->output_attno == var->varattno) { break; } } - Ensure(column_index < dcontext->num_total_columns, + Ensure(column_index < dcontext->num_data_columns, "decompressed column %d not found in batch", var->varattno); Assert(column_description != NULL); Assert(column_description->typid == var->vartype); Ensure(column_description->type == COMPRESSED_COLUMN, "only compressed columns are supported in vectorized quals"); - Assert(column_index < dcontext->num_compressed_columns); CompressedColumnValues *column_values = &batch_state->compressed_columns[column_index]; @@ -804,20 +803,20 @@ compressed_batch_set_compressed_tuple(DecompressContext *dcontext, MemoryContextReset(batch_state->per_batch_context); - for (int i = 0; i < dcontext->num_total_columns; i++) + for (int i = 0; i < dcontext->num_columns_with_metadata; i++) { - CompressionColumnDescription *column_description = &dcontext->template_columns[i]; + CompressionColumnDescription *column_description = &dcontext->compressed_chunk_columns[i]; switch (column_description->type) { case COMPRESSED_COLUMN: { - Assert(i < dcontext->num_compressed_columns); /* * We decompress the compressed columns on demand, so that we can * skip decompressing some columns if the entire batch doesn't pass * the quals. 
Skip them for now. */ + Assert(i < dcontext->num_data_columns); CompressedColumnValues *column_values = &batch_state->compressed_columns[i]; column_values->decompression_type = DT_Invalid; column_values->arrow = NULL; @@ -836,6 +835,10 @@ compressed_batch_set_compressed_tuple(DecompressContext *dcontext, column_description->compressed_scan_attno, &decompressed_tuple->tts_isnull[attr]); + Assert(i < dcontext->num_data_columns); + CompressedColumnValues *column_values = &batch_state->compressed_columns[i]; + column_values->decompression_type = DT_Default; + /* * Note that if it's not a by-value type, we should copy it into * the slot context. @@ -923,8 +926,8 @@ compressed_batch_set_compressed_tuple(DecompressContext *dcontext, * We have some rows in the batch that pass the vectorized filters, so * we have to decompress the rest of the compressed columns. */ - const int num_compressed_columns = dcontext->num_compressed_columns; - for (int i = 0; i < num_compressed_columns; i++) + const int num_data_columns = dcontext->num_data_columns; + for (int i = 0; i < num_data_columns; i++) { CompressedColumnValues *column_values = &batch_state->compressed_columns[i]; if (column_values->decompression_type == DT_Invalid) @@ -965,14 +968,14 @@ store_text_datum(CompressedColumnValues *column_values, int arrow_row) * Doesn't check the quals. */ static void -make_next_tuple(DecompressBatchState *batch_state, uint16 arrow_row, int num_compressed_columns) +make_next_tuple(DecompressBatchState *batch_state, uint16 arrow_row, int num_data_columns) { TupleTableSlot *decompressed_scan_slot = &batch_state->decompressed_scan_slot_data.base; Assert(batch_state->total_batch_rows > 0); Assert(batch_state->next_batch_row < batch_state->total_batch_rows); - for (int i = 0; i < num_compressed_columns; i++) + for (int i = 0; i < num_data_columns; i++) { CompressedColumnValues *column_values = &batch_state->compressed_columns[i]; if (column_values->decompression_type == DT_Iterator) @@ -1101,7 +1104,7 @@ compressed_batch_advance(DecompressContext *dcontext, DecompressBatchState *batc TupleTableSlot *decompressed_scan_slot = &batch_state->decompressed_scan_slot_data.base; const bool reverse = dcontext->reverse; - const int num_compressed_columns = dcontext->num_compressed_columns; + const int num_data_columns = dcontext->num_data_columns; for (; batch_state->next_batch_row < batch_state->total_batch_rows; batch_state->next_batch_row++) @@ -1116,7 +1119,7 @@ compressed_batch_advance(DecompressContext *dcontext, DecompressBatchState *batc * This row doesn't pass the vectorized quals. Advance the iterated * compressed columns if we have any. */ - for (int i = 0; i < num_compressed_columns; i++) + for (int i = 0; i < num_data_columns; i++) { CompressedColumnValues *column_values = &batch_state->compressed_columns[i]; if (column_values->decompression_type == DT_Iterator) @@ -1131,7 +1134,7 @@ compressed_batch_advance(DecompressContext *dcontext, DecompressBatchState *batc continue; } - make_next_tuple(batch_state, arrow_row, num_compressed_columns); + make_next_tuple(batch_state, arrow_row, num_data_columns); if (!postgres_qual(dcontext, batch_state)) { @@ -1153,7 +1156,7 @@ compressed_batch_advance(DecompressContext *dcontext, DecompressBatchState *batc * row-by-row have also ended. 
*/ Assert(batch_state->next_batch_row == batch_state->total_batch_rows); - for (int i = 0; i < num_compressed_columns; i++) + for (int i = 0; i < num_data_columns; i++) { CompressedColumnValues *column_values = &batch_state->compressed_columns[i]; if (column_values->decompression_type == DT_Iterator) @@ -1191,8 +1194,8 @@ compressed_batch_save_first_tuple(DecompressContext *dcontext, DecompressBatchSt * vectorized decompression is disabled with sorted merge. */ #ifdef USE_ASSERT_CHECKING - const int num_compressed_columns = dcontext->num_compressed_columns; - for (int i = 0; i < num_compressed_columns; i++) + const int num_data_columns = dcontext->num_data_columns; + for (int i = 0; i < num_data_columns; i++) { CompressedColumnValues *column_values = &batch_state->compressed_columns[i]; Assert(column_values->decompression_type != DT_Invalid); @@ -1202,7 +1205,7 @@ compressed_batch_save_first_tuple(DecompressContext *dcontext, DecompressBatchSt /* Make the first tuple and save it. */ Assert(batch_state->next_batch_row == 0); const uint16 arrow_row = dcontext->reverse ? batch_state->total_batch_rows - 1 : 0; - make_next_tuple(batch_state, arrow_row, dcontext->num_compressed_columns); + make_next_tuple(batch_state, arrow_row, dcontext->num_data_columns); ExecCopySlot(first_tuple_slot, &batch_state->decompressed_scan_slot_data.base); /* diff --git a/tsl/src/nodes/decompress_chunk/compressed_batch.h b/tsl/src/nodes/decompress_chunk/compressed_batch.h index 486f3e9c637..e70142a82fd 100644 --- a/tsl/src/nodes/decompress_chunk/compressed_batch.h +++ b/tsl/src/nodes/decompress_chunk/compressed_batch.h @@ -93,6 +93,12 @@ typedef struct DecompressBatchState */ uint64 *restrict vector_qual_result; + /* + * This follows DecompressContext.compressed_chunk_columns, but does not + * include the trailing metadata columns, but only the leading data columns. + * These columns are compressed and segmentby columns, their total number is + * given by DecompressContext.num_data_columns. + */ CompressedColumnValues compressed_columns[FLEXIBLE_ARRAY_MEMBER]; } DecompressBatchState; diff --git a/tsl/src/nodes/decompress_chunk/decompress_context.h b/tsl/src/nodes/decompress_chunk/decompress_context.h index 0d084ace1ff..fd9d0855bc2 100644 --- a/tsl/src/nodes/decompress_chunk/decompress_context.h +++ b/tsl/src/nodes/decompress_chunk/decompress_context.h @@ -48,9 +48,23 @@ typedef struct CompressionColumnDescription typedef struct DecompressContext { - CompressionColumnDescription *template_columns; - int num_total_columns; - int num_compressed_columns; + /* + * Note that this array contains only those columns that are decompressed + * (output_attno != 0), and the order is different from the compressed chunk + * tuple order: first go the actual data columns, and after that the metadata + * columns. + */ + CompressionColumnDescription *compressed_chunk_columns; + + /* + * This includes all decompressed columns (output_attno != 0), including the + * metadata columns. + */ + int num_columns_with_metadata; + + /* This excludes the metadata columns. 
*/ + int num_data_columns; + List *vectorized_quals_constified; bool reverse; bool batch_sorted_merge; /* Merge append optimization enabled */ diff --git a/tsl/src/nodes/decompress_chunk/exec.c b/tsl/src/nodes/decompress_chunk/exec.c index f6959b65689..963be8d7a1b 100644 --- a/tsl/src/nodes/decompress_chunk/exec.c +++ b/tsl/src/nodes/decompress_chunk/exec.c @@ -219,10 +219,8 @@ decompress_chunk_begin(CustomScanState *node, EState *estate, int eflags) node->custom_ps = lappend(node->custom_ps, ExecInitNode(compressed_scan, estate, eflags)); /* - * Determine which columns we are going to decompress. Since in the hottest - * loop we work only with compressed columns, we'll put them in front of the - * array. So first, count how many compressed and not compressed columns - * we have. + * Count the actual data columns we have to decompress, skipping the + * metadata columns. */ int num_compressed = 0; int num_total = 0; @@ -242,7 +240,7 @@ decompress_chunk_begin(CustomScanState *node, EState *estate, int eflags) continue; } - if (output_attno > 0 && !lfirst_int(is_segmentby_cell)) + if (output_attno > 0) { /* * Not a metadata column and not a segmentby column, hence a @@ -255,9 +253,9 @@ decompress_chunk_begin(CustomScanState *node, EState *estate, int eflags) } Assert(num_compressed <= num_total); - dcontext->num_compressed_columns = num_compressed; - dcontext->num_total_columns = num_total; - dcontext->template_columns = palloc0(sizeof(CompressionColumnDescription) * num_total); + dcontext->num_data_columns = num_compressed; + dcontext->num_columns_with_metadata = num_total; + dcontext->compressed_chunk_columns = palloc0(sizeof(CompressionColumnDescription) * num_total); dcontext->decompressed_slot = node->ss.ss_ScanTupleSlot; dcontext->ps = &node->ss.ps; @@ -316,15 +314,17 @@ decompress_chunk_begin(CustomScanState *node, EState *estate, int eflags) } } - if (column.type == COMPRESSED_COLUMN) + if (column.output_attno > 0) { + /* Data column. */ Assert(current_compressed < num_total); - dcontext->template_columns[current_compressed++] = column; + dcontext->compressed_chunk_columns[current_compressed++] = column; } else { + /* Metadata column. 
*/ Assert(current_not_compressed < num_total); - dcontext->template_columns[current_not_compressed++] = column; + dcontext->compressed_chunk_columns[current_not_compressed++] = column; } } diff --git a/tsl/src/nodes/vector_agg/exec.c b/tsl/src/nodes/vector_agg/exec.c index 6955648c4ae..ca9c4679c10 100644 --- a/tsl/src/nodes/vector_agg/exec.c +++ b/tsl/src/nodes/vector_agg/exec.c @@ -70,9 +70,10 @@ vector_agg_begin(CustomScanState *node, EState *estate, int eflags) DecompressContext *dcontext = &decompress_state->decompress_context; CompressionColumnDescription *value_column_description = NULL; - for (int i = 0; i < dcontext->num_total_columns; i++) + for (int i = 0; i < dcontext->num_data_columns; i++) { - CompressionColumnDescription *current_column = &dcontext->template_columns[i]; + CompressionColumnDescription *current_column = + &dcontext->compressed_chunk_columns[i]; if (current_column->output_attno == var->varattno) { value_column_description = current_column; @@ -84,7 +85,7 @@ vector_agg_begin(CustomScanState *node, EState *estate, int eflags) Assert(value_column_description->type == COMPRESSED_COLUMN || value_column_description->type == SEGMENTBY_COLUMN); - def->column = value_column_description - dcontext->template_columns; + def->column = value_column_description - dcontext->compressed_chunk_columns; } else { @@ -188,7 +189,7 @@ vector_agg_exec(CustomScanState *node) { ArrowArray *arrow = NULL; CompressionColumnDescription *value_column_description = - &dcontext->template_columns[def->column]; + &dcontext->compressed_chunk_columns[def->column]; if (value_column_description->type == COMPRESSED_COLUMN) { Assert(dcontext->enable_bulk_decompression); @@ -218,7 +219,7 @@ vector_agg_exec(CustomScanState *node) } else { - /* + /* * We have only one function w/o arguments -- count(*). Unfortunately * it has to have a special code path everywhere. 
*/ diff --git a/tsl/test/expected/transparent_decompression-15.out b/tsl/test/expected/transparent_decompression-15.out index e90d5143aaa..670b6690595 100644 --- a/tsl/test/expected/transparent_decompression-15.out +++ b/tsl/test/expected/transparent_decompression-15.out @@ -1635,17 +1635,17 @@ ORDER BY time, :PREFIX SELECT count(*) FROM :TEST_TABLE; - QUERY PLAN ------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------- Finalize Aggregate (actual rows=1 loops=1) -> Append (actual rows=3 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=1800 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (never executed) -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) -> Partial Aggregate (actual rows=1 loops=1) -> Seq Scan on _hyper_1_2_chunk (actual rows=2520 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=2520 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed) -> Seq Scan on compress_hyper_5_16_chunk (actual rows=5 loops=1) (10 rows) @@ -2016,9 +2016,9 @@ WHERE device_id = 1; Finalize Aggregate (actual rows=1 loops=1) Output: count(*) -> Append (actual rows=3 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - Output: PARTIAL count(*) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=360 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + Output: (PARTIAL count(*)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (never executed) Bulk Decompression: false -> Index Scan using compress_hyper_5_15_chunk_device_id_device_id_peer__ts_meta_idx on _timescaledb_internal.compress_hyper_5_15_chunk (actual rows=1 loops=1) Output: compress_hyper_5_15_chunk._ts_meta_count, compress_hyper_5_15_chunk._ts_meta_sequence_num, compress_hyper_5_15_chunk.device_id, compress_hyper_5_15_chunk.device_id_peer, compress_hyper_5_15_chunk._ts_meta_min_3, compress_hyper_5_15_chunk._ts_meta_max_3, compress_hyper_5_15_chunk."time", compress_hyper_5_15_chunk._ts_meta_min_1, compress_hyper_5_15_chunk._ts_meta_max_1, compress_hyper_5_15_chunk.v0, compress_hyper_5_15_chunk._ts_meta_min_2, compress_hyper_5_15_chunk._ts_meta_max_2, compress_hyper_5_15_chunk.v1, compress_hyper_5_15_chunk.v2, compress_hyper_5_15_chunk.v3 @@ -2028,9 +2028,9 @@ WHERE device_id = 1; -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk (actual rows=504 loops=1) Filter: (_hyper_1_2_chunk.device_id = 1) Rows Removed by Filter: 2016 - -> Partial Aggregate (actual rows=1 loops=1) - Output: PARTIAL count(*) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=504 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + Output: (PARTIAL count(*)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (never executed) Bulk Decompression: false -> Index Scan using compress_hyper_5_16_chunk_device_id_device_id_peer__ts_meta_idx on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=1 loops=1) Output: compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk._ts_meta_sequence_num, compress_hyper_5_16_chunk.device_id, 
compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3 @@ -5781,18 +5781,18 @@ ORDER BY time, :PREFIX SELECT count(*) FROM :TEST_TABLE; - QUERY PLAN -------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------- Finalize Aggregate (actual rows=1 loops=1) -> Append (actual rows=9 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (never executed) -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=1080 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (never executed) -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=360 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (never executed) -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) -> Partial Aggregate (actual rows=1 loops=1) -> Seq Scan on _hyper_2_7_chunk (actual rows=504 loops=1) @@ -5800,11 +5800,11 @@ FROM :TEST_TABLE; -> Seq Scan on _hyper_2_8_chunk (actual rows=1512 loops=1) -> Partial Aggregate (actual rows=1 loops=1) -> Seq Scan on _hyper_2_9_chunk (actual rows=504 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=504 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (never executed) -> Seq Scan on compress_hyper_6_20_chunk (actual rows=1 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=1512 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (never executed) -> Seq Scan on compress_hyper_6_21_chunk (actual rows=3 loops=1) -> Partial Aggregate (actual rows=1 loops=1) -> Seq Scan on _hyper_2_12_chunk (actual rows=504 loops=1) @@ -6257,9 +6257,9 @@ WHERE device_id = 1; Finalize Aggregate (actual rows=1 loops=1) Output: count(*) -> Append (actual rows=3 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - Output: PARTIAL count(*) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_4_chunk (actual rows=360 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + Output: (PARTIAL count(*)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_4_chunk (never executed) Bulk Decompression: false -> Index Scan using compress_hyper_6_17_chunk_device_id_device_id_peer__ts_meta_idx on _timescaledb_internal.compress_hyper_6_17_chunk (actual rows=1 loops=1) Output: compress_hyper_6_17_chunk._ts_meta_count, 
compress_hyper_6_17_chunk._ts_meta_sequence_num, compress_hyper_6_17_chunk.device_id, compress_hyper_6_17_chunk.device_id_peer, compress_hyper_6_17_chunk._ts_meta_min_3, compress_hyper_6_17_chunk._ts_meta_max_3, compress_hyper_6_17_chunk."time", compress_hyper_6_17_chunk._ts_meta_min_1, compress_hyper_6_17_chunk._ts_meta_max_1, compress_hyper_6_17_chunk.v0, compress_hyper_6_17_chunk._ts_meta_min_2, compress_hyper_6_17_chunk._ts_meta_max_2, compress_hyper_6_17_chunk.v1, compress_hyper_6_17_chunk.v2, compress_hyper_6_17_chunk.v3 @@ -6269,9 +6269,9 @@ WHERE device_id = 1; -> Index Only Scan using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_7_chunk (actual rows=504 loops=1) Index Cond: (_hyper_2_7_chunk.device_id = 1) Heap Fetches: 504 - -> Partial Aggregate (actual rows=1 loops=1) - Output: PARTIAL count(*) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=504 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + Output: (PARTIAL count(*)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (never executed) Bulk Decompression: false -> Index Scan using compress_hyper_6_20_chunk_device_id_device_id_peer__ts_meta_idx on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=1 loops=1) Output: compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3 From 4568d625695b2f305352e44a819a88f01e1cf349 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Wed, 10 Apr 2024 12:25:26 +0200 Subject: [PATCH 61/81] scalar --- tsl/src/nodes/decompress_chunk/compressed_batch.c | 10 +++++----- tsl/src/nodes/decompress_chunk/compressed_batch.h | 14 ++++++++++++-- 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/tsl/src/nodes/decompress_chunk/compressed_batch.c b/tsl/src/nodes/decompress_chunk/compressed_batch.c index 3407b45f125..8865b977e33 100644 --- a/tsl/src/nodes/decompress_chunk/compressed_batch.c +++ b/tsl/src/nodes/decompress_chunk/compressed_batch.c @@ -177,7 +177,7 @@ decompress_column(DecompressContext *dcontext, DecompressBatchState *batch_state * The column will have a default value for the entire batch, * set it now. */ - column_values->decompression_type = DT_Default; + column_values->decompression_type = DT_Scalar; *column_values->output_value = getmissingattr(dcontext->decompressed_slot->tts_tupleDescriptor, @@ -443,7 +443,7 @@ compute_plain_qual(DecompressContext *dcontext, DecompressBatchState *batch_stat * with this default value, check if it passes the predicate, and apply * it to the entire batch. */ - Assert(column_values->decompression_type == DT_Default); + Assert(column_values->decompression_type == DT_Scalar); /* * We saved the actual default value into the decompressed scan slot @@ -547,7 +547,7 @@ compute_plain_qual(DecompressContext *dcontext, DecompressBatchState *batch_stat /* Translate the result if the column had a default value. 
*/ if (column_values->arrow == NULL) { - Assert(column_values->decompression_type == DT_Default); + Assert(column_values->decompression_type == DT_Scalar); if (!(default_value_predicate_result[0] & 1)) { /* @@ -837,7 +837,7 @@ compressed_batch_set_compressed_tuple(DecompressContext *dcontext, Assert(i < dcontext->num_data_columns); CompressedColumnValues *column_values = &batch_state->compressed_columns[i]; - column_values->decompression_type = DT_Default; + column_values->decompression_type = DT_Scalar; /* * Note that if it's not a by-value type, we should copy it into @@ -1038,7 +1038,7 @@ make_next_tuple(DecompressBatchState *batch_state, uint16 arrow_row, int num_dat else { /* A compressed column with default value, do nothing. */ - Assert(column_values->decompression_type == DT_Default); + Assert(column_values->decompression_type == DT_Scalar); } } diff --git a/tsl/src/nodes/decompress_chunk/compressed_batch.h b/tsl/src/nodes/decompress_chunk/compressed_batch.h index e70142a82fd..b4e93d1d8a8 100644 --- a/tsl/src/nodes/decompress_chunk/compressed_batch.h +++ b/tsl/src/nodes/decompress_chunk/compressed_batch.h @@ -14,13 +14,23 @@ typedef struct ArrowArray ArrowArray; typedef enum { DT_ArrowTextDict = -4, + DT_ArrowText = -3, - DT_Default = -2, + + /* + * The decompressed value is already in the decompressed slot. This is used + * for segmentby and compressed columns with default value in batch. + */ + DT_Scalar = -2, + DT_Iterator = -1, + DT_Invalid = 0, + /* * Any positive number is also valid for the decompression type. It means - * arrow array of a fixed-size by-value type, with size given by the number. + * arrow array of a fixed-size by-value type, with size in bytes given by + * the number. */ } DecompressionType; From f37fa9394f853699c3ccd39400cd29e982af5da3 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Wed, 10 Apr 2024 12:37:40 +0200 Subject: [PATCH 62/81] fixes --- src/compat/compat.h | 1 + tsl/test/expected/transparent_decompression-13.out | 12 ++++++------ tsl/test/expected/transparent_decompression-14.out | 12 ++++++------ tsl/test/expected/transparent_decompression-16.out | 12 ++++++------ 4 files changed, 19 insertions(+), 18 deletions(-) diff --git a/src/compat/compat.h b/src/compat/compat.h index 2760a15bb74..5aa9a9e689c 100644 --- a/src/compat/compat.h +++ b/src/compat/compat.h @@ -972,6 +972,7 @@ object_ownercheck(Oid classid, Oid objectid, Oid roleid) #if PG14_LT #define F_SUM_INT4 2108 +#define F_COUNT_ 2803 #endif /* diff --git a/tsl/test/expected/transparent_decompression-13.out b/tsl/test/expected/transparent_decompression-13.out index 77501fd6c3b..82b05d887f8 100644 --- a/tsl/test/expected/transparent_decompression-13.out +++ b/tsl/test/expected/transparent_decompression-13.out @@ -2015,9 +2015,9 @@ WHERE device_id = 1; Finalize Aggregate (actual rows=1 loops=1) Output: count(*) -> Append (actual rows=3 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - Output: PARTIAL count(*) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=360 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + Output: (PARTIAL count(*)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (never executed) Bulk Decompression: false -> Index Scan using compress_hyper_5_15_chunk_device_id_device_id_peer__ts_meta_idx on _timescaledb_internal.compress_hyper_5_15_chunk (actual rows=1 loops=1) Output: compress_hyper_5_15_chunk._ts_meta_count, 
compress_hyper_5_15_chunk._ts_meta_sequence_num, compress_hyper_5_15_chunk.device_id, compress_hyper_5_15_chunk.device_id_peer, compress_hyper_5_15_chunk._ts_meta_min_3, compress_hyper_5_15_chunk._ts_meta_max_3, compress_hyper_5_15_chunk."time", compress_hyper_5_15_chunk._ts_meta_min_1, compress_hyper_5_15_chunk._ts_meta_max_1, compress_hyper_5_15_chunk.v0, compress_hyper_5_15_chunk._ts_meta_min_2, compress_hyper_5_15_chunk._ts_meta_max_2, compress_hyper_5_15_chunk.v1, compress_hyper_5_15_chunk.v2, compress_hyper_5_15_chunk.v3 @@ -2027,9 +2027,9 @@ WHERE device_id = 1; -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk (actual rows=504 loops=1) Filter: (_hyper_1_2_chunk.device_id = 1) Rows Removed by Filter: 2016 - -> Partial Aggregate (actual rows=1 loops=1) - Output: PARTIAL count(*) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=504 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + Output: (PARTIAL count(*)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (never executed) Bulk Decompression: false -> Index Scan using compress_hyper_5_16_chunk_device_id_device_id_peer__ts_meta_idx on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=1 loops=1) Output: compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk._ts_meta_sequence_num, compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3 diff --git a/tsl/test/expected/transparent_decompression-14.out b/tsl/test/expected/transparent_decompression-14.out index 077c64210cb..b7a5e886e3d 100644 --- a/tsl/test/expected/transparent_decompression-14.out +++ b/tsl/test/expected/transparent_decompression-14.out @@ -2015,9 +2015,9 @@ WHERE device_id = 1; Finalize Aggregate (actual rows=1 loops=1) Output: count(*) -> Append (actual rows=3 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - Output: PARTIAL count(*) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=360 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + Output: (PARTIAL count(*)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (never executed) Bulk Decompression: false -> Index Scan using compress_hyper_5_15_chunk_device_id_device_id_peer__ts_meta_idx on _timescaledb_internal.compress_hyper_5_15_chunk (actual rows=1 loops=1) Output: compress_hyper_5_15_chunk._ts_meta_count, compress_hyper_5_15_chunk._ts_meta_sequence_num, compress_hyper_5_15_chunk.device_id, compress_hyper_5_15_chunk.device_id_peer, compress_hyper_5_15_chunk._ts_meta_min_3, compress_hyper_5_15_chunk._ts_meta_max_3, compress_hyper_5_15_chunk."time", compress_hyper_5_15_chunk._ts_meta_min_1, compress_hyper_5_15_chunk._ts_meta_max_1, compress_hyper_5_15_chunk.v0, compress_hyper_5_15_chunk._ts_meta_min_2, compress_hyper_5_15_chunk._ts_meta_max_2, compress_hyper_5_15_chunk.v1, compress_hyper_5_15_chunk.v2, compress_hyper_5_15_chunk.v3 @@ -2027,9 +2027,9 @@ WHERE device_id = 1; -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk (actual rows=504 loops=1) Filter: (_hyper_1_2_chunk.device_id = 1) Rows Removed by 
Filter: 2016 - -> Partial Aggregate (actual rows=1 loops=1) - Output: PARTIAL count(*) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=504 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + Output: (PARTIAL count(*)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (never executed) Bulk Decompression: false -> Index Scan using compress_hyper_5_16_chunk_device_id_device_id_peer__ts_meta_idx on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=1 loops=1) Output: compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk._ts_meta_sequence_num, compress_hyper_5_16_chunk.device_id, compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3 diff --git a/tsl/test/expected/transparent_decompression-16.out b/tsl/test/expected/transparent_decompression-16.out index 0e56618f5a0..354463b6566 100644 --- a/tsl/test/expected/transparent_decompression-16.out +++ b/tsl/test/expected/transparent_decompression-16.out @@ -2016,9 +2016,9 @@ WHERE device_id = 1; Finalize Aggregate (actual rows=1 loops=1) Output: count(*) -> Append (actual rows=3 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - Output: PARTIAL count(*) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (actual rows=360 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + Output: (PARTIAL count(*)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk (never executed) Bulk Decompression: false -> Index Scan using compress_hyper_5_15_chunk_device_id_device_id_peer__ts_meta_idx on _timescaledb_internal.compress_hyper_5_15_chunk (actual rows=1 loops=1) Output: compress_hyper_5_15_chunk._ts_meta_count, compress_hyper_5_15_chunk._ts_meta_sequence_num, compress_hyper_5_15_chunk.device_id, compress_hyper_5_15_chunk.device_id_peer, compress_hyper_5_15_chunk._ts_meta_min_3, compress_hyper_5_15_chunk._ts_meta_max_3, compress_hyper_5_15_chunk."time", compress_hyper_5_15_chunk._ts_meta_min_1, compress_hyper_5_15_chunk._ts_meta_max_1, compress_hyper_5_15_chunk.v0, compress_hyper_5_15_chunk._ts_meta_min_2, compress_hyper_5_15_chunk._ts_meta_max_2, compress_hyper_5_15_chunk.v1, compress_hyper_5_15_chunk.v2, compress_hyper_5_15_chunk.v3 @@ -2028,9 +2028,9 @@ WHERE device_id = 1; -> Seq Scan on _timescaledb_internal._hyper_1_2_chunk (actual rows=504 loops=1) Filter: (_hyper_1_2_chunk.device_id = 1) Rows Removed by Filter: 2016 - -> Partial Aggregate (actual rows=1 loops=1) - Output: PARTIAL count(*) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (actual rows=504 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + Output: (PARTIAL count(*)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk (never executed) Bulk Decompression: false -> Index Scan using compress_hyper_5_16_chunk_device_id_device_id_peer__ts_meta_idx on _timescaledb_internal.compress_hyper_5_16_chunk (actual rows=1 loops=1) Output: compress_hyper_5_16_chunk._ts_meta_count, compress_hyper_5_16_chunk._ts_meta_sequence_num, compress_hyper_5_16_chunk.device_id, 
compress_hyper_5_16_chunk.device_id_peer, compress_hyper_5_16_chunk._ts_meta_min_3, compress_hyper_5_16_chunk._ts_meta_max_3, compress_hyper_5_16_chunk."time", compress_hyper_5_16_chunk._ts_meta_min_1, compress_hyper_5_16_chunk._ts_meta_max_1, compress_hyper_5_16_chunk.v0, compress_hyper_5_16_chunk._ts_meta_min_2, compress_hyper_5_16_chunk._ts_meta_max_2, compress_hyper_5_16_chunk.v1, compress_hyper_5_16_chunk.v2, compress_hyper_5_16_chunk.v3 From 4ec877cbef63d64fed701ec335311f23d261281e Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Wed, 10 Apr 2024 14:04:40 +0200 Subject: [PATCH 63/81] ref --- .../expected/transparent_decompression-13.out | 48 +++++++++---------- .../expected/transparent_decompression-14.out | 48 +++++++++---------- .../expected/transparent_decompression-16.out | 48 +++++++++---------- 3 files changed, 72 insertions(+), 72 deletions(-) diff --git a/tsl/test/expected/transparent_decompression-13.out b/tsl/test/expected/transparent_decompression-13.out index 82b05d887f8..87a611e2fad 100644 --- a/tsl/test/expected/transparent_decompression-13.out +++ b/tsl/test/expected/transparent_decompression-13.out @@ -1634,17 +1634,17 @@ ORDER BY time, :PREFIX SELECT count(*) FROM :TEST_TABLE; - QUERY PLAN ------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------- Finalize Aggregate (actual rows=1 loops=1) -> Append (actual rows=3 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=1800 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (never executed) -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) -> Partial Aggregate (actual rows=1 loops=1) -> Seq Scan on _hyper_1_2_chunk (actual rows=2520 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=2520 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed) -> Seq Scan on compress_hyper_5_16_chunk (actual rows=5 loops=1) (10 rows) @@ -5807,18 +5807,18 @@ ORDER BY time, :PREFIX SELECT count(*) FROM :TEST_TABLE; - QUERY PLAN -------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------- Finalize Aggregate (actual rows=1 loops=1) -> Append (actual rows=9 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (never executed) -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=1080 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (never executed) -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=360 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on 
_hyper_2_6_chunk (never executed) -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) -> Partial Aggregate (actual rows=1 loops=1) -> Seq Scan on _hyper_2_7_chunk (actual rows=504 loops=1) @@ -5826,11 +5826,11 @@ FROM :TEST_TABLE; -> Seq Scan on _hyper_2_8_chunk (actual rows=1512 loops=1) -> Partial Aggregate (actual rows=1 loops=1) -> Seq Scan on _hyper_2_9_chunk (actual rows=504 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=504 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (never executed) -> Seq Scan on compress_hyper_6_20_chunk (actual rows=1 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=1512 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (never executed) -> Seq Scan on compress_hyper_6_21_chunk (actual rows=3 loops=1) -> Partial Aggregate (actual rows=1 loops=1) -> Seq Scan on _hyper_2_12_chunk (actual rows=504 loops=1) @@ -6283,9 +6283,9 @@ WHERE device_id = 1; Finalize Aggregate (actual rows=1 loops=1) Output: count(*) -> Append (actual rows=3 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - Output: PARTIAL count(*) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_4_chunk (actual rows=360 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + Output: (PARTIAL count(*)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_4_chunk (never executed) Bulk Decompression: false -> Index Scan using compress_hyper_6_17_chunk_device_id_device_id_peer__ts_meta_idx on _timescaledb_internal.compress_hyper_6_17_chunk (actual rows=1 loops=1) Output: compress_hyper_6_17_chunk._ts_meta_count, compress_hyper_6_17_chunk._ts_meta_sequence_num, compress_hyper_6_17_chunk.device_id, compress_hyper_6_17_chunk.device_id_peer, compress_hyper_6_17_chunk._ts_meta_min_3, compress_hyper_6_17_chunk._ts_meta_max_3, compress_hyper_6_17_chunk."time", compress_hyper_6_17_chunk._ts_meta_min_1, compress_hyper_6_17_chunk._ts_meta_max_1, compress_hyper_6_17_chunk.v0, compress_hyper_6_17_chunk._ts_meta_min_2, compress_hyper_6_17_chunk._ts_meta_max_2, compress_hyper_6_17_chunk.v1, compress_hyper_6_17_chunk.v2, compress_hyper_6_17_chunk.v3 @@ -6295,9 +6295,9 @@ WHERE device_id = 1; -> Index Only Scan using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_7_chunk (actual rows=504 loops=1) Index Cond: (_hyper_2_7_chunk.device_id = 1) Heap Fetches: 504 - -> Partial Aggregate (actual rows=1 loops=1) - Output: PARTIAL count(*) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=504 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + Output: (PARTIAL count(*)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (never executed) Bulk Decompression: false -> Index Scan using compress_hyper_6_20_chunk_device_id_device_id_peer__ts_meta_idx on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=1 loops=1) Output: compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk._ts_meta_min_1, 
compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3 diff --git a/tsl/test/expected/transparent_decompression-14.out b/tsl/test/expected/transparent_decompression-14.out index b7a5e886e3d..4b3adc0f536 100644 --- a/tsl/test/expected/transparent_decompression-14.out +++ b/tsl/test/expected/transparent_decompression-14.out @@ -1634,17 +1634,17 @@ ORDER BY time, :PREFIX SELECT count(*) FROM :TEST_TABLE; - QUERY PLAN ------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------- Finalize Aggregate (actual rows=1 loops=1) -> Append (actual rows=3 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=1800 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (never executed) -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) -> Partial Aggregate (actual rows=1 loops=1) -> Seq Scan on _hyper_1_2_chunk (actual rows=2520 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=2520 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed) -> Seq Scan on compress_hyper_5_16_chunk (actual rows=5 loops=1) (10 rows) @@ -5807,18 +5807,18 @@ ORDER BY time, :PREFIX SELECT count(*) FROM :TEST_TABLE; - QUERY PLAN -------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------- Finalize Aggregate (actual rows=1 loops=1) -> Append (actual rows=9 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (never executed) -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=1080 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (never executed) -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=360 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (never executed) -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) -> Partial Aggregate (actual rows=1 loops=1) -> Seq Scan on _hyper_2_7_chunk (actual rows=504 loops=1) @@ -5826,11 +5826,11 @@ FROM :TEST_TABLE; -> Seq Scan on _hyper_2_8_chunk (actual rows=1512 loops=1) -> Partial Aggregate (actual rows=1 loops=1) -> Seq Scan on _hyper_2_9_chunk (actual rows=504 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=504 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (never executed) -> Seq Scan on 
compress_hyper_6_20_chunk (actual rows=1 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=1512 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (never executed) -> Seq Scan on compress_hyper_6_21_chunk (actual rows=3 loops=1) -> Partial Aggregate (actual rows=1 loops=1) -> Seq Scan on _hyper_2_12_chunk (actual rows=504 loops=1) @@ -6283,9 +6283,9 @@ WHERE device_id = 1; Finalize Aggregate (actual rows=1 loops=1) Output: count(*) -> Append (actual rows=3 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - Output: PARTIAL count(*) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_4_chunk (actual rows=360 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + Output: (PARTIAL count(*)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_4_chunk (never executed) Bulk Decompression: false -> Index Scan using compress_hyper_6_17_chunk_device_id_device_id_peer__ts_meta_idx on _timescaledb_internal.compress_hyper_6_17_chunk (actual rows=1 loops=1) Output: compress_hyper_6_17_chunk._ts_meta_count, compress_hyper_6_17_chunk._ts_meta_sequence_num, compress_hyper_6_17_chunk.device_id, compress_hyper_6_17_chunk.device_id_peer, compress_hyper_6_17_chunk._ts_meta_min_3, compress_hyper_6_17_chunk._ts_meta_max_3, compress_hyper_6_17_chunk."time", compress_hyper_6_17_chunk._ts_meta_min_1, compress_hyper_6_17_chunk._ts_meta_max_1, compress_hyper_6_17_chunk.v0, compress_hyper_6_17_chunk._ts_meta_min_2, compress_hyper_6_17_chunk._ts_meta_max_2, compress_hyper_6_17_chunk.v1, compress_hyper_6_17_chunk.v2, compress_hyper_6_17_chunk.v3 @@ -6295,9 +6295,9 @@ WHERE device_id = 1; -> Index Only Scan using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_7_chunk (actual rows=504 loops=1) Index Cond: (_hyper_2_7_chunk.device_id = 1) Heap Fetches: 504 - -> Partial Aggregate (actual rows=1 loops=1) - Output: PARTIAL count(*) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=504 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + Output: (PARTIAL count(*)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (never executed) Bulk Decompression: false -> Index Scan using compress_hyper_6_20_chunk_device_id_device_id_peer__ts_meta_idx on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=1 loops=1) Output: compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3 diff --git a/tsl/test/expected/transparent_decompression-16.out b/tsl/test/expected/transparent_decompression-16.out index 354463b6566..a98a2eece8b 100644 --- a/tsl/test/expected/transparent_decompression-16.out +++ b/tsl/test/expected/transparent_decompression-16.out @@ -1635,17 +1635,17 @@ ORDER BY time, :PREFIX SELECT count(*) FROM :TEST_TABLE; - QUERY PLAN 
------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------- Finalize Aggregate (actual rows=1 loops=1) -> Append (actual rows=3 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=1800 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (never executed) -> Seq Scan on compress_hyper_5_15_chunk (actual rows=5 loops=1) -> Partial Aggregate (actual rows=1 loops=1) -> Seq Scan on _hyper_1_2_chunk (actual rows=2520 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=2520 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed) -> Seq Scan on compress_hyper_5_16_chunk (actual rows=5 loops=1) (10 rows) @@ -5781,18 +5781,18 @@ ORDER BY time, :PREFIX SELECT count(*) FROM :TEST_TABLE; - QUERY PLAN -------------------------------------------------------------------------------------------------- + QUERY PLAN +--------------------------------------------------------------------------------------- Finalize Aggregate (actual rows=1 loops=1) -> Append (actual rows=9 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (never executed) -> Seq Scan on compress_hyper_6_17_chunk (actual rows=1 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=1080 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (never executed) -> Seq Scan on compress_hyper_6_18_chunk (actual rows=3 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=360 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (never executed) -> Seq Scan on compress_hyper_6_19_chunk (actual rows=1 loops=1) -> Partial Aggregate (actual rows=1 loops=1) -> Seq Scan on _hyper_2_7_chunk (actual rows=504 loops=1) @@ -5800,11 +5800,11 @@ FROM :TEST_TABLE; -> Seq Scan on _hyper_2_8_chunk (actual rows=1512 loops=1) -> Partial Aggregate (actual rows=1 loops=1) -> Seq Scan on _hyper_2_9_chunk (actual rows=504 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=504 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (never executed) -> Seq Scan on compress_hyper_6_20_chunk (actual rows=1 loops=1) - -> Partial Aggregate (actual rows=1 loops=1) - -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=1512 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (never executed) -> Seq Scan on compress_hyper_6_21_chunk (actual rows=3 loops=1) -> Partial Aggregate (actual rows=1 loops=1) -> Seq Scan on _hyper_2_12_chunk (actual rows=504 loops=1) @@ -6257,9 +6257,9 @@ WHERE device_id = 1; Finalize Aggregate (actual rows=1 loops=1) Output: count(*) -> Append (actual rows=3 loops=1) - -> 
Partial Aggregate (actual rows=1 loops=1) - Output: PARTIAL count(*) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_4_chunk (actual rows=360 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + Output: (PARTIAL count(*)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_4_chunk (never executed) Bulk Decompression: false -> Index Scan using compress_hyper_6_17_chunk_device_id_device_id_peer__ts_meta_idx on _timescaledb_internal.compress_hyper_6_17_chunk (actual rows=1 loops=1) Output: compress_hyper_6_17_chunk._ts_meta_count, compress_hyper_6_17_chunk._ts_meta_sequence_num, compress_hyper_6_17_chunk.device_id, compress_hyper_6_17_chunk.device_id_peer, compress_hyper_6_17_chunk._ts_meta_min_3, compress_hyper_6_17_chunk._ts_meta_max_3, compress_hyper_6_17_chunk."time", compress_hyper_6_17_chunk._ts_meta_min_1, compress_hyper_6_17_chunk._ts_meta_max_1, compress_hyper_6_17_chunk.v0, compress_hyper_6_17_chunk._ts_meta_min_2, compress_hyper_6_17_chunk._ts_meta_max_2, compress_hyper_6_17_chunk.v1, compress_hyper_6_17_chunk.v2, compress_hyper_6_17_chunk.v3 @@ -6269,9 +6269,9 @@ WHERE device_id = 1; -> Index Only Scan using _hyper_2_7_chunk_metrics_space_device_id_device_id_peer_v0_v1_2 on _timescaledb_internal._hyper_2_7_chunk (actual rows=504 loops=1) Index Cond: (_hyper_2_7_chunk.device_id = 1) Heap Fetches: 504 - -> Partial Aggregate (actual rows=1 loops=1) - Output: PARTIAL count(*) - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (actual rows=504 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + Output: (PARTIAL count(*)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_2_10_chunk (never executed) Bulk Decompression: false -> Index Scan using compress_hyper_6_20_chunk_device_id_device_id_peer__ts_meta_idx on _timescaledb_internal.compress_hyper_6_20_chunk (actual rows=1 loops=1) Output: compress_hyper_6_20_chunk._ts_meta_count, compress_hyper_6_20_chunk._ts_meta_sequence_num, compress_hyper_6_20_chunk.device_id, compress_hyper_6_20_chunk.device_id_peer, compress_hyper_6_20_chunk._ts_meta_min_3, compress_hyper_6_20_chunk._ts_meta_max_3, compress_hyper_6_20_chunk."time", compress_hyper_6_20_chunk._ts_meta_min_1, compress_hyper_6_20_chunk._ts_meta_max_1, compress_hyper_6_20_chunk.v0, compress_hyper_6_20_chunk._ts_meta_min_2, compress_hyper_6_20_chunk._ts_meta_max_2, compress_hyper_6_20_chunk.v1, compress_hyper_6_20_chunk.v2, compress_hyper_6_20_chunk.v3 From c9a3f80c8eb38a1e4532713c53fd63d41f08d06d Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Wed, 10 Apr 2024 14:36:30 +0200 Subject: [PATCH 64/81] fix for i386 --- tsl/src/nodes/vector_agg/functions.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tsl/src/nodes/vector_agg/functions.c b/tsl/src/nodes/vector_agg/functions.c index f2ff67ed1dc..01abb80344e 100644 --- a/tsl/src/nodes/vector_agg/functions.c +++ b/tsl/src/nodes/vector_agg/functions.c @@ -148,7 +148,7 @@ static void count_emit(void *agg_state, Datum *out_result, bool *out_isnull) { CountState *state = (CountState *) agg_state; - *out_result = state->count; + *out_result = Int64GetDatum(state->count); *out_isnull = false; } From 8d14a1283cc2e33604938dc5a26d269de1ccecf2 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Wed, 10 Apr 2024 14:58:57 +0200 Subject: [PATCH 65/81] Treat segmentby columns same as compressed columns with default value This is a minor 
refactoring that will later allow to simplify the vectorized aggregation code. No functional or performance changes are expected. --- .../nodes/decompress_chunk/compressed_batch.c | 49 ++++++++------- .../nodes/decompress_chunk/compressed_batch.h | 20 ++++++- .../decompress_chunk/decompress_context.h | 20 ++++++- tsl/src/nodes/decompress_chunk/exec.c | 60 ++++++++++--------- 4 files changed, 93 insertions(+), 56 deletions(-) diff --git a/tsl/src/nodes/decompress_chunk/compressed_batch.c b/tsl/src/nodes/decompress_chunk/compressed_batch.c index af5ae3d5127..e43a6372622 100644 --- a/tsl/src/nodes/decompress_chunk/compressed_batch.c +++ b/tsl/src/nodes/decompress_chunk/compressed_batch.c @@ -158,7 +158,7 @@ get_max_text_datum_size(ArrowArray *text_array) static void decompress_column(DecompressContext *dcontext, DecompressBatchState *batch_state, int i) { - CompressionColumnDescription *column_description = &dcontext->template_columns[i]; + CompressionColumnDescription *column_description = &dcontext->compressed_chunk_columns[i]; CompressedColumnValues *column_values = &batch_state->compressed_columns[i]; column_values->arrow = NULL; const AttrNumber attr = AttrNumberGetAttrOffset(column_description->output_attno); @@ -178,7 +178,7 @@ decompress_column(DecompressContext *dcontext, DecompressBatchState *batch_state * The column will have a default value for the entire batch, * set it now. */ - column_values->decompression_type = DT_Default; + column_values->decompression_type = DT_Scalar; *column_values->output_value = getmissingattr(dcontext->decompressed_slot->tts_tupleDescriptor, @@ -397,22 +397,21 @@ compute_plain_qual(DecompressContext *dcontext, DecompressBatchState *batch_stat Var *var = castNode(Var, linitial(args)); CompressionColumnDescription *column_description = NULL; int column_index = 0; - for (; column_index < dcontext->num_total_columns; column_index++) + for (; column_index < dcontext->num_data_columns; column_index++) { - column_description = &dcontext->template_columns[column_index]; + column_description = &dcontext->compressed_chunk_columns[column_index]; if (column_description->output_attno == var->varattno) { break; } } - Ensure(column_index < dcontext->num_total_columns, + Ensure(column_index < dcontext->num_data_columns, "decompressed column %d not found in batch", var->varattno); Assert(column_description != NULL); Assert(column_description->typid == var->vartype); Ensure(column_description->type == COMPRESSED_COLUMN, "only compressed columns are supported in vectorized quals"); - Assert(column_index < dcontext->num_compressed_columns); CompressedColumnValues *column_values = &batch_state->compressed_columns[column_index]; @@ -445,7 +444,7 @@ compute_plain_qual(DecompressContext *dcontext, DecompressBatchState *batch_stat * with this default value, check if it passes the predicate, and apply * it to the entire batch. */ - Assert(column_values->decompression_type == DT_Default); + Assert(column_values->decompression_type == DT_Scalar); /* * We saved the actual default value into the decompressed scan slot @@ -549,7 +548,7 @@ compute_plain_qual(DecompressContext *dcontext, DecompressBatchState *batch_stat /* Translate the result if the column had a default value. 
*/ if (column_values->arrow == NULL) { - Assert(column_values->decompression_type == DT_Default); + Assert(column_values->decompression_type == DT_Scalar); if (!(default_value_predicate_result[0] & 1)) { /* @@ -821,20 +820,20 @@ compressed_batch_set_compressed_tuple(DecompressContext *dcontext, MemoryContextReset(batch_state->per_batch_context); - for (int i = 0; i < dcontext->num_total_columns; i++) + for (int i = 0; i < dcontext->num_columns_with_metadata; i++) { - CompressionColumnDescription *column_description = &dcontext->template_columns[i]; + CompressionColumnDescription *column_description = &dcontext->compressed_chunk_columns[i]; switch (column_description->type) { case COMPRESSED_COLUMN: { - Assert(i < dcontext->num_compressed_columns); /* * We decompress the compressed columns on demand, so that we can * skip decompressing some columns if the entire batch doesn't pass * the quals. Skip them for now. */ + Assert(i < dcontext->num_data_columns); CompressedColumnValues *column_values = &batch_state->compressed_columns[i]; column_values->decompression_type = DT_Invalid; column_values->arrow = NULL; @@ -852,6 +851,10 @@ compressed_batch_set_compressed_tuple(DecompressContext *dcontext, slot_getattr(batch_state->compressed_slot, column_description->compressed_scan_attno, &decompressed_tuple->tts_isnull[attr]); + + Assert(i < dcontext->num_data_columns); + CompressedColumnValues *column_values = &batch_state->compressed_columns[i]; + column_values->decompression_type = DT_Scalar; break; } case COUNT_COLUMN: @@ -911,8 +914,8 @@ compressed_batch_set_compressed_tuple(DecompressContext *dcontext, * We have some rows in the batch that pass the vectorized filters, so * we have to decompress the rest of the compressed columns. */ - const int num_compressed_columns = dcontext->num_compressed_columns; - for (int i = 0; i < num_compressed_columns; i++) + const int num_data_columns = dcontext->num_data_columns; + for (int i = 0; i < num_data_columns; i++) { CompressedColumnValues *column_values = &batch_state->compressed_columns[i]; if (column_values->decompression_type == DT_Invalid) @@ -953,14 +956,14 @@ store_text_datum(CompressedColumnValues *column_values, int arrow_row) * Doesn't check the quals. */ static void -make_next_tuple(DecompressBatchState *batch_state, uint16 arrow_row, int num_compressed_columns) +make_next_tuple(DecompressBatchState *batch_state, uint16 arrow_row, int num_data_columns) { TupleTableSlot *decompressed_scan_slot = &batch_state->decompressed_scan_slot_data.base; Assert(batch_state->total_batch_rows > 0); Assert(batch_state->next_batch_row < batch_state->total_batch_rows); - for (int i = 0; i < num_compressed_columns; i++) + for (int i = 0; i < num_data_columns; i++) { CompressedColumnValues *column_values = &batch_state->compressed_columns[i]; if (column_values->decompression_type == DT_Iterator) @@ -1023,7 +1026,7 @@ make_next_tuple(DecompressBatchState *batch_state, uint16 arrow_row, int num_com else { /* A compressed column with default value, do nothing. 
*/ - Assert(column_values->decompression_type == DT_Default); + Assert(column_values->decompression_type == DT_Scalar); } } @@ -1089,7 +1092,7 @@ compressed_batch_advance(DecompressContext *dcontext, DecompressBatchState *batc TupleTableSlot *decompressed_scan_slot = &batch_state->decompressed_scan_slot_data.base; const bool reverse = dcontext->reverse; - const int num_compressed_columns = dcontext->num_compressed_columns; + const int num_data_columns = dcontext->num_data_columns; for (; batch_state->next_batch_row < batch_state->total_batch_rows; batch_state->next_batch_row++) @@ -1104,7 +1107,7 @@ compressed_batch_advance(DecompressContext *dcontext, DecompressBatchState *batc * This row doesn't pass the vectorized quals. Advance the iterated * compressed columns if we have any. */ - for (int i = 0; i < num_compressed_columns; i++) + for (int i = 0; i < num_data_columns; i++) { CompressedColumnValues *column_values = &batch_state->compressed_columns[i]; if (column_values->decompression_type == DT_Iterator) @@ -1119,7 +1122,7 @@ compressed_batch_advance(DecompressContext *dcontext, DecompressBatchState *batc continue; } - make_next_tuple(batch_state, arrow_row, num_compressed_columns); + make_next_tuple(batch_state, arrow_row, num_data_columns); if (!postgres_qual(dcontext, batch_state)) { @@ -1141,7 +1144,7 @@ compressed_batch_advance(DecompressContext *dcontext, DecompressBatchState *batc * row-by-row have also ended. */ Assert(batch_state->next_batch_row == batch_state->total_batch_rows); - for (int i = 0; i < num_compressed_columns; i++) + for (int i = 0; i < num_data_columns; i++) { CompressedColumnValues *column_values = &batch_state->compressed_columns[i]; if (column_values->decompression_type == DT_Iterator) @@ -1179,8 +1182,8 @@ compressed_batch_save_first_tuple(DecompressContext *dcontext, DecompressBatchSt * vectorized decompression is disabled with sorted merge. */ #ifdef USE_ASSERT_CHECKING - const int num_compressed_columns = dcontext->num_compressed_columns; - for (int i = 0; i < num_compressed_columns; i++) + const int num_data_columns = dcontext->num_data_columns; + for (int i = 0; i < num_data_columns; i++) { CompressedColumnValues *column_values = &batch_state->compressed_columns[i]; Assert(column_values->decompression_type != DT_Invalid); @@ -1190,7 +1193,7 @@ compressed_batch_save_first_tuple(DecompressContext *dcontext, DecompressBatchSt /* Make the first tuple and save it. */ Assert(batch_state->next_batch_row == 0); const uint16 arrow_row = dcontext->reverse ? batch_state->total_batch_rows - 1 : 0; - make_next_tuple(batch_state, arrow_row, dcontext->num_compressed_columns); + make_next_tuple(batch_state, arrow_row, dcontext->num_data_columns); ExecCopySlot(first_tuple_slot, &batch_state->decompressed_scan_slot_data.base); /* diff --git a/tsl/src/nodes/decompress_chunk/compressed_batch.h b/tsl/src/nodes/decompress_chunk/compressed_batch.h index bbde12a7119..917b352fa24 100644 --- a/tsl/src/nodes/decompress_chunk/compressed_batch.h +++ b/tsl/src/nodes/decompress_chunk/compressed_batch.h @@ -14,13 +14,23 @@ typedef struct ArrowArray ArrowArray; typedef enum { DT_ArrowTextDict = -4, + DT_ArrowText = -3, - DT_Default = -2, + + /* + * The decompressed value is already in the decompressed slot. This is used + * for segmentby and compressed columns with default value in batch. + */ + DT_Scalar = -2, + DT_Iterator = -1, + DT_Invalid = 0, + /* * Any positive number is also valid for the decompression type. 
It means - * arrow array of a fixed-size by-value type, with size given by the number. + * arrow array of a fixed-size by-value type, with size in bytes given by + * the number. */ } DecompressionType; @@ -99,6 +109,12 @@ typedef struct DecompressBatchState */ uint64 *restrict vector_qual_result; + /* + * This follows DecompressContext.compressed_chunk_columns, but does not + * include the trailing metadata columns, but only the leading data columns. + * These columns are compressed and segmentby columns, their total number is + * given by DecompressContext.num_data_columns. + */ CompressedColumnValues compressed_columns[FLEXIBLE_ARRAY_MEMBER]; } DecompressBatchState; diff --git a/tsl/src/nodes/decompress_chunk/decompress_context.h b/tsl/src/nodes/decompress_chunk/decompress_context.h index 7670163b0e2..834c8f62f47 100644 --- a/tsl/src/nodes/decompress_chunk/decompress_context.h +++ b/tsl/src/nodes/decompress_chunk/decompress_context.h @@ -47,9 +47,23 @@ typedef struct CompressionColumnDescription typedef struct DecompressContext { - CompressionColumnDescription *template_columns; - int num_total_columns; - int num_compressed_columns; + /* + * Note that this array contains only those columns that are decompressed + * (output_attno != 0), and the order is different from the compressed chunk + * tuple order: first go the actual data columns, and after that the metadata + * columns. + */ + CompressionColumnDescription *compressed_chunk_columns; + + /* + * This includes all decompressed columns (output_attno != 0), including the + * metadata columns. + */ + int num_columns_with_metadata; + + /* This excludes the metadata columns. */ + int num_data_columns; + List *vectorized_quals_constified; bool reverse; bool batch_sorted_merge; /* Merge append optimization enabled */ diff --git a/tsl/src/nodes/decompress_chunk/exec.c b/tsl/src/nodes/decompress_chunk/exec.c index 238b52f927b..ca3362cca42 100644 --- a/tsl/src/nodes/decompress_chunk/exec.c +++ b/tsl/src/nodes/decompress_chunk/exec.c @@ -223,13 +223,15 @@ decompress_chunk_begin(CustomScanState *node, EState *estate, int eflags) node->custom_ps = lappend(node->custom_ps, ExecInitNode(compressed_scan, estate, eflags)); /* - * Determine which columns we are going to decompress. Since in the hottest - * loop we work only with compressed columns, we'll put them in front of the - * array. So first, count how many compressed and not compressed columns - * we have. + * Count the actual data columns we have to decompress, skipping the + * metadata columns. We only need the metadata columns when initializing the + * compressed batch, so they are not saved in the compressed batch itself, + * it tracks only the data columns. We put the metadata columns to the end + * of the array to have the same column indexes in compressed batch state + * and in decompression context. */ - int num_compressed = 0; - int num_total = 0; + int num_data_columns = 0; + int num_columns_with_metadata = 0; ListCell *dest_cell; ListCell *is_segmentby_cell; @@ -246,22 +248,22 @@ decompress_chunk_begin(CustomScanState *node, EState *estate, int eflags) continue; } - if (output_attno > 0 && !lfirst_int(is_segmentby_cell)) + if (output_attno > 0) { /* - * Not a metadata column and not a segmentby column, hence a - * compressed one. + * Not a metadata column. 
*/ - num_compressed++; + num_data_columns++; } - num_total++; + num_columns_with_metadata++; } - Assert(num_compressed <= num_total); - dcontext->num_compressed_columns = num_compressed; - dcontext->num_total_columns = num_total; - dcontext->template_columns = palloc0(sizeof(CompressionColumnDescription) * num_total); + Assert(num_data_columns <= num_columns_with_metadata); + dcontext->num_data_columns = num_data_columns; + dcontext->num_columns_with_metadata = num_columns_with_metadata; + dcontext->compressed_chunk_columns = + palloc0(sizeof(CompressionColumnDescription) * num_columns_with_metadata); dcontext->decompressed_slot = node->ss.ss_ScanTupleSlot; dcontext->ps = &node->ss.ps; @@ -272,7 +274,7 @@ decompress_chunk_begin(CustomScanState *node, EState *estate, int eflags) * separate indices for them. */ int current_compressed = 0; - int current_not_compressed = num_compressed; + int current_not_compressed = num_data_columns; for (int compressed_index = 0; compressed_index < list_length(chunk_state->decompression_map); compressed_index++) { @@ -330,20 +332,22 @@ decompress_chunk_begin(CustomScanState *node, EState *estate, int eflags) } } - if (column.type == COMPRESSED_COLUMN) + if (column.output_attno > 0) { - Assert(current_compressed < num_total); - dcontext->template_columns[current_compressed++] = column; + /* Data column. */ + Assert(current_compressed < num_columns_with_metadata); + dcontext->compressed_chunk_columns[current_compressed++] = column; } else { - Assert(current_not_compressed < num_total); - dcontext->template_columns[current_not_compressed++] = column; + /* Metadata column. */ + Assert(current_not_compressed < num_columns_with_metadata); + dcontext->compressed_chunk_columns[current_not_compressed++] = column; } } - Assert(current_compressed == num_compressed); - Assert(current_not_compressed == num_total); + Assert(current_compressed == num_data_columns); + Assert(current_not_compressed == num_columns_with_metadata); /* * Choose which batch queue we are going to use: heap for batch sorted @@ -352,7 +356,7 @@ decompress_chunk_begin(CustomScanState *node, EState *estate, int eflags) if (dcontext->batch_sorted_merge) { chunk_state->batch_queue = - batch_queue_heap_create(num_compressed, + batch_queue_heap_create(num_data_columns, chunk_state->sortinfo, dcontext->decompressed_slot->tts_tupleDescriptor, &BatchQueueFunctionsHeap); @@ -361,7 +365,7 @@ decompress_chunk_begin(CustomScanState *node, EState *estate, int eflags) else { chunk_state->batch_queue = - batch_queue_fifo_create(num_compressed, &BatchQueueFunctionsFifo); + batch_queue_fifo_create(num_data_columns, &BatchQueueFunctionsFifo); chunk_state->exec_methods.ExecCustomScan = decompress_chunk_exec_fifo; } @@ -405,10 +409,10 @@ perform_vectorized_sum_int4(DecompressChunkState *chunk_state, Aggref *aggref) Assert(aggref->aggtranstype == INT8OID); /* Two columns are decompressed, the column that needs to be aggregated and the count column */ - Assert(dcontext->num_total_columns == 2); + Assert(dcontext->num_columns_with_metadata == 2); - CompressionColumnDescription *value_column_description = &dcontext->template_columns[0]; - CompressionColumnDescription *count_column_description = &dcontext->template_columns[1]; + CompressionColumnDescription *value_column_description = &dcontext->compressed_chunk_columns[0]; + CompressionColumnDescription *count_column_description = &dcontext->compressed_chunk_columns[1]; if (count_column_description->type != COUNT_COLUMN) { /* From e9e7bf96f1f2993482c05b1421e524b530a95f88 
Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Wed, 10 Apr 2024 15:04:28 +0200 Subject: [PATCH 66/81] spelling --- tsl/src/nodes/vector_agg/functions.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tsl/src/nodes/vector_agg/functions.h b/tsl/src/nodes/vector_agg/functions.h index e92dd5a5639..95614375161 100644 --- a/tsl/src/nodes/vector_agg/functions.h +++ b/tsl/src/nodes/vector_agg/functions.h @@ -25,7 +25,7 @@ typedef struct /* Aggregate a constant (like segmentby or column with default value). */ void (*agg_const)(void *agg_state, Datum constvalue, bool constisnull, int n); - /* Emit a parital result. */ + /* Emit a partial result. */ void (*agg_emit)(void *agg_state, Datum *out_result, bool *out_isnull); } VectorAggFunctions; From 7f0bd221d73297d106465a39bc3e8e733561c9b7 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Wed, 10 Apr 2024 15:17:01 +0200 Subject: [PATCH 67/81] make it even more uniform --- .../nodes/decompress_chunk/compressed_batch.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/tsl/src/nodes/decompress_chunk/compressed_batch.c b/tsl/src/nodes/decompress_chunk/compressed_batch.c index 8865b977e33..3dc7319ccd7 100644 --- a/tsl/src/nodes/decompress_chunk/compressed_batch.c +++ b/tsl/src/nodes/decompress_chunk/compressed_batch.c @@ -829,15 +829,20 @@ compressed_batch_set_compressed_tuple(DecompressContext *dcontext, * and our output tuples are read-only, so it's enough to only * save it once per batch, which we do here. */ - AttrNumber attr = AttrNumberGetAttrOffset(column_description->output_attno); - decompressed_tuple->tts_values[attr] = - slot_getattr(compressed_slot, - column_description->compressed_scan_attno, - &decompressed_tuple->tts_isnull[attr]); - Assert(i < dcontext->num_data_columns); CompressedColumnValues *column_values = &batch_state->compressed_columns[i]; column_values->decompression_type = DT_Scalar; + AttrNumber attr = AttrNumberGetAttrOffset(column_description->output_attno); + column_values->output_value = + &compressed_batch_current_tuple(batch_state)->tts_values[attr]; + column_values->output_isnull = + &compressed_batch_current_tuple(batch_state)->tts_isnull[attr]; + column_values->arrow = NULL; + + *column_values->output_value = + slot_getattr(compressed_slot, + column_description->compressed_scan_attno, + column_values->output_isnull); /* * Note that if it's not a by-value type, we should copy it into From 0ec8c15730c714c630db5543864fb25ba3eb518e Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Wed, 10 Apr 2024 15:20:28 +0200 Subject: [PATCH 68/81] simplify scalar columns in vector agg --- tsl/src/nodes/vector_agg/exec.c | 33 +++++++++++---------------------- 1 file changed, 11 insertions(+), 22 deletions(-) diff --git a/tsl/src/nodes/vector_agg/exec.c b/tsl/src/nodes/vector_agg/exec.c index ca9c4679c10..920dcfd5bde 100644 --- a/tsl/src/nodes/vector_agg/exec.c +++ b/tsl/src/nodes/vector_agg/exec.c @@ -187,33 +187,22 @@ vector_agg_exec(CustomScanState *node) if (def->column >= 0) { - ArrowArray *arrow = NULL; - CompressionColumnDescription *value_column_description = - &dcontext->compressed_chunk_columns[def->column]; - if (value_column_description->type == COMPRESSED_COLUMN) - { - Assert(dcontext->enable_bulk_decompression); - Assert(value_column_description->bulk_decompression_supported); - CompressedColumnValues *values = 
&batch_state->compressed_columns[def->column]; - Assert(values->decompression_type != DT_Invalid); - Assert(values->decompression_type != DT_Iterator); - arrow = values->arrow; - } - else - { - Assert(value_column_description->type == SEGMENTBY_COLUMN); - } - if (arrow == NULL) + CompressedColumnValues *values = &batch_state->compressed_columns[def->column]; + Assert(values->decompression_type != DT_Invalid); + Assert(values->decompression_type != DT_Iterator); + + if (values->arrow == NULL) { - const int offs = AttrNumberGetAttrOffset(value_column_description->output_attno); - const Datum value = batch_state->decompressed_scan_slot_data.base.tts_values[offs]; - const bool is_null = batch_state->decompressed_scan_slot_data.base.tts_isnull[offs]; - def->func->agg_const(vector_agg_state->agg_states, value, is_null, n); + Assert(values->decompression_type == DT_Scalar); + def->func->agg_const(vector_agg_state->agg_states, + *values->output_value, + *values->output_isnull, + n); } else { def->func->agg_vector(vector_agg_state->agg_states, - arrow, + values->arrow, batch_state->vector_qual_result); } } From eca08e71a35e630d36f384c4062008edeba6d2ad Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Wed, 10 Apr 2024 15:17:01 +0200 Subject: [PATCH 69/81] make it even more uniform --- .../nodes/decompress_chunk/compressed_batch.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/tsl/src/nodes/decompress_chunk/compressed_batch.c b/tsl/src/nodes/decompress_chunk/compressed_batch.c index e43a6372622..a729ebcfb06 100644 --- a/tsl/src/nodes/decompress_chunk/compressed_batch.c +++ b/tsl/src/nodes/decompress_chunk/compressed_batch.c @@ -846,15 +846,20 @@ compressed_batch_set_compressed_tuple(DecompressContext *dcontext, * and our output tuples are read-only, so it's enough to only * save it once per batch, which we do here. 
*/ - AttrNumber attr = AttrNumberGetAttrOffset(column_description->output_attno); - decompressed_tuple->tts_values[attr] = - slot_getattr(batch_state->compressed_slot, - column_description->compressed_scan_attno, - &decompressed_tuple->tts_isnull[attr]); - Assert(i < dcontext->num_data_columns); CompressedColumnValues *column_values = &batch_state->compressed_columns[i]; column_values->decompression_type = DT_Scalar; + AttrNumber attr = AttrNumberGetAttrOffset(column_description->output_attno); + column_values->output_value = + &compressed_batch_current_tuple(batch_state)->tts_values[attr]; + column_values->output_isnull = + &compressed_batch_current_tuple(batch_state)->tts_isnull[attr]; + column_values->arrow = NULL; + + *column_values->output_value = + slot_getattr(batch_state->compressed_slot, + column_description->compressed_scan_attno, + column_values->output_isnull); break; } case COUNT_COLUMN: From 5f88a232a3b761c0989de45e2fc5ae02175850be Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 11 Apr 2024 14:48:30 +0200 Subject: [PATCH 70/81] cleanup after merge --- .../nodes/decompress_chunk/compressed_batch.c | 28 +++++++++---------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/tsl/src/nodes/decompress_chunk/compressed_batch.c b/tsl/src/nodes/decompress_chunk/compressed_batch.c index 3dc7319ccd7..8d06ae0e62c 100644 --- a/tsl/src/nodes/decompress_chunk/compressed_batch.c +++ b/tsl/src/nodes/decompress_chunk/compressed_batch.c @@ -833,30 +833,28 @@ compressed_batch_set_compressed_tuple(DecompressContext *dcontext, CompressedColumnValues *column_values = &batch_state->compressed_columns[i]; column_values->decompression_type = DT_Scalar; AttrNumber attr = AttrNumberGetAttrOffset(column_description->output_attno); - column_values->output_value = - &compressed_batch_current_tuple(batch_state)->tts_values[attr]; - column_values->output_isnull = - &compressed_batch_current_tuple(batch_state)->tts_isnull[attr]; + Datum *output_value = &decompressed_tuple->tts_values[attr]; + bool *output_isnull = &decompressed_tuple->tts_isnull[attr]; + column_values->output_value = output_value; + column_values->output_isnull = output_isnull; column_values->arrow = NULL; - *column_values->output_value = - slot_getattr(compressed_slot, - column_description->compressed_scan_attno, - column_values->output_isnull); + *output_value = slot_getattr(compressed_slot, + column_description->compressed_scan_attno, + output_isnull); /* * Note that if it's not a by-value type, we should copy it into * the slot context. */ - if (!column_description->by_value && - DatumGetPointer(decompressed_tuple->tts_values[attr]) != NULL) + if (!column_description->by_value && !*output_isnull && + DatumGetPointer(*output_value) != NULL) { if (column_description->value_bytes < 0) { /* This is a varlena type. 
*/ - decompressed_tuple->tts_values[attr] = PointerGetDatum( - detoaster_detoast_attr_copy((struct varlena *) - decompressed_tuple->tts_values[attr], + *output_value = PointerGetDatum( + detoaster_detoast_attr_copy((struct varlena *) *output_value, &dcontext->detoaster, batch_state->per_batch_context)); } @@ -866,9 +864,9 @@ compressed_batch_set_compressed_tuple(DecompressContext *dcontext, void *tmp = MemoryContextAlloc(batch_state->per_batch_context, column_description->value_bytes); memcpy(tmp, - DatumGetPointer(decompressed_tuple->tts_values[attr]), + DatumGetPointer(*output_value), column_description->value_bytes); - decompressed_tuple->tts_values[attr] = PointerGetDatum(tmp); + *output_value = PointerGetDatum(tmp); } } break; From e78efb7ddd28675b1565f99ce5381d91d69efdda Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Thu, 11 Apr 2024 21:04:15 +0200 Subject: [PATCH 71/81] prepare for multiple aggregates --- tsl/src/nodes/vector_agg/exec.c | 127 +++++++++++++++++++------------- tsl/src/nodes/vector_agg/exec.h | 10 +-- 2 files changed, 79 insertions(+), 58 deletions(-) diff --git a/tsl/src/nodes/vector_agg/exec.c b/tsl/src/nodes/vector_agg/exec.c index 920dcfd5bde..3864730450b 100644 --- a/tsl/src/nodes/vector_agg/exec.c +++ b/tsl/src/nodes/vector_agg/exec.c @@ -45,11 +45,11 @@ vector_agg_begin(CustomScanState *node, EState *estate, int eflags) */ List *aggregated_tlist = castNode(CustomScan, vector_agg_state->custom.ss.ps.plan)->custom_scan_tlist; - ListCell *lc; - foreach (lc, aggregated_tlist) + const int naggs = list_length(aggregated_tlist); + for (int i = 0; i < naggs; i++) { /* Determine which kind of vectorized aggregation we should perform */ - TargetEntry *tlentry = (TargetEntry *) lfirst(lc); + TargetEntry *tlentry = (TargetEntry *) list_nth(aggregated_tlist, i); Assert(IsA(tlentry->expr, Aggref)); Aggref *aggref = castNode(Aggref, tlentry->expr); @@ -85,20 +85,25 @@ vector_agg_begin(CustomScanState *node, EState *estate, int eflags) Assert(value_column_description->type == COMPRESSED_COLUMN || value_column_description->type == SEGMENTBY_COLUMN); - def->column = value_column_description - dcontext->compressed_chunk_columns; + def->input_offset = value_column_description - dcontext->compressed_chunk_columns; } else { - def->column = -1; + def->input_offset = -1; } - vector_agg_state->agg_defs = lappend(vector_agg_state->agg_defs, def); + def->output_offset = i; - vector_agg_state->agg_state_row_bytes += MAXALIGN(func->state_bytes); + vector_agg_state->agg_defs = lappend(vector_agg_state->agg_defs, def); } - vector_agg_state->num_agg_state_rows = 1; - vector_agg_state->agg_states = palloc(vector_agg_state->agg_state_row_bytes); + ListCell *lc; + foreach (lc, vector_agg_state->agg_defs) + { + VectorAggDef *def = lfirst(lc); + vector_agg_state->agg_states = + lappend(vector_agg_state->agg_states, palloc0(def->func->state_bytes)); + } } static void @@ -119,6 +124,47 @@ vector_agg_rescan(CustomScanState *node) state->input_ended = false; } +static void +compute_single_aggregate(DecompressBatchState *batch_state, VectorAggDef *agg_def, void *agg_state) +{ + /* + * To calculate the sum for a segment by value or default compressed + * column value, we need to multiply this value with the number of + * passing decompressed tuples in this batch. 
+ */ + int n = batch_state->total_batch_rows; + if (batch_state->vector_qual_result) + { + n = arrow_num_valid(batch_state->vector_qual_result, n); + Assert(n > 0); + } + + if (agg_def->input_offset >= 0) + { + CompressedColumnValues *values = &batch_state->compressed_columns[agg_def->input_offset]; + Assert(values->decompression_type != DT_Invalid); + Assert(values->decompression_type != DT_Iterator); + + if (values->arrow == NULL) + { + Assert(values->decompression_type == DT_Scalar); + agg_def->func->agg_const(agg_state, *values->output_value, *values->output_isnull, n); + } + else + { + agg_def->func->agg_vector(agg_state, values->arrow, batch_state->vector_qual_result); + } + } + else + { + /* + * We have only one function w/o arguments -- count(*). Unfortunately + * it has to have a special code path everywhere. + */ + agg_def->func->agg_const(agg_state, 0, true, n); + } +} + static TupleTableSlot * vector_agg_exec(CustomScanState *node) { @@ -131,8 +177,13 @@ vector_agg_exec(CustomScanState *node) return NULL; } - VectorAggDef *def = (VectorAggDef *) linitial(vector_agg_state->agg_defs); - def->func->agg_init(vector_agg_state->agg_states); + const int naggs = list_length(vector_agg_state->agg_defs); + for (int i = 0; i < naggs; i++) + { + VectorAggDef *agg_def = (VectorAggDef *) list_nth(vector_agg_state->agg_defs, i); + void *agg_state = (void *) list_nth(vector_agg_state->agg_states, i); + agg_def->func->agg_init(agg_state); + } DecompressChunkState *decompress_state = (DecompressChunkState *) linitial(vector_agg_state->custom.custom_ps); @@ -173,54 +224,24 @@ vector_agg_exec(CustomScanState *node) break; } - /* - * To calculate the sum for a segment by value or default compressed - * column value, we need to multiply this value with the number of - * passing decompressed tuples in this batch. - */ - int n = batch_state->total_batch_rows; - if (batch_state->vector_qual_result) - { - n = arrow_num_valid(batch_state->vector_qual_result, n); - Assert(n > 0); - } - - if (def->column >= 0) + for (int i = 0; i < naggs; i++) { - CompressedColumnValues *values = &batch_state->compressed_columns[def->column]; - Assert(values->decompression_type != DT_Invalid); - Assert(values->decompression_type != DT_Iterator); - - if (values->arrow == NULL) - { - Assert(values->decompression_type == DT_Scalar); - def->func->agg_const(vector_agg_state->agg_states, - *values->output_value, - *values->output_isnull, - n); - } - else - { - def->func->agg_vector(vector_agg_state->agg_states, - values->arrow, - batch_state->vector_qual_result); - } - } - else - { - /* - * We have only one function w/o arguments -- count(*). Unfortunately - * it has to have a special code path everywhere. 
- */ - def->func->agg_const(vector_agg_state->agg_states, 0, true, n); + VectorAggDef *agg_def = (VectorAggDef *) list_nth(vector_agg_state->agg_defs, i); + void *agg_state = (void *) list_nth(vector_agg_state->agg_states, i); + compute_single_aggregate(batch_state, agg_def, agg_state); } compressed_batch_discard_tuples(batch_state); } - def->func->agg_emit(vector_agg_state->agg_states, - &aggregated_slot->tts_values[0], - &aggregated_slot->tts_isnull[0]); + for (int i = 0; i < naggs; i++) + { + VectorAggDef *agg_def = (VectorAggDef *) list_nth(vector_agg_state->agg_defs, i); + void *agg_state = (void *) list_nth(vector_agg_state->agg_states, i); + agg_def->func->agg_emit(agg_state, + &aggregated_slot->tts_values[agg_def->output_offset], + &aggregated_slot->tts_isnull[agg_def->output_offset]); + } ExecStoreVirtualTuple(aggregated_slot); diff --git a/tsl/src/nodes/vector_agg/exec.h b/tsl/src/nodes/vector_agg/exec.h index a8cc52125d0..2cb18af6e89 100644 --- a/tsl/src/nodes/vector_agg/exec.h +++ b/tsl/src/nodes/vector_agg/exec.h @@ -15,7 +15,8 @@ typedef struct { VectorAggFunctions *func; - int column; + int input_offset; + int output_offset; } VectorAggDef; typedef struct @@ -24,16 +25,15 @@ typedef struct List *agg_defs; - int agg_state_row_bytes; - int num_agg_state_rows; - void *agg_states; - /* * We can't call the underlying scan after it has ended, or it will be * restarted. This is the behavior of Postgres heap scans. So we have to * track whether it has ended to avoid this. */ bool input_ended; + + /* The following fields are related to the grouping policy. */ + List *agg_states; } VectorAggState; extern Node *vector_agg_state_create(CustomScan *cscan); From c019642832466bd59ca331b37b40ee507d14d84d Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Fri, 12 Apr 2024 12:13:37 +0200 Subject: [PATCH 72/81] multiple aggregate functions --- tsl/src/nodes/vector_agg/exec.c | 1 - tsl/src/nodes/vector_agg/plan.c | 204 ++++++++++--------- tsl/test/expected/vector_agg_default.out | 23 +++ tsl/test/expected/vectorized_aggregation.out | 50 +++-- tsl/test/sql/vector_agg_default.sql | 4 + tsl/test/sql/vectorized_aggregation.sql | 6 +- 6 files changed, 165 insertions(+), 123 deletions(-) diff --git a/tsl/src/nodes/vector_agg/exec.c b/tsl/src/nodes/vector_agg/exec.c index 3864730450b..1a3a2e649c4 100644 --- a/tsl/src/nodes/vector_agg/exec.c +++ b/tsl/src/nodes/vector_agg/exec.c @@ -195,7 +195,6 @@ vector_agg_exec(CustomScanState *node) /* Get a reference the the output TupleTableSlot */ TupleTableSlot *aggregated_slot = vector_agg_state->custom.ss.ps.ps_ResultTupleSlot; - Assert(aggregated_slot->tts_tupleDescriptor->natts == 1); ExecClearTuple(aggregated_slot); for (;;) diff --git a/tsl/src/nodes/vector_agg/plan.c b/tsl/src/nodes/vector_agg/plan.c index 24c43d03c7d..51c393f8e6f 100644 --- a/tsl/src/nodes/vector_agg/plan.c +++ b/tsl/src/nodes/vector_agg/plan.c @@ -150,6 +150,104 @@ vector_agg_plan_create(Agg *agg, CustomScan *decompress_chunk) return (Plan *) custom; } +static bool +can_vectorize_aggref(Aggref *aggref, CustomScan *custom) +{ + if (aggref->aggfilter != NULL) + { + /* Filter clause on aggregate is not supported. */ + return false; + } + + if (get_vector_aggregate(aggref->aggfnoid) == NULL) + { + /* + * We don't have a vectorized implementation for this particular + * aggregate function. + */ + return false; + } + + if (aggref->args == NIL) + { + /* This must be count(*), we can vectorize it. 
*/ + return true; + } + + /* The function must have one argument, check it. */ + Assert(list_length(aggref->args) == 1); + TargetEntry *argument = castNode(TargetEntry, linitial(aggref->args)); + if (!IsA(argument->expr, Var)) + { + /* Can aggregate only a bare decompressed column, not an expression. */ + return false; + } + + Var *aggregated_var = castNode(Var, argument->expr); + + /* + * Check if this particular column is a segmentby or has bulk decompression + * enabled. This hook is called after set_plan_refs, and at this stage the + * output targetlist of the aggregation node uses OUTER_VAR references into + * the child scan targetlist, so first we have to translate this. + */ + Assert(aggregated_var->varno == OUTER_VAR); + TargetEntry *decompressed_target_entry = + list_nth(custom->scan.plan.targetlist, AttrNumberGetAttrOffset(aggregated_var->varattno)); + + if (!IsA(decompressed_target_entry->expr, Var)) + { + /* + * Can only aggregate the plain Vars. Not sure if this is redundant with + * the similar check above. + */ + return false; + } + Var *decompressed_var = castNode(Var, decompressed_target_entry->expr); + + /* + * Now, we have to translate the decompressed varno into the compressed + * column index, to check if the column supports bulk decompression. + */ + List *decompression_map = list_nth(custom->custom_private, DCP_DecompressionMap); + List *is_segmentby_column = list_nth(custom->custom_private, DCP_IsSegmentbyColumn); + List *bulk_decompression_column = list_nth(custom->custom_private, DCP_BulkDecompressionColumn); + int compressed_column_index = 0; + for (; compressed_column_index < list_length(decompression_map); compressed_column_index++) + { + if (list_nth_int(decompression_map, compressed_column_index) == decompressed_var->varattno) + { + break; + } + } + Ensure(compressed_column_index < list_length(decompression_map), "compressed column not found"); + Assert(list_length(decompression_map) == list_length(bulk_decompression_column)); + const bool bulk_decompression_enabled_for_column = + list_nth_int(bulk_decompression_column, compressed_column_index); + + /* + * Bulk decompression can be disabled for all columns in the DecompressChunk + * node settings, we can't do vectorized aggregation for compressed columns + * in that case. For segmentby columns it's still possible. + */ + List *settings = linitial(custom->custom_private); + const bool bulk_decompression_enabled_globally = + list_nth_int(settings, DCS_EnableBulkDecompression); + + /* + * We support vectorized aggregation either for segmentby columns or for + * columns with bulk decompression enabled. + */ + if (!list_nth_int(is_segmentby_column, compressed_column_index) && + !(bulk_decompression_enabled_for_column && bulk_decompression_enabled_globally)) + { + /* Vectorized aggregation not possible for this particular column. */ + return false; + } + + return true; +} + /* * Where possible, replace the partial aggregation plan nodes with our own * vectorized aggregation node. The replacement is done in-place. @@ -226,12 +324,6 @@ try_insert_vector_agg_node(Plan *plan) return plan; } - if (list_length(agg->plan.targetlist) != 1) - { - /* We currently handle only one agg function per node. */ - return plan; - } - if (agg->plan.lefttree == NULL) { /* @@ -264,103 +356,19 @@ try_insert_vector_agg_node(Plan *plan) return plan; } - /* Now check the aggregate function itself. 
*/ - Node *expr_node = (Node *) castNode(TargetEntry, linitial(agg->plan.targetlist))->expr; - Assert(IsA(expr_node, Aggref)); - - Aggref *aggref = castNode(Aggref, expr_node); - - if (aggref->aggfilter != NULL) - { - /* Filter clause on aggregate is not supported. */ - return plan; - } - - if (get_vector_aggregate(aggref->aggfnoid) == NULL) - { - /* - * We don't have a vectorized implementation for this particular - * aggregate function. - */ - return plan; - } - - if (aggref->args == NIL) - { - /* This must be count(*), we can vectorize it. */ - return vector_agg_plan_create(agg, custom); - } - - /* The function must have one argument, check it. */ - Assert(list_length(aggref->args) == 1); - TargetEntry *argument = castNode(TargetEntry, linitial(aggref->args)); - if (!IsA(argument->expr, Var)) - { - /* Can aggregate only a bare decompressed column, not an expression. */ - return plan; - } - - Var *aggregated_var = castNode(Var, argument->expr); - - /* - * Check if this particular column is a segmentby or has bulk decompression - * enabled. This hook is called after set_plan_refs, and at this stage the - * output targetlist of the aggregation node uses OUTER_VAR references into - * the child scan targetlist, so first we have to translate this. - */ - Assert(aggregated_var->varno == OUTER_VAR); - TargetEntry *decompressed_target_entry = - list_nth(custom->scan.plan.targetlist, AttrNumberGetAttrOffset(aggregated_var->varattno)); - - if (!IsA(decompressed_target_entry->expr, Var)) + /* Now check the aggregate functions themselves. */ + ListCell *lc; + foreach (lc, agg->plan.targetlist) { - /* - * Can only aggregate the plain Vars. Not sure if this is redundant with - * the similar check above. - */ - return plan; - } - Var *decompressed_var = castNode(Var, decompressed_target_entry->expr); + TargetEntry *target_entry = castNode(TargetEntry, lfirst(lc)); + Assert(IsA(target_entry->expr, Aggref)); - /* - * Now, we have to translate the decompressed varno into the compressed - * column index, to check if the column supports bulk decompression. - */ - List *decompression_map = list_nth(custom->custom_private, DCP_DecompressionMap); - List *is_segmentby_column = list_nth(custom->custom_private, DCP_IsSegmentbyColumn); - List *bulk_decompression_column = list_nth(custom->custom_private, DCP_BulkDecompressionColumn); - int compressed_column_index = 0; - for (; compressed_column_index < list_length(decompression_map); compressed_column_index++) - { - if (list_nth_int(decompression_map, compressed_column_index) == decompressed_var->varattno) + Aggref *aggref = castNode(Aggref, target_entry->expr); + if (!can_vectorize_aggref(aggref, custom)) { - break; + return plan; } } - Ensure(compressed_column_index < list_length(decompression_map), "compressed column not found"); - Assert(list_length(decompression_map) == list_length(bulk_decompression_column)); - const bool bulk_decompression_enabled_for_column = - list_nth_int(bulk_decompression_column, compressed_column_index); - - /* - * Bulk decompression can be disabled for all columns in the DecompressChunk - * node settings, we can't do vectorized aggregation for compressed columns - * in that case. For segmentby columns it's still possible. - */ - List *settings = linitial(custom->custom_private); - const bool bulk_decompression_enabled_globally = - list_nth_int(settings, DCS_EnableBulkDecompression); - - /* - * We support vectorized aggregation either for segmentby columns or for - * columns with bulk decompression enabled. 
- */ - if (!list_nth_int(is_segmentby_column, compressed_column_index) && - !(bulk_decompression_enabled_for_column && bulk_decompression_enabled_globally)) - { - /* Vectorized aggregation not possible for this particular column. */ - return plan; - } /* * Finally, all requirements are satisfied and we can vectorize this partial diff --git a/tsl/test/expected/vector_agg_default.out b/tsl/test/expected/vector_agg_default.out index 3c55bdef1b3..4c9e60d43e3 100644 --- a/tsl/test/expected/vector_agg_default.out +++ b/tsl/test/expected/vector_agg_default.out @@ -135,6 +135,29 @@ explain (costs off) select sum(c) from dvagg where b in (0, 1, 3); -> Parallel Seq Scan on compress_hyper_2_4_chunk (12 rows) +select sum(a), sum(b), sum(c) from dvagg where b in (0, 1, 3); + sum | sum | sum +---------+------+------- + 1197600 | 1600 | 10782 +(1 row) + +explain (costs off) select sum(a), sum(b), sum(c) from dvagg where b in (0, 1, 3); + QUERY PLAN +------------------------------------------------------------------------------- + Finalize Aggregate + -> Gather + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk + Vectorized Filter: (b = ANY ('{0,1,3}'::integer[])) + -> Parallel Seq Scan on compress_hyper_2_2_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk + Vectorized Filter: (b = ANY ('{0,1,3}'::integer[])) + -> Parallel Seq Scan on compress_hyper_2_4_chunk +(12 rows) + reset timescaledb.enable_vectorized_aggregation; -- The runtime chunk exclusion should work. explain (costs off) select sum(c) from dvagg where a < stable_abs(1000); diff --git a/tsl/test/expected/vectorized_aggregation.out b/tsl/test/expected/vectorized_aggregation.out index 0419f47675f..39d0efed52d 100644 --- a/tsl/test/expected/vectorized_aggregation.out +++ b/tsl/test/expected/vectorized_aggregation.out @@ -1293,7 +1293,7 @@ SELECT sum(int_value) FROM testtable; (70 rows) RESET timescaledb.enable_vectorized_aggregation; --- Vectorized aggregation without bullk decompression only possible for +-- Vectorized aggregation without bulk decompression only possible for -- segmentby columns. SET timescaledb.enable_bulk_decompression = OFF; :EXPLAIN @@ -1609,7 +1609,7 @@ SELECT sum(segment_by_value), sum(segment_by_value) FROM testtable; Output: _hyper_1_1_chunk.segment_by_value (70 rows) --- Performing a sum on multiple columns is currently not supported by vectorization +-- Performing a sum on multiple columns is supported. 
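
With per-aggregate input_offset/output_offset bookkeeping in place, a single VectorAgg node can fill several result columns from one pass over each batch. Below is a minimal, standalone C sketch of that mapping; it uses simplified stand-in types (ToyAggDef with a plain int64 accumulator) rather than the real VectorAggDef and VectorAggFunctions from exec.h, and is illustrative only.

#include <stdio.h>
#include <stdint.h>

/*
 * Simplified stand-in: the real VectorAggDef carries a VectorAggFunctions
 * pointer and opaque per-function state; one int64 accumulator is enough here.
 */
typedef struct
{
	int input_offset;  /* which decompressed column feeds this aggregate */
	int output_offset; /* which result column receives the partial value */
	int64_t state;
} ToyAggDef;

int
main(void)
{
	/* One "batch": two columns of four decompressed values each. */
	int64_t columns[2][4] = { { 1, 2, 3, 4 }, { 10, 20, 30, 40 } };

	/* sum(col0) goes into result[0], sum(col1) into result[1]. */
	ToyAggDef defs[2] = { { 0, 0, 0 }, { 1, 1, 0 } };
	int64_t result[2] = { 0, 0 };

	for (int agg = 0; agg < 2; agg++)
	{
		for (int row = 0; row < 4; row++)
			defs[agg].state += columns[defs[agg].input_offset][row];
		result[defs[agg].output_offset] = defs[agg].state;
	}

	printf("sum(col0) = %lld, sum(col1) = %lld\n", (long long) result[0],
		   (long long) result[1]);
	return 0;
}

Each aggregate keeps its own state and writes its partial result into its own slot of the output tuple, which is what allows sum(a), sum(b), sum(c) to be computed by one VectorAgg node in the plans above.
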
:EXPLAIN SELECT sum(int_value), sum(segment_by_value) FROM testtable; QUERY PLAN @@ -1620,62 +1620,62 @@ SELECT sum(int_value), sum(segment_by_value) FROM testtable; Output: (PARTIAL sum(_hyper_1_1_chunk.int_value)), (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) Workers Planned: 2 -> Parallel Append - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_1_chunk.int_value), PARTIAL sum(_hyper_1_1_chunk.segment_by_value) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_1_chunk.int_value)), (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk Output: _hyper_1_1_chunk.int_value, _hyper_1_1_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_2_chunk.int_value), PARTIAL sum(_hyper_1_2_chunk.segment_by_value) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_2_chunk.int_value)), (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk Output: _hyper_1_2_chunk.int_value, _hyper_1_2_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_3_chunk.int_value), PARTIAL sum(_hyper_1_3_chunk.segment_by_value) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_3_chunk.int_value)), (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk Output: _hyper_1_3_chunk.int_value, _hyper_1_3_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_4_chunk.int_value), PARTIAL sum(_hyper_1_4_chunk.segment_by_value) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_4_chunk.int_value)), (PARTIAL sum(_hyper_1_4_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_4_chunk Output: _hyper_1_4_chunk.int_value, _hyper_1_4_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_14_chunk Output: compress_hyper_2_14_chunk._ts_meta_count, compress_hyper_2_14_chunk._ts_meta_sequence_num, compress_hyper_2_14_chunk.segment_by_value, compress_hyper_2_14_chunk._ts_meta_min_1, compress_hyper_2_14_chunk._ts_meta_max_1, compress_hyper_2_14_chunk."time", compress_hyper_2_14_chunk.int_value, compress_hyper_2_14_chunk.float_value - -> Partial Aggregate - 
Output: PARTIAL sum(_hyper_1_5_chunk.int_value), PARTIAL sum(_hyper_1_5_chunk.segment_by_value) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_5_chunk.int_value)), (PARTIAL sum(_hyper_1_5_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_5_chunk Output: _hyper_1_5_chunk.int_value, _hyper_1_5_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_15_chunk Output: compress_hyper_2_15_chunk._ts_meta_count, compress_hyper_2_15_chunk._ts_meta_sequence_num, compress_hyper_2_15_chunk.segment_by_value, compress_hyper_2_15_chunk._ts_meta_min_1, compress_hyper_2_15_chunk._ts_meta_max_1, compress_hyper_2_15_chunk."time", compress_hyper_2_15_chunk.int_value, compress_hyper_2_15_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_6_chunk.int_value), PARTIAL sum(_hyper_1_6_chunk.segment_by_value) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_6_chunk.int_value)), (PARTIAL sum(_hyper_1_6_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_6_chunk Output: _hyper_1_6_chunk.int_value, _hyper_1_6_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_16_chunk Output: compress_hyper_2_16_chunk._ts_meta_count, compress_hyper_2_16_chunk._ts_meta_sequence_num, compress_hyper_2_16_chunk.segment_by_value, compress_hyper_2_16_chunk._ts_meta_min_1, compress_hyper_2_16_chunk._ts_meta_max_1, compress_hyper_2_16_chunk."time", compress_hyper_2_16_chunk.int_value, compress_hyper_2_16_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_7_chunk.int_value), PARTIAL sum(_hyper_1_7_chunk.segment_by_value) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_7_chunk.int_value)), (PARTIAL sum(_hyper_1_7_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_7_chunk Output: _hyper_1_7_chunk.int_value, _hyper_1_7_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_17_chunk Output: compress_hyper_2_17_chunk._ts_meta_count, compress_hyper_2_17_chunk._ts_meta_sequence_num, compress_hyper_2_17_chunk.segment_by_value, compress_hyper_2_17_chunk._ts_meta_min_1, compress_hyper_2_17_chunk._ts_meta_max_1, compress_hyper_2_17_chunk."time", compress_hyper_2_17_chunk.int_value, compress_hyper_2_17_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_8_chunk.int_value), PARTIAL sum(_hyper_1_8_chunk.segment_by_value) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_8_chunk.int_value)), (PARTIAL sum(_hyper_1_8_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_8_chunk Output: _hyper_1_8_chunk.int_value, _hyper_1_8_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_18_chunk Output: compress_hyper_2_18_chunk._ts_meta_count, compress_hyper_2_18_chunk._ts_meta_sequence_num, compress_hyper_2_18_chunk.segment_by_value, compress_hyper_2_18_chunk._ts_meta_min_1, compress_hyper_2_18_chunk._ts_meta_max_1, compress_hyper_2_18_chunk."time", compress_hyper_2_18_chunk.int_value, compress_hyper_2_18_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_9_chunk.int_value), PARTIAL sum(_hyper_1_9_chunk.segment_by_value) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_9_chunk.int_value)), (PARTIAL sum(_hyper_1_9_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_9_chunk Output: 
_hyper_1_9_chunk.int_value, _hyper_1_9_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_19_chunk Output: compress_hyper_2_19_chunk._ts_meta_count, compress_hyper_2_19_chunk._ts_meta_sequence_num, compress_hyper_2_19_chunk.segment_by_value, compress_hyper_2_19_chunk._ts_meta_min_1, compress_hyper_2_19_chunk._ts_meta_max_1, compress_hyper_2_19_chunk."time", compress_hyper_2_19_chunk.int_value, compress_hyper_2_19_chunk.float_value - -> Partial Aggregate - Output: PARTIAL sum(_hyper_1_10_chunk.int_value), PARTIAL sum(_hyper_1_10_chunk.segment_by_value) + -> Custom Scan (VectorAgg) + Output: (PARTIAL sum(_hyper_1_10_chunk.int_value)), (PARTIAL sum(_hyper_1_10_chunk.segment_by_value)) -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_10_chunk Output: _hyper_1_10_chunk.int_value, _hyper_1_10_chunk.segment_by_value -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_20_chunk @@ -1686,6 +1686,12 @@ SELECT sum(int_value), sum(segment_by_value) FROM testtable; Output: _hyper_1_1_chunk.int_value, _hyper_1_1_chunk.segment_by_value (70 rows) +SELECT sum(int_value), sum(segment_by_value) FROM testtable; + sum | sum +--------+-------- + 304695 | 304695 +(1 row) + -- Using the sum function together with another non-vector capable aggregate is not supported :EXPLAIN SELECT sum(int_value), max(int_value) FROM testtable; diff --git a/tsl/test/sql/vector_agg_default.sql b/tsl/test/sql/vector_agg_default.sql index b8824307dea..8cbe5f11b59 100644 --- a/tsl/test/sql/vector_agg_default.sql +++ b/tsl/test/sql/vector_agg_default.sql @@ -41,6 +41,10 @@ select count(*) from dvagg where b > 10; explain (costs off) select sum(c) from dvagg where b in (0, 1, 3); +select sum(a), sum(b), sum(c) from dvagg where b in (0, 1, 3); + +explain (costs off) select sum(a), sum(b), sum(c) from dvagg where b in (0, 1, 3); + reset timescaledb.enable_vectorized_aggregation; diff --git a/tsl/test/sql/vectorized_aggregation.sql b/tsl/test/sql/vectorized_aggregation.sql index d195be9fd62..06faef05466 100644 --- a/tsl/test/sql/vectorized_aggregation.sql +++ b/tsl/test/sql/vectorized_aggregation.sql @@ -132,7 +132,7 @@ SELECT sum(int_value) FROM testtable; RESET timescaledb.enable_vectorized_aggregation; --- Vectorized aggregation without bullk decompression only possible for +-- Vectorized aggregation without bulk decompression only possible for -- segmentby columns. SET timescaledb.enable_bulk_decompression = OFF; @@ -154,10 +154,12 @@ SELECT sum(int_value), sum(int_value) FROM testtable; :EXPLAIN SELECT sum(segment_by_value), sum(segment_by_value) FROM testtable; --- Performing a sum on multiple columns is currently not supported by vectorization +-- Performing a sum on multiple columns is supported. 
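
The compute_single_aggregate() helper introduced above has two paths: vector input (an Arrow array of decompressed values) and constant input (a segmentby or all-default column, which is a single scalar per batch). For the constant path, "multiply this value with the number of passing decompressed tuples" amounts to the following. This is a minimal standalone sketch with a toy sum function instead of the real VectorAggFunctions callbacks; the values are made up for illustration.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/*
 * Toy version of the agg_const path: a segmentby or default compressed
 * column stores one scalar per batch, so summing it over the batch means
 * multiplying it by the number of rows that pass the vectorized quals.
 */
static void
toy_sum_const(int64_t *state, int64_t constvalue, bool constisnull, int n)
{
	if (!constisnull)
		*state += constvalue * n;
}

int
main(void)
{
	int64_t sum = 0;
	/* Batch 1: segmentby value 7, 1000 rows pass the filter. */
	toy_sum_const(&sum, 7, false, 1000);
	/* Batch 2: segmentby value 7, 250 rows pass the filter. */
	toy_sum_const(&sum, 7, false, 250);
	printf("partial sum = %lld\n", (long long) sum); /* 8750 */
	return 0;
}

The count(*) case reuses the same path with a null constant, which is why agg_const is called with (0, true, n) when the aggregate has no arguments.
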
:EXPLAIN SELECT sum(int_value), sum(segment_by_value) FROM testtable; +SELECT sum(int_value), sum(segment_by_value) FROM testtable; + -- Using the sum function together with another non-vector capable aggregate is not supported :EXPLAIN SELECT sum(int_value), max(int_value) FROM testtable; From 25cb1ae3cc16a004f693c4ca6a1046bad1de0eb1 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Fri, 12 Apr 2024 20:10:05 +0200 Subject: [PATCH 73/81] grouping policy --- tsl/src/nodes/vector_agg/CMakeLists.txt | 3 +- tsl/src/nodes/vector_agg/exec.c | 80 ++-------- tsl/src/nodes/vector_agg/exec.h | 4 +- tsl/src/nodes/vector_agg/grouping_policy.h | 21 +++ .../nodes/vector_agg/grouping_policy_all.c | 142 ++++++++++++++++++ tsl/src/nodes/vector_agg/plan.c | 1 - 6 files changed, 178 insertions(+), 73 deletions(-) create mode 100644 tsl/src/nodes/vector_agg/grouping_policy.h create mode 100644 tsl/src/nodes/vector_agg/grouping_policy_all.c diff --git a/tsl/src/nodes/vector_agg/CMakeLists.txt b/tsl/src/nodes/vector_agg/CMakeLists.txt index 428e589d92e..47dd8c1a8b6 100644 --- a/tsl/src/nodes/vector_agg/CMakeLists.txt +++ b/tsl/src/nodes/vector_agg/CMakeLists.txt @@ -1,4 +1,5 @@ set(SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/exec.c ${CMAKE_CURRENT_SOURCE_DIR}/functions.c - ${CMAKE_CURRENT_SOURCE_DIR}/plan.c) + ${CMAKE_CURRENT_SOURCE_DIR}/plan.c + ${CMAKE_CURRENT_SOURCE_DIR}/grouping_policy_all.c) target_sources(${TSL_LIBRARY_NAME} PRIVATE ${SOURCES}) diff --git a/tsl/src/nodes/vector_agg/exec.c b/tsl/src/nodes/vector_agg/exec.c index 1a3a2e649c4..f93cf620b77 100644 --- a/tsl/src/nodes/vector_agg/exec.c +++ b/tsl/src/nodes/vector_agg/exec.c @@ -97,13 +97,7 @@ vector_agg_begin(CustomScanState *node, EState *estate, int eflags) vector_agg_state->agg_defs = lappend(vector_agg_state->agg_defs, def); } - ListCell *lc; - foreach (lc, vector_agg_state->agg_defs) - { - VectorAggDef *def = lfirst(lc); - vector_agg_state->agg_states = - lappend(vector_agg_state->agg_states, palloc0(def->func->state_bytes)); - } + vector_agg_state->grouping = create_grouping_policy_all(vector_agg_state->agg_defs); } static void @@ -122,47 +116,8 @@ vector_agg_rescan(CustomScanState *node) VectorAggState *state = (VectorAggState *) node; state->input_ended = false; -} - -static void -compute_single_aggregate(DecompressBatchState *batch_state, VectorAggDef *agg_def, void *agg_state) -{ - /* - * To calculate the sum for a segment by value or default compressed - * column value, we need to multiply this value with the number of - * passing decompressed tuples in this batch. - */ - int n = batch_state->total_batch_rows; - if (batch_state->vector_qual_result) - { - n = arrow_num_valid(batch_state->vector_qual_result, n); - Assert(n > 0); - } - - if (agg_def->input_offset >= 0) - { - CompressedColumnValues *values = &batch_state->compressed_columns[agg_def->input_offset]; - Assert(values->decompression_type != DT_Invalid); - Assert(values->decompression_type != DT_Iterator); - if (values->arrow == NULL) - { - Assert(values->decompression_type == DT_Scalar); - agg_def->func->agg_const(agg_state, *values->output_value, *values->output_isnull, n); - } - else - { - agg_def->func->agg_vector(agg_state, values->arrow, batch_state->vector_qual_result); - } - } - else - { - /* - * We have only one function w/o arguments -- count(*). Unfortunately - * it has to have a special code path everywhere. 
- */ - agg_def->func->agg_const(agg_state, 0, true, n); - } + state->grouping->gp_reset(state->grouping); } static TupleTableSlot * @@ -177,14 +132,6 @@ vector_agg_exec(CustomScanState *node) return NULL; } - const int naggs = list_length(vector_agg_state->agg_defs); - for (int i = 0; i < naggs; i++) - { - VectorAggDef *agg_def = (VectorAggDef *) list_nth(vector_agg_state->agg_defs, i); - void *agg_state = (void *) list_nth(vector_agg_state->agg_states, i); - agg_def->func->agg_init(agg_state); - } - DecompressChunkState *decompress_state = (DecompressChunkState *) linitial(vector_agg_state->custom.custom_ps); @@ -197,6 +144,8 @@ vector_agg_exec(CustomScanState *node) TupleTableSlot *aggregated_slot = vector_agg_state->custom.ss.ps.ps_ResultTupleSlot; ExecClearTuple(aggregated_slot); + GroupingPolicy *grouping = vector_agg_state->grouping; + for (;;) { /* @@ -223,25 +172,18 @@ vector_agg_exec(CustomScanState *node) break; } - for (int i = 0; i < naggs; i++) - { - VectorAggDef *agg_def = (VectorAggDef *) list_nth(vector_agg_state->agg_defs, i); - void *agg_state = (void *) list_nth(vector_agg_state->agg_states, i); - compute_single_aggregate(batch_state, agg_def, agg_state); - } + grouping->gp_add_batch(grouping, batch_state); compressed_batch_discard_tuples(batch_state); - } - for (int i = 0; i < naggs; i++) - { - VectorAggDef *agg_def = (VectorAggDef *) list_nth(vector_agg_state->agg_defs, i); - void *agg_state = (void *) list_nth(vector_agg_state->agg_states, i); - agg_def->func->agg_emit(agg_state, - &aggregated_slot->tts_values[agg_def->output_offset], - &aggregated_slot->tts_isnull[agg_def->output_offset]); + if (grouping->gp_should_emit(grouping)) + { + break; + } } + grouping->gp_do_emit(grouping, aggregated_slot); + ExecStoreVirtualTuple(aggregated_slot); return aggregated_slot; diff --git a/tsl/src/nodes/vector_agg/exec.h b/tsl/src/nodes/vector_agg/exec.h index 2cb18af6e89..074758e9342 100644 --- a/tsl/src/nodes/vector_agg/exec.h +++ b/tsl/src/nodes/vector_agg/exec.h @@ -11,6 +11,7 @@ #include #include "functions.h" +#include "grouping_policy.h" typedef struct { @@ -32,8 +33,7 @@ typedef struct */ bool input_ended; - /* The following fields are related to the grouping policy. */ - List *agg_states; + GroupingPolicy *grouping; } VectorAggState; extern Node *vector_agg_state_create(CustomScan *cscan); diff --git a/tsl/src/nodes/vector_agg/grouping_policy.h b/tsl/src/nodes/vector_agg/grouping_policy.h new file mode 100644 index 00000000000..e3cef709134 --- /dev/null +++ b/tsl/src/nodes/vector_agg/grouping_policy.h @@ -0,0 +1,21 @@ +/* + * This file and its contents are licensed under the Timescale License. + * Please see the included NOTICE for copyright information and + * LICENSE-TIMESCALE for a copy of the license. 
+ */ +#pragma once + +typedef struct DecompressBatchState DecompressBatchState; + +typedef struct GroupingPolicy GroupingPolicy; + +typedef struct GroupingPolicy +{ + void (*gp_reset)(GroupingPolicy *gp); + void (*gp_add_batch)(GroupingPolicy *gp, DecompressBatchState *batch_state); + bool (*gp_should_emit)(GroupingPolicy *gp); + void (*gp_do_emit)(GroupingPolicy *gp, TupleTableSlot *aggregated_slot); + void (*gp_destroy)(GroupingPolicy *gp); +} GroupingPolicy; + +extern GroupingPolicy *create_grouping_policy_all(List *agg_defs); diff --git a/tsl/src/nodes/vector_agg/grouping_policy_all.c b/tsl/src/nodes/vector_agg/grouping_policy_all.c new file mode 100644 index 00000000000..0043ee60b13 --- /dev/null +++ b/tsl/src/nodes/vector_agg/grouping_policy_all.c @@ -0,0 +1,142 @@ +/* + * This file and its contents are licensed under the Timescale License. + * Please see the included NOTICE for copyright information and + * LICENSE-TIMESCALE for a copy of the license. + */ + +#include + +#include +#include + +#include "grouping_policy.h" + +#include "nodes/vector_agg/exec.h" +#include "nodes/decompress_chunk/compressed_batch.h" + +typedef struct +{ + GroupingPolicy funcs; + List *agg_defs; + List *agg_states; +} GroupingPolicyAll; + +static const GroupingPolicy grouping_policy_all_functions; + +static void +gp_all_reset(GroupingPolicy *obj) +{ + GroupingPolicyAll *policy = (GroupingPolicyAll *) obj; + const int naggs = list_length(policy->agg_defs); + for (int i = 0; i < naggs; i++) + { + VectorAggDef *agg_def = (VectorAggDef *) list_nth(policy->agg_defs, i); + void *agg_state = (void *) list_nth(policy->agg_states, i); + agg_def->func->agg_init(agg_state); + } +} + +GroupingPolicy * +create_grouping_policy_all(List *agg_defs) +{ + GroupingPolicyAll *policy = palloc0(sizeof(GroupingPolicyAll)); + policy->funcs = grouping_policy_all_functions; + policy->agg_defs = agg_defs; + ListCell *lc; + foreach (lc, agg_defs) + { + VectorAggDef *def = lfirst(lc); + policy->agg_states = lappend(policy->agg_states, palloc0(def->func->state_bytes)); + } + gp_all_reset(&policy->funcs); + return &policy->funcs; +} + +static void +compute_single_aggregate(DecompressBatchState *batch_state, VectorAggDef *agg_def, void *agg_state) +{ + /* + * To calculate the sum for a segment by value or default compressed + * column value, we need to multiply this value with the number of + * passing decompressed tuples in this batch. + */ + int n = batch_state->total_batch_rows; + if (batch_state->vector_qual_result) + { + n = arrow_num_valid(batch_state->vector_qual_result, n); + Assert(n > 0); + } + + if (agg_def->input_offset >= 0) + { + CompressedColumnValues *values = &batch_state->compressed_columns[agg_def->input_offset]; + Assert(values->decompression_type != DT_Invalid); + Assert(values->decompression_type != DT_Iterator); + + if (values->arrow == NULL) + { + Assert(values->decompression_type == DT_Scalar); + agg_def->func->agg_const(agg_state, *values->output_value, *values->output_isnull, n); + } + else + { + agg_def->func->agg_vector(agg_state, values->arrow, batch_state->vector_qual_result); + } + } + else + { + /* + * We have only one function w/o arguments -- count(*). Unfortunately + * it has to have a special code path everywhere. 
+ */ + agg_def->func->agg_const(agg_state, 0, true, n); + } +} + +static void +gp_all_add_batch(GroupingPolicy *gp, DecompressBatchState *batch_state) +{ + GroupingPolicyAll *policy = (GroupingPolicyAll *) gp; + const int naggs = list_length(policy->agg_defs); + for (int i = 0; i < naggs; i++) + { + VectorAggDef *agg_def = (VectorAggDef *) list_nth(policy->agg_defs, i); + void *agg_state = (void *) list_nth(policy->agg_states, i); + compute_single_aggregate(batch_state, agg_def, agg_state); + } +} + +static bool +gp_all_should_emit(GroupingPolicy *gp) +{ +#ifndef NDEBUG + /* To have more code coverage in debug mode. */ + return true; +#else + return false; +#endif +} + +static void +gp_all_do_emit(GroupingPolicy *gp, TupleTableSlot *aggregated_slot) +{ + GroupingPolicyAll *policy = (GroupingPolicyAll *) gp; + const int naggs = list_length(policy->agg_defs); + for (int i = 0; i < naggs; i++) + { + VectorAggDef *agg_def = (VectorAggDef *) list_nth(policy->agg_defs, i); + void *agg_state = (void *) list_nth(policy->agg_states, i); + agg_def->func->agg_emit(agg_state, + &aggregated_slot->tts_values[agg_def->output_offset], + &aggregated_slot->tts_isnull[agg_def->output_offset]); + } + + gp_all_reset(gp); +} + +static const GroupingPolicy grouping_policy_all_functions = { + .gp_reset = gp_all_reset, + .gp_add_batch = gp_all_add_batch, + .gp_should_emit = gp_all_should_emit, + .gp_do_emit = gp_all_do_emit, +}; diff --git a/tsl/src/nodes/vector_agg/plan.c b/tsl/src/nodes/vector_agg/plan.c index 51c393f8e6f..c6eeaf411b8 100644 --- a/tsl/src/nodes/vector_agg/plan.c +++ b/tsl/src/nodes/vector_agg/plan.c @@ -1,4 +1,3 @@ - /* * This file and its contents are licensed under the Timescale License. * Please see the included NOTICE for copyright information and From 32f88eda6d445a9032133f8c46a5f596c7bbfdaf Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Fri, 12 Apr 2024 20:26:53 +0200 Subject: [PATCH 74/81] tmp --- tsl/src/nodes/vector_agg/grouping_policy_all.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tsl/src/nodes/vector_agg/grouping_policy_all.c b/tsl/src/nodes/vector_agg/grouping_policy_all.c index 0043ee60b13..3863d283efb 100644 --- a/tsl/src/nodes/vector_agg/grouping_policy_all.c +++ b/tsl/src/nodes/vector_agg/grouping_policy_all.c @@ -109,12 +109,7 @@ gp_all_add_batch(GroupingPolicy *gp, DecompressBatchState *batch_state) static bool gp_all_should_emit(GroupingPolicy *gp) { -#ifndef NDEBUG - /* To have more code coverage in debug mode. */ - return true; -#else return false; -#endif } static void From f551c1d14598e9ed78647256ed7b999b0b044391 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Wed, 17 Apr 2024 14:01:56 +0200 Subject: [PATCH 75/81] more checks? --- tsl/src/nodes/vector_agg/plan.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/tsl/src/nodes/vector_agg/plan.c b/tsl/src/nodes/vector_agg/plan.c index c6eeaf411b8..5137dc65fe0 100644 --- a/tsl/src/nodes/vector_agg/plan.c +++ b/tsl/src/nodes/vector_agg/plan.c @@ -158,6 +158,30 @@ can_vectorize_aggref(Aggref *aggref, CustomScan *custom) return false; } + if (aggref->aggdirectargs != NIL) + { + /* Can't process ordered-set agregates with direct arguments. */ + return false; + } + + if (aggref->aggorder != NIL) + { + /* Can't process aggregates with an ORDER BY clause. */ + return false; + } + + if (aggref->aggdistinct != NIL) + { + /* Can't process aggregates with DISTINCT clause. 
*/ + return false; + } + + if (aggref->aggfilter != NULL) + { + /* Can't process aggregates with filter clause. */ + return false; + } + if (get_vector_aggregate(aggref->aggfnoid) == NULL) { /* From dd65d7c67e035b729b33c1e92fe2fbf5bf9aff85 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Wed, 17 Apr 2024 15:32:53 +0200 Subject: [PATCH 76/81] grouping on segmentby --- tsl/src/nodes/vector_agg/CMakeLists.txt | 6 +- tsl/src/nodes/vector_agg/exec.c | 103 ++-- tsl/src/nodes/vector_agg/exec.h | 8 + tsl/src/nodes/vector_agg/grouping_policy.h | 2 + .../vector_agg/grouping_policy_segmentby.c | 180 +++++++ tsl/src/nodes/vector_agg/plan.c | 179 ++++--- tsl/test/expected/vectorized_aggregation.out | 499 +++++++++++++----- tsl/test/sql/vectorized_aggregation.sql | 11 +- 8 files changed, 757 insertions(+), 231 deletions(-) create mode 100644 tsl/src/nodes/vector_agg/grouping_policy_segmentby.c diff --git a/tsl/src/nodes/vector_agg/CMakeLists.txt b/tsl/src/nodes/vector_agg/CMakeLists.txt index 47dd8c1a8b6..8d3f3e8b2e4 100644 --- a/tsl/src/nodes/vector_agg/CMakeLists.txt +++ b/tsl/src/nodes/vector_agg/CMakeLists.txt @@ -1,5 +1,7 @@ set(SOURCES - ${CMAKE_CURRENT_SOURCE_DIR}/exec.c ${CMAKE_CURRENT_SOURCE_DIR}/functions.c + ${CMAKE_CURRENT_SOURCE_DIR}/exec.c + ${CMAKE_CURRENT_SOURCE_DIR}/functions.c ${CMAKE_CURRENT_SOURCE_DIR}/plan.c - ${CMAKE_CURRENT_SOURCE_DIR}/grouping_policy_all.c) + ${CMAKE_CURRENT_SOURCE_DIR}/grouping_policy_all.c + ${CMAKE_CURRENT_SOURCE_DIR}/grouping_policy_segmentby.c) target_sources(${TSL_LIBRARY_NAME} PRIVATE ${SOURCES}) diff --git a/tsl/src/nodes/vector_agg/exec.c b/tsl/src/nodes/vector_agg/exec.c index f93cf620b77..deeb50becff 100644 --- a/tsl/src/nodes/vector_agg/exec.c +++ b/tsl/src/nodes/vector_agg/exec.c @@ -12,7 +12,7 @@ #include #include -#include "exec.h" +#include "nodes/vector_agg/exec.h" #include "compression/arrow_c_data_interface.h" #include "functions.h" @@ -21,6 +21,29 @@ #include "nodes/decompress_chunk/exec.h" #include "nodes/vector_agg.h" +static void +get_input_offset(DecompressChunkState *decompress_state, Var *var, int *input_offset) +{ + DecompressContext *dcontext = &decompress_state->decompress_context; + + CompressionColumnDescription *value_column_description = NULL; + for (int i = 0; i < dcontext->num_data_columns; i++) + { + CompressionColumnDescription *current_column = &dcontext->compressed_chunk_columns[i]; + if (current_column->output_attno == var->varattno) + { + value_column_description = current_column; + break; + } + } + Ensure(value_column_description != NULL, "aggregated compressed column not found"); + + Assert(value_column_description->type == COMPRESSED_COLUMN || + value_column_description->type == SEGMENTBY_COLUMN); + + *input_offset = value_column_description - dcontext->compressed_chunk_columns; +} + static void vector_agg_begin(CustomScanState *node, EState *estate, int eflags) { @@ -50,54 +73,57 @@ vector_agg_begin(CustomScanState *node, EState *estate, int eflags) { /* Determine which kind of vectorized aggregation we should perform */ TargetEntry *tlentry = (TargetEntry *) list_nth(aggregated_tlist, i); - Assert(IsA(tlentry->expr, Aggref)); - Aggref *aggref = castNode(Aggref, tlentry->expr); - - VectorAggDef *def = palloc0(sizeof(VectorAggDef)); - VectorAggFunctions *func = get_vector_aggregate(aggref->aggfnoid); - Assert(func != NULL); - def->func = func; - - if (list_length(aggref->args) > 0) + if (IsA(tlentry->expr, Aggref)) { - Assert(list_length(aggref->args) == 1); + Aggref 
*aggref = castNode(Aggref, tlentry->expr); - /* The aggregate should be a partial aggregate */ - Assert(aggref->aggsplit == AGGSPLIT_INITIAL_SERIAL); + VectorAggDef *def = palloc0(sizeof(VectorAggDef)); + VectorAggFunctions *func = get_vector_aggregate(aggref->aggfnoid); + Assert(func != NULL); + def->func = func; - Var *var = castNode(Var, castNode(TargetEntry, linitial(aggref->args))->expr); + if (list_length(aggref->args) > 0) + { + Assert(list_length(aggref->args) == 1); - DecompressContext *dcontext = &decompress_state->decompress_context; + /* The aggregate should be a partial aggregate */ + Assert(aggref->aggsplit == AGGSPLIT_INITIAL_SERIAL); - CompressionColumnDescription *value_column_description = NULL; - for (int i = 0; i < dcontext->num_data_columns; i++) + Var *var = castNode(Var, castNode(TargetEntry, linitial(aggref->args))->expr); + get_input_offset(decompress_state, var, &def->input_offset); + } + else { - CompressionColumnDescription *current_column = - &dcontext->compressed_chunk_columns[i]; - if (current_column->output_attno == var->varattno) - { - value_column_description = current_column; - break; - } + def->input_offset = -1; } - Ensure(value_column_description != NULL, "aggregated compressed column not found"); - Assert(value_column_description->type == COMPRESSED_COLUMN || - value_column_description->type == SEGMENTBY_COLUMN); + def->output_offset = i; - def->input_offset = value_column_description - dcontext->compressed_chunk_columns; + vector_agg_state->agg_defs = lappend(vector_agg_state->agg_defs, def); } else { - def->input_offset = -1; + Assert(IsA(tlentry->expr, Var)); + Var *var = castNode(Var, tlentry->expr); + GroupingColumn *col = palloc0(sizeof(GroupingColumn)); + col->output_offset = i; + get_input_offset(decompress_state, var, &col->input_offset); + vector_agg_state->output_grouping_columns = + lappend(vector_agg_state->output_grouping_columns, col); } - - def->output_offset = i; - - vector_agg_state->agg_defs = lappend(vector_agg_state->agg_defs, def); } - vector_agg_state->grouping = create_grouping_policy_all(vector_agg_state->agg_defs); + List *grouping_column_offsets = linitial(cscan->custom_private); + if (grouping_column_offsets == NIL) + { + vector_agg_state->grouping = create_grouping_policy_all(vector_agg_state->agg_defs); + } + else + { + vector_agg_state->grouping = + create_grouping_policy_segmentby(vector_agg_state->agg_defs, + vector_agg_state->output_grouping_columns); + } } static void @@ -146,6 +172,7 @@ vector_agg_exec(CustomScanState *node) GroupingPolicy *grouping = vector_agg_state->grouping; + bool have_tuples_this_loop = false; for (;;) { /* @@ -172,6 +199,8 @@ vector_agg_exec(CustomScanState *node) break; } + have_tuples_this_loop = true; + grouping->gp_add_batch(grouping, batch_state); compressed_batch_discard_tuples(batch_state); @@ -182,6 +211,12 @@ vector_agg_exec(CustomScanState *node) } } + if (!have_tuples_this_loop) + { + Assert(vector_agg_state->input_ended); + return NULL; + } + grouping->gp_do_emit(grouping, aggregated_slot); ExecStoreVirtualTuple(aggregated_slot); diff --git a/tsl/src/nodes/vector_agg/exec.h b/tsl/src/nodes/vector_agg/exec.h index 074758e9342..87258459319 100644 --- a/tsl/src/nodes/vector_agg/exec.h +++ b/tsl/src/nodes/vector_agg/exec.h @@ -20,12 +20,20 @@ typedef struct int output_offset; } VectorAggDef; +typedef struct +{ + int input_offset; + int output_offset; +} GroupingColumn; + typedef struct { CustomScanState custom; List *agg_defs; + List *output_grouping_columns; + /* * We can't call the 
underlying scan after it has ended, or it will be * restarted. This is the behavior of Postgres heap scans. So we have to diff --git a/tsl/src/nodes/vector_agg/grouping_policy.h b/tsl/src/nodes/vector_agg/grouping_policy.h index e3cef709134..586f566fb7b 100644 --- a/tsl/src/nodes/vector_agg/grouping_policy.h +++ b/tsl/src/nodes/vector_agg/grouping_policy.h @@ -19,3 +19,5 @@ typedef struct GroupingPolicy } GroupingPolicy; extern GroupingPolicy *create_grouping_policy_all(List *agg_defs); + +extern GroupingPolicy *create_grouping_policy_segmentby(List *agg_defs, List *grouping_columns); diff --git a/tsl/src/nodes/vector_agg/grouping_policy_segmentby.c b/tsl/src/nodes/vector_agg/grouping_policy_segmentby.c new file mode 100644 index 00000000000..ba9c02ccd79 --- /dev/null +++ b/tsl/src/nodes/vector_agg/grouping_policy_segmentby.c @@ -0,0 +1,180 @@ +/* + * This file and its contents are licensed under the Timescale License. + * Please see the included NOTICE for copyright information and + * LICENSE-TIMESCALE for a copy of the license. + */ + +#include + +#include +#include + +#include "grouping_policy.h" + +#include "nodes/vector_agg/exec.h" +#include "nodes/decompress_chunk/compressed_batch.h" + +typedef struct +{ + GroupingPolicy funcs; + List *agg_defs; + List *agg_states; + List *output_grouping_columns; + Datum *output_grouping_values; + bool *output_grouping_isnull; +} GroupingPolicySegmentby; + +static const GroupingPolicy grouping_policy_segmentby_functions; + +static void +gp_segmentby_reset(GroupingPolicy *obj) +{ + GroupingPolicySegmentby *policy = (GroupingPolicySegmentby *) obj; + const int naggs = list_length(policy->agg_defs); + for (int i = 0; i < naggs; i++) + { + VectorAggDef *agg_def = (VectorAggDef *) list_nth(policy->agg_defs, i); + void *agg_state = (void *) list_nth(policy->agg_states, i); + agg_def->func->agg_init(agg_state); + } + + const int ngrp = list_length(policy->output_grouping_columns); + for (int i = 0; i < ngrp; i++) + { + policy->output_grouping_values[i] = 0; + policy->output_grouping_isnull[i] = true; + } +} + +GroupingPolicy * +create_grouping_policy_segmentby(List *agg_defs, List *output_grouping_columns) +{ + GroupingPolicySegmentby *policy = palloc0(sizeof(GroupingPolicySegmentby)); + policy->funcs = grouping_policy_segmentby_functions; + policy->output_grouping_columns = output_grouping_columns; + policy->agg_defs = agg_defs; + ListCell *lc; + foreach (lc, agg_defs) + { + VectorAggDef *def = lfirst(lc); + policy->agg_states = lappend(policy->agg_states, palloc0(def->func->state_bytes)); + } + policy->output_grouping_values = + (Datum *) palloc0(MAXALIGN(list_length(output_grouping_columns) * sizeof(Datum)) + + MAXALIGN(list_length(output_grouping_columns) * sizeof(bool))); + policy->output_grouping_isnull = + (bool *) ((char *) policy->output_grouping_values + + MAXALIGN(list_length(output_grouping_columns) * sizeof(Datum))); + gp_segmentby_reset(&policy->funcs); + return &policy->funcs; +} + +static void +compute_single_aggregate(DecompressBatchState *batch_state, VectorAggDef *agg_def, void *agg_state) +{ + /* + * To calculate the sum for a segment by value or default compressed + * column value, we need to multiply this value with the number of + * passing decompressed tuples in this batch. 
+ */ + int n = batch_state->total_batch_rows; + if (batch_state->vector_qual_result) + { + n = arrow_num_valid(batch_state->vector_qual_result, n); + Assert(n > 0); + } + + if (agg_def->input_offset >= 0) + { + CompressedColumnValues *values = &batch_state->compressed_columns[agg_def->input_offset]; + Assert(values->decompression_type != DT_Invalid); + Assert(values->decompression_type != DT_Iterator); + + if (values->arrow == NULL) + { + Assert(values->decompression_type == DT_Scalar); + agg_def->func->agg_const(agg_state, *values->output_value, *values->output_isnull, n); + } + else + { + agg_def->func->agg_vector(agg_state, values->arrow, batch_state->vector_qual_result); + } + } + else + { + /* + * We have only one function w/o arguments -- count(*). Unfortunately + * it has to have a special code path everywhere. + */ + agg_def->func->agg_const(agg_state, 0, true, n); + } +} + +static void +gp_segmentby_add_batch(GroupingPolicy *gp, DecompressBatchState *batch_state) +{ + GroupingPolicySegmentby *policy = (GroupingPolicySegmentby *) gp; + const int naggs = list_length(policy->agg_defs); + for (int i = 0; i < naggs; i++) + { + VectorAggDef *agg_def = (VectorAggDef *) list_nth(policy->agg_defs, i); + void *agg_state = (void *) list_nth(policy->agg_states, i); + compute_single_aggregate(batch_state, agg_def, agg_state); + } + + const int ngrp = list_length(policy->output_grouping_columns); + for (int i = 0; i < ngrp; i++) + { + GroupingColumn *col = list_nth(policy->output_grouping_columns, i); + Assert(col->input_offset >= 0); + Assert(col->output_offset >= 0); + + CompressedColumnValues *values = &batch_state->compressed_columns[col->input_offset]; + Assert(values->decompression_type == DT_Scalar); + + /* FIXME do proper copy here? */ + policy->output_grouping_values[i] = *values->output_value; + policy->output_grouping_isnull[i] = *values->output_isnull; + } +} + +static bool +gp_segmentby_should_emit(GroupingPolicy *gp) +{ + return true; +} + +static void +gp_segmentby_do_emit(GroupingPolicy *gp, TupleTableSlot *aggregated_slot) +{ + GroupingPolicySegmentby *policy = (GroupingPolicySegmentby *) gp; + const int naggs = list_length(policy->agg_defs); + for (int i = 0; i < naggs; i++) + { + VectorAggDef *agg_def = (VectorAggDef *) list_nth(policy->agg_defs, i); + void *agg_state = (void *) list_nth(policy->agg_states, i); + agg_def->func->agg_emit(agg_state, + &aggregated_slot->tts_values[agg_def->output_offset], + &aggregated_slot->tts_isnull[agg_def->output_offset]); + } + + const int ngrp = list_length(policy->output_grouping_columns); + for (int i = 0; i < ngrp; i++) + { + GroupingColumn *col = list_nth(policy->output_grouping_columns, i); + Assert(col->input_offset >= 0); + Assert(col->output_offset >= 0); + + aggregated_slot->tts_values[col->output_offset] = policy->output_grouping_values[i]; + aggregated_slot->tts_isnull[col->output_offset] = policy->output_grouping_isnull[i]; + } + + gp_segmentby_reset(gp); +} + +static const GroupingPolicy grouping_policy_segmentby_functions = { + .gp_reset = gp_segmentby_reset, + .gp_add_batch = gp_segmentby_add_batch, + .gp_should_emit = gp_segmentby_should_emit, + .gp_do_emit = gp_segmentby_do_emit, +}; diff --git a/tsl/src/nodes/vector_agg/plan.c b/tsl/src/nodes/vector_agg/plan.c index 5137dc65fe0..30d56f326d2 100644 --- a/tsl/src/nodes/vector_agg/plan.c +++ b/tsl/src/nodes/vector_agg/plan.c @@ -146,67 +146,27 @@ vector_agg_plan_create(Agg *agg, CustomScan *decompress_chunk) custom->scan.plan.extParam = bms_copy(agg->plan.extParam); 
custom->scan.plan.allParam = bms_copy(agg->plan.allParam); + List *grouping_col_offsets = NIL; + for (int i = 0; i < agg->numCols; i++) + { + grouping_col_offsets = + lappend_int(grouping_col_offsets, AttrNumberGetAttrOffset(agg->grpColIdx[i])); + } + custom->custom_private = list_make1(grouping_col_offsets); + return (Plan *) custom; } static bool -can_vectorize_aggref(Aggref *aggref, CustomScan *custom) +is_vector_var(CustomScan *custom, Expr *expr, bool *out_is_segmentby) { - if (aggref->aggfilter != NULL) - { - /* Filter clause on aggregate is not supported. */ - return false; - } - - if (aggref->aggdirectargs != NIL) - { - /* Can't process ordered-set agregates with direct arguments. */ - return false; - } - - if (aggref->aggorder != NIL) - { - /* Can't process aggregates with an ORDER BY clause. */ - return false; - } - - if (aggref->aggdistinct != NIL) - { - /* Can't process aggregates with DISTINCT clause. */ - return false; - } - - if (aggref->aggfilter != NULL) - { - /* Can't process aggregates with filter clause. */ - return false; - } - - if (get_vector_aggregate(aggref->aggfnoid) == NULL) - { - /* - * We don't have a vectorized implementation for this particular - * aggregate function. - */ - return false; - } - - if (aggref->args == NIL) - { - /* This must be count(*), we can vectorize it. */ - return true; - } - - /* The function must have one argument, check it. */ - Assert(list_length(aggref->args) == 1); - TargetEntry *argument = castNode(TargetEntry, linitial(aggref->args)); - if (!IsA(argument->expr, Var)) + if (!IsA(expr, Var)) { /* Can aggregate only a bare decompressed column, not an expression. */ return false; } - Var *aggregated_var = castNode(Var, argument->expr); + Var *aggregated_var = castNode(Var, expr); /* * Check if this particular column is a segmentby or has bulk decompression @@ -257,11 +217,20 @@ can_vectorize_aggref(Aggref *aggref, CustomScan *custom) const bool bulk_decompression_enabled_globally = list_nth_int(settings, DCS_EnableBulkDecompression); + /* + * Check if this column is a segmentby. + */ + const bool is_segmentby = list_nth_int(is_segmentby_column, compressed_column_index); + if (out_is_segmentby) + { + *out_is_segmentby = is_segmentby; + } + /* * We support vectorized aggregation either for segmentby columns or for * columns with bulk decompression enabled. */ - if (!list_nth_int(is_segmentby_column, compressed_column_index) && + if (!is_segmentby && !(bulk_decompression_enabled_for_column && bulk_decompression_enabled_globally)) { /* Vectorized aggregation not possible for this particular column. */ @@ -271,6 +240,95 @@ can_vectorize_aggref(Aggref *aggref, CustomScan *custom) return true; } +static bool +can_vectorize_aggref(Aggref *aggref, CustomScan *custom) +{ + if (aggref->aggfilter != NULL) + { + /* Filter clause on aggregate is not supported. */ + return false; + } + + if (aggref->aggdirectargs != NIL) + { + /* Can't process ordered-set agregates with direct arguments. */ + return false; + } + + if (aggref->aggorder != NIL) + { + /* Can't process aggregates with an ORDER BY clause. */ + return false; + } + + if (aggref->aggdistinct != NIL) + { + /* Can't process aggregates with DISTINCT clause. */ + return false; + } + + if (aggref->aggfilter != NULL) + { + /* Can't process aggregates with filter clause. */ + return false; + } + + if (get_vector_aggregate(aggref->aggfnoid) == NULL) + { + /* + * We don't have a vectorized implementation for this particular + * aggregate function. 
+ */ + return false; + } + + if (aggref->args == NIL) + { + /* This must be count(*), we can vectorize it. */ + return true; + } + + /* The function must have one argument, check it. */ + Assert(list_length(aggref->args) == 1); + TargetEntry *argument = castNode(TargetEntry, linitial(aggref->args)); + if (!is_vector_var(custom, argument->expr, NULL)) + { + return false; + } + + return true; +} + +static bool +can_vectorize_grouping(Agg *agg, CustomScan *custom) +{ + if (agg->numCols == 0) + { + return true; + } + + for (int i = 0; i < agg->numCols; i++) + { + int offset = AttrNumberGetAttrOffset(agg->grpColIdx[i]); + TargetEntry *entry = list_nth(agg->plan.targetlist, offset); + fprintf(stderr, "grpColIdx[%d] = %d, entry:\n", i, offset); + my_print(entry); + + bool is_segmentby = false; + if (!is_vector_var(custom, entry->expr, &is_segmentby)) + { + return false; + } + + if (!is_segmentby) + { + return false; + } + } + + return true; +} + /* * Where possible, replace the partial aggregation plan nodes with our own * vectorized aggregation node. The replacement is done in-place. @@ -325,12 +383,6 @@ try_insert_vector_agg_node(Plan *plan) return plan; } - if (agg->numCols != 0) - { - /* No GROUP BY support for now. */ - return plan; - } - if (agg->groupingSets != NIL) { /* No GROUPING SETS support. */ @@ -379,12 +431,21 @@ try_insert_vector_agg_node(Plan *plan) return plan; } + if (!can_vectorize_grouping(agg, custom)) + { + /* No GROUP BY support for now. */ + return plan; + } + /* Now check the aggregate functions themselves. */ ListCell *lc; foreach (lc, agg->plan.targetlist) { TargetEntry *target_entry = castNode(TargetEntry, lfirst(lc)); - Assert(IsA(target_entry->expr, Aggref)); + if (!IsA(target_entry->expr, Aggref)) + { + continue; + } Aggref *aggref = castNode(Aggref, target_entry->expr); if (!can_vectorize_aggref(aggref, custom)) diff --git a/tsl/test/expected/vectorized_aggregation.out b/tsl/test/expected/vectorized_aggregation.out index 39d0efed52d..20ec65a1149 100644 --- a/tsl/test/expected/vectorized_aggregation.out +++ b/tsl/test/expected/vectorized_aggregation.out @@ -467,6 +467,7 @@ SELECT sum(segment_by_value) FROM testtable GROUP BY int_value; Output: _hyper_1_10_chunk.segment_by_value, _hyper_1_10_chunk.int_value (39 rows) +-- FIXME here the partitionwise aggregation plan is not chosen due to costs? :EXPLAIN SELECT sum(int_value) FROM testtable GROUP BY segment_by_value; QUERY PLAN @@ -512,145 +513,377 @@ SELECT sum(int_value) FROM testtable GROUP BY segment_by_value; Output: _hyper_1_10_chunk.int_value, _hyper_1_10_chunk.segment_by_value (39 rows) --- Vectorization not possible due to two variables and grouping +-- Vectorization possible with grouping by a segmentby column. 
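
At this point the executor no longer aggregates inline: vector_agg_begin() reads the grouping column offsets the planner packed into custom_private and picks a GroupingPolicy, the "all rows in one group" policy when that list is empty and the per-segmentby policy otherwise, while vector_agg_exec() only drives the policy callbacks. Below is a standalone C sketch of that cycle for the segmentby case, using simplified stand-in types instead of the real GroupingPolicy and DecompressBatchState; like the code above, it assumes each batch carries exactly one scalar value per segmentby column.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

typedef struct ToyBatch
{
	int64_t segment_value; /* scalar segmentby value of the whole batch */
	int64_t value_sum;	   /* per-batch sum of the aggregated column */
} ToyBatch;

typedef struct ToyPolicy ToyPolicy;
struct ToyPolicy
{
	void (*reset)(ToyPolicy *p);
	void (*add_batch)(ToyPolicy *p, const ToyBatch *batch);
	bool (*should_emit)(const ToyPolicy *p);
	void (*emit)(ToyPolicy *p);
	int64_t group_key;
	int64_t agg_state;
};

static void
toy_reset(ToyPolicy *p)
{
	p->group_key = 0;
	p->agg_state = 0;
}

static void
toy_add_batch(ToyPolicy *p, const ToyBatch *batch)
{
	/* Like the DT_Scalar copy of the segmentby value into the output row. */
	p->group_key = batch->segment_value;
	p->agg_state += batch->value_sum;
}

/* Emit after every batch; the parent aggregate merges the partials. */
static bool
toy_should_emit(const ToyPolicy *p)
{
	return true;
}

static void
toy_emit(ToyPolicy *p)
{
	printf("group %lld -> partial sum %lld\n", (long long) p->group_key,
		   (long long) p->agg_state);
	toy_reset(p);
}

int
main(void)
{
	ToyPolicy policy = { toy_reset, toy_add_batch, toy_should_emit, toy_emit, 0, 0 };
	ToyBatch batches[3] = { { 1, 500 }, { 1, 70 }, { 2, 999 } };

	policy.reset(&policy);
	for (int i = 0; i < 3; i++)
	{
		policy.add_batch(&policy, &batches[i]);
		if (policy.should_emit(&policy))
			policy.emit(&policy);
	}
	return 0;
}

This mirrors gp_segmentby_should_emit() returning true unconditionally, so each batch yields one partial row keyed by its segmentby value, and the Finalize aggregate shown in these plans combines the partials per group key.
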
:EXPLAIN -SELECT sum(segment_by_value), segment_by_value FROM testtable GROUP BY segment_by_value; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Finalize HashAggregate - Output: sum(_hyper_1_1_chunk.segment_by_value), _hyper_1_1_chunk.segment_by_value - Group Key: _hyper_1_1_chunk.segment_by_value - -> Gather - Output: _hyper_1_1_chunk.segment_by_value, (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) - Workers Planned: 2 - -> Parallel Append - -> Partial HashAggregate - Output: _hyper_1_1_chunk.segment_by_value, PARTIAL sum(_hyper_1_1_chunk.segment_by_value) - Group Key: _hyper_1_1_chunk.segment_by_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk - Output: _hyper_1_1_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk - Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - -> Partial HashAggregate - Output: _hyper_1_2_chunk.segment_by_value, PARTIAL sum(_hyper_1_2_chunk.segment_by_value) - Group Key: _hyper_1_2_chunk.segment_by_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk - Output: _hyper_1_2_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk - Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - -> Partial HashAggregate - Output: _hyper_1_3_chunk.segment_by_value, PARTIAL sum(_hyper_1_3_chunk.segment_by_value) - Group Key: _hyper_1_3_chunk.segment_by_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk - Output: _hyper_1_3_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk - Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value - -> Partial HashAggregate - Output: _hyper_1_4_chunk.segment_by_value, PARTIAL sum(_hyper_1_4_chunk.segment_by_value) - Group Key: _hyper_1_4_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_4_chunk - Output: _hyper_1_4_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_5_chunk.segment_by_value, PARTIAL sum(_hyper_1_5_chunk.segment_by_value) - Group Key: _hyper_1_5_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_5_chunk - Output: _hyper_1_5_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_6_chunk.segment_by_value, PARTIAL sum(_hyper_1_6_chunk.segment_by_value) - 
Group Key: _hyper_1_6_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_6_chunk - Output: _hyper_1_6_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_7_chunk.segment_by_value, PARTIAL sum(_hyper_1_7_chunk.segment_by_value) - Group Key: _hyper_1_7_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_7_chunk - Output: _hyper_1_7_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_8_chunk.segment_by_value, PARTIAL sum(_hyper_1_8_chunk.segment_by_value) - Group Key: _hyper_1_8_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_8_chunk - Output: _hyper_1_8_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_9_chunk.segment_by_value, PARTIAL sum(_hyper_1_9_chunk.segment_by_value) - Group Key: _hyper_1_9_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_9_chunk - Output: _hyper_1_9_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_10_chunk.segment_by_value, PARTIAL sum(_hyper_1_10_chunk.segment_by_value) - Group Key: _hyper_1_10_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_10_chunk - Output: _hyper_1_10_chunk.segment_by_value +SELECT sum(segment_by_value), segment_by_value FROM testtable GROUP BY segment_by_value ORDER BY 1, 2; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: (sum(_hyper_1_1_chunk.segment_by_value)), _hyper_1_1_chunk.segment_by_value + Sort Key: (sum(_hyper_1_1_chunk.segment_by_value)), _hyper_1_1_chunk.segment_by_value + -> Finalize HashAggregate + Output: sum(_hyper_1_1_chunk.segment_by_value), _hyper_1_1_chunk.segment_by_value + Group Key: _hyper_1_1_chunk.segment_by_value + -> Gather + Output: _hyper_1_1_chunk.segment_by_value, (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + Output: _hyper_1_1_chunk.segment_by_value, (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk + Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value + -> Custom Scan (VectorAgg) + Output: _hyper_1_2_chunk.segment_by_value, (PARTIAL sum(_hyper_1_2_chunk.segment_by_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk + Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value + 
-> Custom Scan (VectorAgg) + Output: _hyper_1_3_chunk.segment_by_value, (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + Output: _hyper_1_3_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk + Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value + -> Partial HashAggregate + Output: _hyper_1_4_chunk.segment_by_value, PARTIAL sum(_hyper_1_4_chunk.segment_by_value) + Group Key: _hyper_1_4_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_4_chunk + Output: _hyper_1_4_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_5_chunk.segment_by_value, PARTIAL sum(_hyper_1_5_chunk.segment_by_value) + Group Key: _hyper_1_5_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_5_chunk + Output: _hyper_1_5_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_6_chunk.segment_by_value, PARTIAL sum(_hyper_1_6_chunk.segment_by_value) + Group Key: _hyper_1_6_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_6_chunk + Output: _hyper_1_6_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_7_chunk.segment_by_value, PARTIAL sum(_hyper_1_7_chunk.segment_by_value) + Group Key: _hyper_1_7_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_7_chunk + Output: _hyper_1_7_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_8_chunk.segment_by_value, PARTIAL sum(_hyper_1_8_chunk.segment_by_value) + Group Key: _hyper_1_8_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_8_chunk + Output: _hyper_1_8_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_9_chunk.segment_by_value, PARTIAL sum(_hyper_1_9_chunk.segment_by_value) + Group Key: _hyper_1_9_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_9_chunk + Output: _hyper_1_9_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_10_chunk.segment_by_value, PARTIAL sum(_hyper_1_10_chunk.segment_by_value) + Group Key: _hyper_1_10_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_10_chunk + Output: _hyper_1_10_chunk.segment_by_value (63 rows) +SELECT sum(segment_by_value), segment_by_value FROM testtable GROUP BY segment_by_value ORDER BY 1, 2; + sum | segment_by_value +------+------------------ + -610 | -10 + -549 | -9 + -488 | -8 + -427 | -7 + -366 | -6 + -305 | -5 + -244 | -4 + -183 | -3 + -122 | -2 + -61 | -1 + 0 | 0 + 61 | 1 + 122 | 2 + 183 | 3 + 244 | 4 + 305 | 5 + 366 | 6 + 427 | 7 + 488 | 8 + 549 | 9 + 610 | 10 + 671 | 11 + 732 | 12 + 793 | 13 + 854 | 14 + 915 | 15 + 976 | 16 + 1037 | 17 + 1098 | 18 + 1159 | 19 + 1220 | 20 + 1281 | 21 + 1342 | 22 + 1403 | 23 + 1464 | 24 + 1525 | 25 + 1586 | 26 + 1647 | 27 + 1708 | 28 + 1769 | 29 + 1830 | 30 + 1891 | 31 + 1952 | 32 + 2013 | 33 + 2074 | 34 + 2135 | 35 + 2196 | 36 + 2257 | 37 + 2318 | 38 + 2379 | 39 + 2440 | 40 + 2501 | 41 + 2562 | 42 + 2623 | 43 + 2684 | 44 + 2745 | 45 + 2806 | 46 + 2867 | 47 + 2928 | 48 + 2989 | 49 + 3050 | 50 + 3111 | 51 + 3172 | 52 + 3233 | 53 + 3294 | 54 + 3355 | 55 + 3416 | 56 + 3477 | 57 + 3538 | 58 + 3599 
| 59 + 3660 | 60 + 3721 | 61 + 3782 | 62 + 3843 | 63 + 3904 | 64 + 3965 | 65 + 4026 | 66 + 4087 | 67 + 4148 | 68 + 4209 | 69 + 4270 | 70 + 4331 | 71 + 4392 | 72 + 4453 | 73 + 4514 | 74 + 4575 | 75 + 4636 | 76 + 4697 | 77 + 4758 | 78 + 4819 | 79 + 4880 | 80 + 4941 | 81 + 5002 | 82 + 5063 | 83 + 5124 | 84 + 5185 | 85 + 5246 | 86 + 5307 | 87 + 5368 | 88 + 5429 | 89 + 5490 | 90 + 5551 | 91 + 5612 | 92 + 5673 | 93 + 5734 | 94 + 5795 | 95 + 5856 | 96 + 5917 | 97 + 5978 | 98 + 6039 | 99 + 6100 | 100 +(111 rows) + :EXPLAIN -SELECT segment_by_value, sum(segment_by_value) FROM testtable GROUP BY segment_by_value; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - Finalize HashAggregate - Output: _hyper_1_1_chunk.segment_by_value, sum(_hyper_1_1_chunk.segment_by_value) - Group Key: _hyper_1_1_chunk.segment_by_value - -> Gather - Output: _hyper_1_1_chunk.segment_by_value, (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) - Workers Planned: 2 - -> Parallel Append - -> Partial HashAggregate - Output: _hyper_1_1_chunk.segment_by_value, PARTIAL sum(_hyper_1_1_chunk.segment_by_value) - Group Key: _hyper_1_1_chunk.segment_by_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk - Output: _hyper_1_1_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk - Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value - -> Partial HashAggregate - Output: _hyper_1_2_chunk.segment_by_value, PARTIAL sum(_hyper_1_2_chunk.segment_by_value) - Group Key: _hyper_1_2_chunk.segment_by_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk - Output: _hyper_1_2_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk - Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value - -> Partial HashAggregate - Output: _hyper_1_3_chunk.segment_by_value, PARTIAL sum(_hyper_1_3_chunk.segment_by_value) - Group Key: _hyper_1_3_chunk.segment_by_value - -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk - Output: _hyper_1_3_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk - Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value - -> Partial HashAggregate - Output: _hyper_1_4_chunk.segment_by_value, PARTIAL sum(_hyper_1_4_chunk.segment_by_value) - Group Key: 
_hyper_1_4_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_4_chunk - Output: _hyper_1_4_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_5_chunk.segment_by_value, PARTIAL sum(_hyper_1_5_chunk.segment_by_value) - Group Key: _hyper_1_5_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_5_chunk - Output: _hyper_1_5_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_6_chunk.segment_by_value, PARTIAL sum(_hyper_1_6_chunk.segment_by_value) - Group Key: _hyper_1_6_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_6_chunk - Output: _hyper_1_6_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_7_chunk.segment_by_value, PARTIAL sum(_hyper_1_7_chunk.segment_by_value) - Group Key: _hyper_1_7_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_7_chunk - Output: _hyper_1_7_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_8_chunk.segment_by_value, PARTIAL sum(_hyper_1_8_chunk.segment_by_value) - Group Key: _hyper_1_8_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_8_chunk - Output: _hyper_1_8_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_9_chunk.segment_by_value, PARTIAL sum(_hyper_1_9_chunk.segment_by_value) - Group Key: _hyper_1_9_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_9_chunk - Output: _hyper_1_9_chunk.segment_by_value - -> Partial HashAggregate - Output: _hyper_1_10_chunk.segment_by_value, PARTIAL sum(_hyper_1_10_chunk.segment_by_value) - Group Key: _hyper_1_10_chunk.segment_by_value - -> Parallel Seq Scan on _timescaledb_internal._hyper_1_10_chunk - Output: _hyper_1_10_chunk.segment_by_value +SELECT segment_by_value, sum(segment_by_value) FROM testtable GROUP BY segment_by_value ORDER BY 1, 2; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Sort + Output: _hyper_1_1_chunk.segment_by_value, (sum(_hyper_1_1_chunk.segment_by_value)) + Sort Key: _hyper_1_1_chunk.segment_by_value, (sum(_hyper_1_1_chunk.segment_by_value)) + -> Finalize HashAggregate + Output: _hyper_1_1_chunk.segment_by_value, sum(_hyper_1_1_chunk.segment_by_value) + Group Key: _hyper_1_1_chunk.segment_by_value + -> Gather + Output: _hyper_1_1_chunk.segment_by_value, (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) + Workers Planned: 2 + -> Parallel Append + -> Custom Scan (VectorAgg) + Output: _hyper_1_1_chunk.segment_by_value, (PARTIAL sum(_hyper_1_1_chunk.segment_by_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_1_chunk + Output: _hyper_1_1_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_11_chunk + Output: compress_hyper_2_11_chunk._ts_meta_count, compress_hyper_2_11_chunk._ts_meta_sequence_num, compress_hyper_2_11_chunk.segment_by_value, compress_hyper_2_11_chunk._ts_meta_min_1, compress_hyper_2_11_chunk._ts_meta_max_1, compress_hyper_2_11_chunk."time", compress_hyper_2_11_chunk.int_value, compress_hyper_2_11_chunk.float_value + -> Custom Scan (VectorAgg) + Output: _hyper_1_2_chunk.segment_by_value, (PARTIAL 
sum(_hyper_1_2_chunk.segment_by_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_2_chunk + Output: _hyper_1_2_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_12_chunk + Output: compress_hyper_2_12_chunk._ts_meta_count, compress_hyper_2_12_chunk._ts_meta_sequence_num, compress_hyper_2_12_chunk.segment_by_value, compress_hyper_2_12_chunk._ts_meta_min_1, compress_hyper_2_12_chunk._ts_meta_max_1, compress_hyper_2_12_chunk."time", compress_hyper_2_12_chunk.int_value, compress_hyper_2_12_chunk.float_value + -> Custom Scan (VectorAgg) + Output: _hyper_1_3_chunk.segment_by_value, (PARTIAL sum(_hyper_1_3_chunk.segment_by_value)) + -> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_1_3_chunk + Output: _hyper_1_3_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal.compress_hyper_2_13_chunk + Output: compress_hyper_2_13_chunk._ts_meta_count, compress_hyper_2_13_chunk._ts_meta_sequence_num, compress_hyper_2_13_chunk.segment_by_value, compress_hyper_2_13_chunk._ts_meta_min_1, compress_hyper_2_13_chunk._ts_meta_max_1, compress_hyper_2_13_chunk."time", compress_hyper_2_13_chunk.int_value, compress_hyper_2_13_chunk.float_value + -> Partial HashAggregate + Output: _hyper_1_4_chunk.segment_by_value, PARTIAL sum(_hyper_1_4_chunk.segment_by_value) + Group Key: _hyper_1_4_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_4_chunk + Output: _hyper_1_4_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_5_chunk.segment_by_value, PARTIAL sum(_hyper_1_5_chunk.segment_by_value) + Group Key: _hyper_1_5_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_5_chunk + Output: _hyper_1_5_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_6_chunk.segment_by_value, PARTIAL sum(_hyper_1_6_chunk.segment_by_value) + Group Key: _hyper_1_6_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_6_chunk + Output: _hyper_1_6_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_7_chunk.segment_by_value, PARTIAL sum(_hyper_1_7_chunk.segment_by_value) + Group Key: _hyper_1_7_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_7_chunk + Output: _hyper_1_7_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_8_chunk.segment_by_value, PARTIAL sum(_hyper_1_8_chunk.segment_by_value) + Group Key: _hyper_1_8_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_8_chunk + Output: _hyper_1_8_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_9_chunk.segment_by_value, PARTIAL sum(_hyper_1_9_chunk.segment_by_value) + Group Key: _hyper_1_9_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_9_chunk + Output: _hyper_1_9_chunk.segment_by_value + -> Partial HashAggregate + Output: _hyper_1_10_chunk.segment_by_value, PARTIAL sum(_hyper_1_10_chunk.segment_by_value) + Group Key: _hyper_1_10_chunk.segment_by_value + -> Parallel Seq Scan on _timescaledb_internal._hyper_1_10_chunk + Output: _hyper_1_10_chunk.segment_by_value (63 rows) +SELECT segment_by_value, sum(segment_by_value) FROM testtable GROUP BY segment_by_value ORDER BY 1, 2; + segment_by_value | sum +------------------+------ + -10 | -610 + -9 | -549 + -8 | -488 + -7 | -427 + -6 | -366 + -5 | -305 + -4 | -244 + -3 | -183 + -2 | -122 + -1 | -61 + 0 | 0 + 1 | 61 + 2 | 122 + 3 | 183 + 4 | 244 + 5 | 305 + 6 | 366 + 7 | 427 + 8 | 488 + 9 | 549 + 10 | 610 + 
11 | 671 + 12 | 732 + 13 | 793 + 14 | 854 + 15 | 915 + 16 | 976 + 17 | 1037 + 18 | 1098 + 19 | 1159 + 20 | 1220 + 21 | 1281 + 22 | 1342 + 23 | 1403 + 24 | 1464 + 25 | 1525 + 26 | 1586 + 27 | 1647 + 28 | 1708 + 29 | 1769 + 30 | 1830 + 31 | 1891 + 32 | 1952 + 33 | 2013 + 34 | 2074 + 35 | 2135 + 36 | 2196 + 37 | 2257 + 38 | 2318 + 39 | 2379 + 40 | 2440 + 41 | 2501 + 42 | 2562 + 43 | 2623 + 44 | 2684 + 45 | 2745 + 46 | 2806 + 47 | 2867 + 48 | 2928 + 49 | 2989 + 50 | 3050 + 51 | 3111 + 52 | 3172 + 53 | 3233 + 54 | 3294 + 55 | 3355 + 56 | 3416 + 57 | 3477 + 58 | 3538 + 59 | 3599 + 60 | 3660 + 61 | 3721 + 62 | 3782 + 63 | 3843 + 64 | 3904 + 65 | 3965 + 66 | 4026 + 67 | 4087 + 68 | 4148 + 69 | 4209 + 70 | 4270 + 71 | 4331 + 72 | 4392 + 73 | 4453 + 74 | 4514 + 75 | 4575 + 76 | 4636 + 77 | 4697 + 78 | 4758 + 79 | 4819 + 80 | 4880 + 81 | 4941 + 82 | 5002 + 83 | 5063 + 84 | 5124 + 85 | 5185 + 86 | 5246 + 87 | 5307 + 88 | 5368 + 89 | 5429 + 90 | 5490 + 91 | 5551 + 92 | 5612 + 93 | 5673 + 94 | 5734 + 95 | 5795 + 96 | 5856 + 97 | 5917 + 98 | 5978 + 99 | 6039 + 100 | 6100 +(111 rows) + -- Vectorized aggregation possible SELECT sum(int_value) FROM testtable; sum diff --git a/tsl/test/sql/vectorized_aggregation.sql b/tsl/test/sql/vectorized_aggregation.sql index 06faef05466..d3817879dfc 100644 --- a/tsl/test/sql/vectorized_aggregation.sql +++ b/tsl/test/sql/vectorized_aggregation.sql @@ -59,15 +59,20 @@ SELECT sum(segment_by_value) FROM testtable GROUP BY float_value; :EXPLAIN SELECT sum(segment_by_value) FROM testtable GROUP BY int_value; +-- FIXME here the partitionwise aggregation plan is not chosen due to costs? :EXPLAIN SELECT sum(int_value) FROM testtable GROUP BY segment_by_value; --- Vectorization not possible due to two variables and grouping +-- Vectorization possible with grouping by a segmentby column. 
:EXPLAIN -SELECT sum(segment_by_value), segment_by_value FROM testtable GROUP BY segment_by_value; +SELECT sum(segment_by_value), segment_by_value FROM testtable GROUP BY segment_by_value ORDER BY 1, 2; + +SELECT sum(segment_by_value), segment_by_value FROM testtable GROUP BY segment_by_value ORDER BY 1, 2; :EXPLAIN -SELECT segment_by_value, sum(segment_by_value) FROM testtable GROUP BY segment_by_value; +SELECT segment_by_value, sum(segment_by_value) FROM testtable GROUP BY segment_by_value ORDER BY 1, 2; + +SELECT segment_by_value, sum(segment_by_value) FROM testtable GROUP BY segment_by_value ORDER BY 1, 2; -- Vectorized aggregation possible SELECT sum(int_value) FROM testtable; From dd9875c60ea51b8a8a42e0368f28cf754b026aea Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Wed, 17 Apr 2024 15:34:50 +0200 Subject: [PATCH 77/81] test ref --- tsl/test/expected/compression_ddl.out | 55 +++++-------- .../expected/transparent_decompression-15.out | 78 ++++++++----------- 2 files changed, 52 insertions(+), 81 deletions(-) diff --git a/tsl/test/expected/compression_ddl.out b/tsl/test/expected/compression_ddl.out index e2a2591a869..db31cb01864 100644 --- a/tsl/test/expected/compression_ddl.out +++ b/tsl/test/expected/compression_ddl.out @@ -1415,14 +1415,13 @@ ORDER BY device_id; -> Sort Sort Key: _hyper_31_110_chunk.device_id -> Append - -> Partial GroupAggregate - Group Key: _hyper_31_110_chunk.device_id + -> Custom Scan (VectorAgg) -> Custom Scan (DecompressChunk) on _hyper_31_110_chunk -> Index Scan using compress_hyper_32_111_chunk_device_id__ts_meta_sequence_num_idx on compress_hyper_32_111_chunk -> Partial GroupAggregate Group Key: _hyper_31_110_chunk.device_id -> Index Only Scan using _hyper_31_110_chunk_compression_insert_device_id_time_idx on _hyper_31_110_chunk -(12 rows) +(11 rows) SELECT device_id, count(*) FROM compression_insert @@ -1500,18 +1499,16 @@ ORDER BY device_id; -> Sort Sort Key: _hyper_31_110_chunk.device_id -> Append - -> Partial GroupAggregate - Group Key: _hyper_31_110_chunk.device_id + -> Custom Scan (VectorAgg) -> Custom Scan (DecompressChunk) on _hyper_31_110_chunk -> Index Scan using compress_hyper_32_111_chunk_device_id__ts_meta_sequence_num_idx on compress_hyper_32_111_chunk - -> Partial GroupAggregate - Group Key: _hyper_31_112_chunk.device_id + -> Custom Scan (VectorAgg) -> Custom Scan (DecompressChunk) on _hyper_31_112_chunk -> Index Scan using compress_hyper_32_113_chunk_device_id__ts_meta_sequence_num_idx on compress_hyper_32_113_chunk -> Partial GroupAggregate Group Key: _hyper_31_112_chunk.device_id -> Index Only Scan using _hyper_31_112_chunk_compression_insert_device_id_time_idx on _hyper_31_112_chunk -(16 rows) +(14 rows) SELECT device_id, count(*) FROM compression_insert @@ -1589,22 +1586,19 @@ ORDER BY device_id; -> Sort Sort Key: _hyper_31_110_chunk.device_id -> Append - -> Partial GroupAggregate - Group Key: _hyper_31_110_chunk.device_id + -> Custom Scan (VectorAgg) -> Custom Scan (DecompressChunk) on _hyper_31_110_chunk -> Index Scan using compress_hyper_32_111_chunk_device_id__ts_meta_sequence_num_idx on compress_hyper_32_111_chunk - -> Partial GroupAggregate - Group Key: _hyper_31_112_chunk.device_id + -> Custom Scan (VectorAgg) -> Custom Scan (DecompressChunk) on _hyper_31_112_chunk -> Index Scan using compress_hyper_32_113_chunk_device_id__ts_meta_sequence_num_idx on compress_hyper_32_113_chunk - -> Partial GroupAggregate - Group Key: _hyper_31_114_chunk.device_id + -> Custom Scan (VectorAgg) 
-> Custom Scan (DecompressChunk) on _hyper_31_114_chunk -> Index Scan using compress_hyper_32_115_chunk_device_id__ts_meta_sequence_num_idx on compress_hyper_32_115_chunk -> Partial GroupAggregate Group Key: _hyper_31_114_chunk.device_id -> Index Only Scan using _hyper_31_114_chunk_compression_insert_device_id_time_idx on _hyper_31_114_chunk -(20 rows) +(17 rows) SELECT device_id, count(*) FROM compression_insert @@ -1682,26 +1676,22 @@ ORDER BY device_id; -> Sort Sort Key: _hyper_31_110_chunk.device_id -> Append - -> Partial GroupAggregate - Group Key: _hyper_31_110_chunk.device_id + -> Custom Scan (VectorAgg) -> Custom Scan (DecompressChunk) on _hyper_31_110_chunk -> Index Scan using compress_hyper_32_111_chunk_device_id__ts_meta_sequence_num_idx on compress_hyper_32_111_chunk - -> Partial GroupAggregate - Group Key: _hyper_31_112_chunk.device_id + -> Custom Scan (VectorAgg) -> Custom Scan (DecompressChunk) on _hyper_31_112_chunk -> Index Scan using compress_hyper_32_113_chunk_device_id__ts_meta_sequence_num_idx on compress_hyper_32_113_chunk - -> Partial GroupAggregate - Group Key: _hyper_31_114_chunk.device_id + -> Custom Scan (VectorAgg) -> Custom Scan (DecompressChunk) on _hyper_31_114_chunk -> Index Scan using compress_hyper_32_115_chunk_device_id__ts_meta_sequence_num_idx on compress_hyper_32_115_chunk - -> Partial GroupAggregate - Group Key: _hyper_31_116_chunk.device_id + -> Custom Scan (VectorAgg) -> Custom Scan (DecompressChunk) on _hyper_31_116_chunk -> Index Scan using compress_hyper_32_117_chunk_device_id__ts_meta_sequence_num_idx on compress_hyper_32_117_chunk -> Partial GroupAggregate Group Key: _hyper_31_116_chunk.device_id -> Index Only Scan using _hyper_31_116_chunk_compression_insert_device_id_time_idx on _hyper_31_116_chunk -(24 rows) +(20 rows) SELECT device_id, count(*) FROM compression_insert @@ -1779,30 +1769,25 @@ ORDER BY device_id; -> Sort Sort Key: _hyper_31_110_chunk.device_id -> Append - -> Partial GroupAggregate - Group Key: _hyper_31_110_chunk.device_id + -> Custom Scan (VectorAgg) -> Custom Scan (DecompressChunk) on _hyper_31_110_chunk -> Index Scan using compress_hyper_32_111_chunk_device_id__ts_meta_sequence_num_idx on compress_hyper_32_111_chunk - -> Partial GroupAggregate - Group Key: _hyper_31_112_chunk.device_id + -> Custom Scan (VectorAgg) -> Custom Scan (DecompressChunk) on _hyper_31_112_chunk -> Index Scan using compress_hyper_32_113_chunk_device_id__ts_meta_sequence_num_idx on compress_hyper_32_113_chunk - -> Partial GroupAggregate - Group Key: _hyper_31_114_chunk.device_id + -> Custom Scan (VectorAgg) -> Custom Scan (DecompressChunk) on _hyper_31_114_chunk -> Index Scan using compress_hyper_32_115_chunk_device_id__ts_meta_sequence_num_idx on compress_hyper_32_115_chunk - -> Partial GroupAggregate - Group Key: _hyper_31_116_chunk.device_id + -> Custom Scan (VectorAgg) -> Custom Scan (DecompressChunk) on _hyper_31_116_chunk -> Index Scan using compress_hyper_32_117_chunk_device_id__ts_meta_sequence_num_idx on compress_hyper_32_117_chunk - -> Partial GroupAggregate - Group Key: _hyper_31_118_chunk.device_id + -> Custom Scan (VectorAgg) -> Custom Scan (DecompressChunk) on _hyper_31_118_chunk -> Index Scan using compress_hyper_32_119_chunk_device_id__ts_meta_sequence_num_idx on compress_hyper_32_119_chunk -> Partial GroupAggregate Group Key: _hyper_31_118_chunk.device_id -> Index Only Scan using _hyper_31_118_chunk_compression_insert_device_id_time_idx on _hyper_31_118_chunk -(28 rows) +(23 rows) SELECT device_id, count(*) FROM compression_insert 
diff --git a/tsl/test/expected/transparent_decompression-15.out b/tsl/test/expected/transparent_decompression-15.out index 670b6690595..3d661f6d4e2 100644 --- a/tsl/test/expected/transparent_decompression-15.out +++ b/tsl/test/expected/transparent_decompression-15.out @@ -1665,9 +1665,8 @@ ORDER BY device_id; Sort Key: _hyper_1_1_chunk.device_id Sort Method: quicksort -> Append (actual rows=15 loops=1) - -> Partial GroupAggregate (actual rows=5 loops=1) - Group Key: _hyper_1_1_chunk.device_id - -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=1800 loops=1) + -> Custom Scan (VectorAgg) (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (never executed) -> Index Scan using compress_hyper_5_15_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_5_15_chunk (actual rows=5 loops=1) -> Partial GroupAggregate (actual rows=5 loops=1) Group Key: _hyper_1_2_chunk.device_id @@ -1675,11 +1674,10 @@ ORDER BY device_id; Sort Key: _hyper_1_2_chunk.device_id Sort Method: quicksort -> Seq Scan on _hyper_1_2_chunk (actual rows=2520 loops=1) - -> Partial GroupAggregate (actual rows=5 loops=1) - Group Key: _hyper_1_3_chunk.device_id - -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=2520 loops=1) + -> Custom Scan (VectorAgg) (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed) -> Index Scan using compress_hyper_5_16_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_5_16_chunk (actual rows=5 loops=1) -(20 rows) +(18 rows) -- test window functions with GROUP BY :PREFIX @@ -1696,9 +1694,8 @@ ORDER BY device_id; Sort Key: _hyper_1_1_chunk.device_id Sort Method: quicksort -> Append (actual rows=15 loops=1) - -> Partial GroupAggregate (actual rows=5 loops=1) - Group Key: _hyper_1_1_chunk.device_id - -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=1800 loops=1) + -> Custom Scan (VectorAgg) (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (never executed) -> Index Scan using compress_hyper_5_15_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_5_15_chunk (actual rows=5 loops=1) -> Partial GroupAggregate (actual rows=5 loops=1) Group Key: _hyper_1_2_chunk.device_id @@ -1706,11 +1703,10 @@ ORDER BY device_id; Sort Key: _hyper_1_2_chunk.device_id Sort Method: quicksort -> Seq Scan on _hyper_1_2_chunk (actual rows=2520 loops=1) - -> Partial GroupAggregate (actual rows=5 loops=1) - Group Key: _hyper_1_3_chunk.device_id - -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=2520 loops=1) + -> Custom Scan (VectorAgg) (actual rows=5 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed) -> Index Scan using compress_hyper_5_16_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_5_16_chunk (actual rows=5 loops=1) -(21 rows) +(19 rows) SET enable_hashagg = ON; -- test CTE @@ -5826,17 +5822,14 @@ ORDER BY device_id; Sort Key: _hyper_2_4_chunk.device_id Sort Method: quicksort -> Append (actual rows=15 loops=1) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: _hyper_2_4_chunk.device_id - -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (never executed) -> Index Scan using compress_hyper_6_17_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_17_chunk (actual rows=1 loops=1) - -> Partial GroupAggregate (actual rows=3 loops=1) - Group 
Key: _hyper_2_5_chunk.device_id - -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=1080 loops=1) + -> Custom Scan (VectorAgg) (actual rows=3 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (never executed) -> Index Scan using compress_hyper_6_18_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_18_chunk (actual rows=3 loops=1) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: _hyper_2_6_chunk.device_id - -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=360 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (never executed) -> Index Scan using compress_hyper_6_19_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_19_chunk (actual rows=1 loops=1) -> Partial GroupAggregate (actual rows=1 loops=1) Group Key: _hyper_2_7_chunk.device_id @@ -5856,13 +5849,11 @@ ORDER BY device_id; Sort Key: _hyper_2_9_chunk.device_id Sort Method: quicksort -> Seq Scan on _hyper_2_9_chunk (actual rows=504 loops=1) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: _hyper_2_10_chunk.device_id - -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=504 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (never executed) -> Index Scan using compress_hyper_6_20_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_20_chunk (actual rows=1 loops=1) - -> Partial GroupAggregate (actual rows=3 loops=1) - Group Key: _hyper_2_11_chunk.device_id - -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=1512 loops=1) + -> Custom Scan (VectorAgg) (actual rows=3 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (never executed) -> Index Scan using compress_hyper_6_21_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_21_chunk (actual rows=3 loops=1) -> Partial GroupAggregate (actual rows=1 loops=1) Group Key: _hyper_2_12_chunk.device_id @@ -5870,7 +5861,7 @@ ORDER BY device_id; Sort Key: _hyper_2_12_chunk.device_id Sort Method: quicksort -> Seq Scan on _hyper_2_12_chunk (actual rows=504 loops=1) -(50 rows) +(45 rows) -- test window functions with GROUP BY :PREFIX @@ -5887,17 +5878,14 @@ ORDER BY device_id; Sort Key: _hyper_2_4_chunk.device_id Sort Method: quicksort -> Append (actual rows=15 loops=1) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: _hyper_2_4_chunk.device_id - -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=360 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (never executed) -> Index Scan using compress_hyper_6_17_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_17_chunk (actual rows=1 loops=1) - -> Partial GroupAggregate (actual rows=3 loops=1) - Group Key: _hyper_2_5_chunk.device_id - -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=1080 loops=1) + -> Custom Scan (VectorAgg) (actual rows=3 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (never executed) -> Index Scan using compress_hyper_6_18_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_18_chunk (actual rows=3 loops=1) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: _hyper_2_6_chunk.device_id - -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=360 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk 
(never executed) -> Index Scan using compress_hyper_6_19_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_19_chunk (actual rows=1 loops=1) -> Partial GroupAggregate (actual rows=1 loops=1) Group Key: _hyper_2_7_chunk.device_id @@ -5917,13 +5905,11 @@ ORDER BY device_id; Sort Key: _hyper_2_9_chunk.device_id Sort Method: quicksort -> Seq Scan on _hyper_2_9_chunk (actual rows=504 loops=1) - -> Partial GroupAggregate (actual rows=1 loops=1) - Group Key: _hyper_2_10_chunk.device_id - -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=504 loops=1) + -> Custom Scan (VectorAgg) (actual rows=1 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (never executed) -> Index Scan using compress_hyper_6_20_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_20_chunk (actual rows=1 loops=1) - -> Partial GroupAggregate (actual rows=3 loops=1) - Group Key: _hyper_2_11_chunk.device_id - -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=1512 loops=1) + -> Custom Scan (VectorAgg) (actual rows=3 loops=1) + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (never executed) -> Index Scan using compress_hyper_6_21_chunk_device_id_device_id_peer__ts_meta_idx on compress_hyper_6_21_chunk (actual rows=3 loops=1) -> Partial GroupAggregate (actual rows=1 loops=1) Group Key: _hyper_2_12_chunk.device_id @@ -5931,7 +5917,7 @@ ORDER BY device_id; Sort Key: _hyper_2_12_chunk.device_id Sort Method: quicksort -> Seq Scan on _hyper_2_12_chunk (actual rows=504 loops=1) -(51 rows) +(46 rows) SET enable_hashagg = ON; -- test CTE From 8ae7e373f0a4fff538cb8512d38e75d01bf20814 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Wed, 17 Apr 2024 18:26:40 +0200 Subject: [PATCH 78/81] remove debug --- tsl/src/nodes/vector_agg/plan.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/tsl/src/nodes/vector_agg/plan.c b/tsl/src/nodes/vector_agg/plan.c index 30d56f326d2..bec907d8802 100644 --- a/tsl/src/nodes/vector_agg/plan.c +++ b/tsl/src/nodes/vector_agg/plan.c @@ -311,8 +311,6 @@ can_vectorize_grouping(Agg *agg, CustomScan *custom) { int offset = AttrNumberGetAttrOffset(agg->grpColIdx[i]); TargetEntry *entry = list_nth(agg->plan.targetlist, offset); - fprintf(stderr, "grpColIdx[%d] = %d, entry:\n", i, offset); - my_print(entry); bool is_segmentby = false; if (!is_vector_var(custom, entry->expr, &is_segmentby)) From 37f13fac0b4f7206f3ec6d2424df301ce2b66251 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Mon, 22 Apr 2024 20:42:52 +0200 Subject: [PATCH 79/81] add a test --- src/planner/partialize.c | 21 +++- tsl/test/expected/vector_agg_segmentby.out | 126 +++++++++++++++++++++ tsl/test/sql/CMakeLists.txt | 1 + tsl/test/sql/vector_agg_segmentby.sql | 42 +++++++ 4 files changed, 185 insertions(+), 5 deletions(-) create mode 100644 tsl/test/expected/vector_agg_segmentby.out create mode 100644 tsl/test/sql/vector_agg_segmentby.sql diff --git a/src/planner/partialize.c b/src/planner/partialize.c index b254ab4a9d8..da7b752aa75 100644 --- a/src/planner/partialize.c +++ b/src/planner/partialize.c @@ -446,12 +446,18 @@ generate_agg_pushdown_path(PlannerInfo *root, Path *cheapest_total_path, RelOptI PathTarget *partial_grouping_target, bool can_sort, bool can_hash, double d_num_groups, GroupPathExtraData *extra_data) { +// mybt(); + /* Get subpaths */ List *subpaths = get_subpaths_from_append_path(cheapest_total_path, false); /* No subpaths available or unsupported append node 
*/ if (subpaths == NIL) + { +// fprintf(stderr, "no subpaths in append path:\n"); +// my_print(cheapest_total_path); return; + } if (list_length(subpaths) < 2) { @@ -538,20 +544,25 @@ generate_agg_pushdown_path(PlannerInfo *root, Path *cheapest_total_path, RelOptI /* Create new append paths */ if (sorted_subpaths != NIL) { - add_path(partially_grouped_rel, - copy_append_like_path(root, + Path *sorted = copy_append_like_path(root, cheapest_total_path, sorted_subpaths, - partial_grouping_target)); + partial_grouping_target); +// fprintf(stderr, "sorted!!!\n"); +// my_print(sorted); + add_path(partially_grouped_rel, sorted); } if (hashed_subpaths != NIL) { - add_path(partially_grouped_rel, + Path *hashed = copy_append_like_path(root, cheapest_total_path, hashed_subpaths, - partial_grouping_target)); + partial_grouping_target); +// fprintf(stderr, "hashed!!!\n"); +// my_print(hashed); + add_path(partially_grouped_rel, hashed); } } diff --git a/tsl/test/expected/vector_agg_segmentby.out b/tsl/test/expected/vector_agg_segmentby.out new file mode 100644 index 00000000000..3127c5d070f --- /dev/null +++ b/tsl/test/expected/vector_agg_segmentby.out @@ -0,0 +1,126 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\set CHUNKS 2::int +\set CHUNK_ROWS 100000::int +\set GROUPING_CARDINALITY 10::int +create table svagg(t int, f int, s int); +select create_hypertable('svagg', 's', chunk_time_interval => :GROUPING_CARDINALITY / :CHUNKS); +NOTICE: adding not-null constraint to column "s" + create_hypertable +-------------------- + (1,public,svagg,t) +(1 row) + +insert into svagg select s * 10000::int + t, (s + t) % 7::int, s from + generate_series(1::int, :CHUNK_ROWS * :CHUNKS / :GROUPING_CARDINALITY) t, + -- (values (1), (10)) s(s) + generate_series(0::int, :GROUPING_CARDINALITY - 1::int) s(s) +; +alter table svagg set (timescaledb.compress, timescaledb.compress_orderby = 't', + timescaledb.compress_segmentby = 's'); +select count(compress_chunk(x)) from show_chunks('svagg') x; + count +------- + 2 +(1 row) + +analyze svagg; +set max_parallel_workers_per_gather = 0; +explain (costs off) +select s, sum(t), count(*) from svagg group by s order by s; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------------- + Finalize GroupAggregate + Group Key: svagg.s + -> Custom Scan (ChunkAppend) on svagg + Order: svagg.s + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk + -> Index Scan using compress_hyper_2_3_chunk_s__ts_meta_sequence_num_idx on compress_hyper_2_3_chunk + -> Custom Scan (VectorAgg) + -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk + -> Index Scan using compress_hyper_2_4_chunk_s__ts_meta_sequence_num_idx on compress_hyper_2_4_chunk +(10 rows) + +select s, sum(t), count(*) from svagg where f >= 0 group by s order by s; + s | sum | count +---+------------+------- + 0 | 200010000 | 20000 + 1 | 400010000 | 20000 + 2 | 600010000 | 20000 + 3 | 800010000 | 20000 + 4 | 1000010000 | 20000 + 5 | 1200010000 | 20000 + 6 | 1400010000 | 20000 + 7 | 1600010000 | 20000 + 8 | 1800010000 | 20000 + 9 | 2000010000 | 20000 +(10 rows) + +select s, sum(t), count(*) from svagg where f = 0 group by s order by s; + s | sum | count +---+-----------+------- + 0 | 28578571 | 2857 + 1 | 57145714 | 2857 + 2 | 85712857 | 2857 + 3 | 114280000 | 2857 + 4 | 142847143 | 2857 + 5 | 
171414286 | 2857 + 6 | 200061429 | 2858 + 7 | 228568571 | 2857 + 8 | 257135714 | 2857 + 9 | 285702857 | 2857 +(10 rows) + +select s, sum(t), count(*) from svagg where f in (0, 1) group by s order by s; + s | sum | count +---+-----------+------- + 0 | 57160000 | 5715 + 1 | 114294285 | 5714 + 2 | 171428571 | 5714 + 3 | 228562857 | 5714 + 4 | 285697143 | 5714 + 5 | 342831429 | 5714 + 6 | 400045715 | 5715 + 7 | 457210000 | 5715 + 8 | 514274285 | 5714 + 9 | 571408571 | 5714 +(10 rows) + +select s, sum(t), count(*) from svagg where f in (0, 1, 3) group by s order by s; + s | sum | count +---+-----------+------- + 0 | 85727143 | 8572 + 1 | 171428571 | 8571 + 2 | 257170000 | 8572 + 3 | 342851428 | 8571 + 4 | 428552857 | 8571 + 5 | 514254286 | 8571 + 6 | 600035715 | 8572 + 7 | 685767143 | 8572 + 8 | 771398571 | 8571 + 9 | 857210000 | 8572 +(10 rows) + +select s, sum(t), count(*) from svagg where f > 10 group by s order by s; + s | sum | count +---+-----+------- +(0 rows) + +-- this should be vectorized as well but isn't because of the projection. +explain (costs off) +select sum(t), s, count(*) from svagg group by s order by s; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------------- + GroupAggregate + Group Key: svagg.s + -> Custom Scan (ChunkAppend) on svagg + Order: svagg.s + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk + -> Index Scan using compress_hyper_2_3_chunk_s__ts_meta_sequence_num_idx on compress_hyper_2_3_chunk + -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk + -> Index Scan using compress_hyper_2_4_chunk_s__ts_meta_sequence_num_idx on compress_hyper_2_4_chunk +(8 rows) + +drop table svagg; diff --git a/tsl/test/sql/CMakeLists.txt b/tsl/test/sql/CMakeLists.txt index 3469f236b3b..ada403e1110 100644 --- a/tsl/test/sql/CMakeLists.txt +++ b/tsl/test/sql/CMakeLists.txt @@ -39,6 +39,7 @@ set(TEST_FILES transparent_decompression_join_index.sql vector_agg_default.sql vector_agg_param.sql + vector_agg_segmentby.sql vectorized_aggregation.sql) if(USE_TELEMETRY) diff --git a/tsl/test/sql/vector_agg_segmentby.sql b/tsl/test/sql/vector_agg_segmentby.sql new file mode 100644 index 00000000000..d0a19a59f7e --- /dev/null +++ b/tsl/test/sql/vector_agg_segmentby.sql @@ -0,0 +1,42 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+ +\set CHUNKS 2::int +\set CHUNK_ROWS 100000::int +\set GROUPING_CARDINALITY 10::int + +create table svagg(t int, f int, s int); +select create_hypertable('svagg', 's', chunk_time_interval => :GROUPING_CARDINALITY / :CHUNKS); + +insert into svagg select s * 10000::int + t, (s + t) % 7::int, s from + generate_series(1::int, :CHUNK_ROWS * :CHUNKS / :GROUPING_CARDINALITY) t, + -- (values (1), (10)) s(s) + generate_series(0::int, :GROUPING_CARDINALITY - 1::int) s(s) +; + +alter table svagg set (timescaledb.compress, timescaledb.compress_orderby = 't', + timescaledb.compress_segmentby = 's'); + +select count(compress_chunk(x)) from show_chunks('svagg') x; + +analyze svagg; + +set max_parallel_workers_per_gather = 0; + +explain (costs off) +select s, sum(t), count(*) from svagg group by s order by s; + +select s, sum(t), count(*) from svagg where f >= 0 group by s order by s; +select s, sum(t), count(*) from svagg where f = 0 group by s order by s; +select s, sum(t), count(*) from svagg where f in (0, 1) group by s order by s; +select s, sum(t), count(*) from svagg where f in (0, 1, 3) group by s order by s; +select s, sum(t), count(*) from svagg where f > 10 group by s order by s; + + +-- this should be vectorized as well but isn't because of the projection. +explain (costs off) +select sum(t), s, count(*) from svagg group by s order by s; + + +drop table svagg; From 2cf656c9162469ed3f1d1f66739cafa3e7c021c9 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Tue, 23 Apr 2024 15:00:04 +0200 Subject: [PATCH 80/81] more tests --- src/guc.c | 25 ++++++++++ src/guc.h | 9 ++++ src/planner/partialize.c | 29 ++++++----- tsl/src/nodes/vector_agg/plan.c | 58 ++++++++++++++++++++++ tsl/src/nodes/vector_agg/plan.h | 1 + tsl/src/planner.c | 10 ++++ tsl/test/expected/vector_agg_default.out | 29 +++++------ tsl/test/expected/vector_agg_segmentby.out | 54 ++++++++++++++------ tsl/test/sql/CMakeLists.txt | 43 ++++++++-------- tsl/test/sql/vector_agg_default.sql | 11 +++- tsl/test/sql/vector_agg_segmentby.sql | 34 ++++++++++++- 11 files changed, 230 insertions(+), 73 deletions(-) diff --git a/src/guc.c b/src/guc.c index 7d6406c9bf4..666a8bf3e71 100644 --- a/src/guc.c +++ b/src/guc.c @@ -118,6 +118,18 @@ static const struct config_enum_entry require_vector_qual_options[] = { #endif DebugRequireVectorQual ts_guc_debug_require_vector_qual = RVQ_Allow; + +#ifdef TS_DEBUG +static const struct config_enum_entry require_vector_agg_options[] = { + { "allow", RVA_Allow, false }, + { "forbid", RVA_Forbid, false }, + { "require", RVA_Require, false }, + { NULL, 0, false } +}; +#endif + +DebugRequireVectorAgg ts_guc_debug_require_vector_agg = RVA_Allow; + bool ts_guc_debug_compression_path_info = false; static bool ts_guc_enable_hypertable_create = true; @@ -820,6 +832,19 @@ _guc_init(void) /* assign_hook= */ NULL, /* show_hook= */ NULL); + DefineCustomEnumVariable(/* name= */ MAKE_EXTOPTION("debug_require_vector_agg"), + /* short_desc= */ + "ensure that vectorized aggregation is used or not", + /* long_desc= */ "this is for debugging purposes", + /* valueAddr= */ (int *) &ts_guc_debug_require_vector_agg, + /* bootValue= */ RVQ_Allow, + /* options = */ require_vector_agg_options, + /* context= */ PGC_USERSET, + /* flags= */ 0, + /* check_hook= */ NULL, + /* assign_hook= */ NULL, + /* show_hook= */ NULL); + DefineCustomEnumVariable(/* name= */ MAKE_EXTOPTION("debug_require_vector_qual"), /* short_desc= */ "ensure that non-vectorized or vectorized filters are used in " diff 
--git a/src/guc.h b/src/guc.h index 59775e69e8a..79df742b1da 100644 --- a/src/guc.h +++ b/src/guc.h @@ -87,6 +87,15 @@ extern TSDLLEXPORT bool ts_guc_debug_require_batch_sorted_merge; extern TSDLLEXPORT bool ts_guc_debug_allow_cagg_with_deprecated_funcs; +typedef enum DebugRequireVectorAgg +{ + RVA_Allow = 0, + RVA_Forbid, + RVA_Require, +} DebugRequireVectorAgg; + +extern TSDLLEXPORT DebugRequireVectorAgg ts_guc_debug_require_vector_agg; + void _guc_init(void); typedef enum diff --git a/src/planner/partialize.c b/src/planner/partialize.c index da7b752aa75..664e8b70114 100644 --- a/src/planner/partialize.c +++ b/src/planner/partialize.c @@ -446,7 +446,7 @@ generate_agg_pushdown_path(PlannerInfo *root, Path *cheapest_total_path, RelOptI PathTarget *partial_grouping_target, bool can_sort, bool can_hash, double d_num_groups, GroupPathExtraData *extra_data) { -// mybt(); + // mybt(); /* Get subpaths */ List *subpaths = get_subpaths_from_append_path(cheapest_total_path, false); @@ -454,8 +454,8 @@ generate_agg_pushdown_path(PlannerInfo *root, Path *cheapest_total_path, RelOptI /* No subpaths available or unsupported append node */ if (subpaths == NIL) { -// fprintf(stderr, "no subpaths in append path:\n"); -// my_print(cheapest_total_path); + // fprintf(stderr, "no subpaths in append path:\n"); + // my_print(cheapest_total_path); return; } @@ -545,23 +545,22 @@ generate_agg_pushdown_path(PlannerInfo *root, Path *cheapest_total_path, RelOptI if (sorted_subpaths != NIL) { Path *sorted = copy_append_like_path(root, - cheapest_total_path, - sorted_subpaths, - partial_grouping_target); -// fprintf(stderr, "sorted!!!\n"); -// my_print(sorted); + cheapest_total_path, + sorted_subpaths, + partial_grouping_target); + // fprintf(stderr, "sorted!!!\n"); + // my_print(sorted); add_path(partially_grouped_rel, sorted); } if (hashed_subpaths != NIL) { - Path *hashed = - copy_append_like_path(root, - cheapest_total_path, - hashed_subpaths, - partial_grouping_target); -// fprintf(stderr, "hashed!!!\n"); -// my_print(hashed); + Path *hashed = copy_append_like_path(root, + cheapest_total_path, + hashed_subpaths, + partial_grouping_target); + // fprintf(stderr, "hashed!!!\n"); + // my_print(hashed); add_path(partially_grouped_rel, hashed); } } diff --git a/tsl/src/nodes/vector_agg/plan.c b/tsl/src/nodes/vector_agg/plan.c index bec907d8802..69b23c5fd94 100644 --- a/tsl/src/nodes/vector_agg/plan.c +++ b/tsl/src/nodes/vector_agg/plan.c @@ -327,6 +327,64 @@ can_vectorize_grouping(Agg *agg, CustomScan *custom) return true; } +/* + * Check if we have a vectorized aggregation node in the plan tree. This is used + * for testing. 
+ */ +bool +has_vector_agg_node(Plan *plan) +{ + if (plan->lefttree && has_vector_agg_node(plan->lefttree)) + { + return true; + } + + if (plan->righttree && has_vector_agg_node(plan->righttree)) + { + return true; + } + + CustomScan *custom = NULL; + List *append_plans = NIL; + if (IsA(plan, Append)) + { + append_plans = castNode(Append, plan)->appendplans; + } + else if (IsA(plan, CustomScan)) + { + custom = castNode(CustomScan, plan); + if (strcmp("ChunkAppend", custom->methods->CustomName) == 0) + { + append_plans = custom->custom_plans; + } + } + + if (append_plans) + { + ListCell *lc; + foreach (lc, append_plans) + { + if (has_vector_agg_node(lfirst(lc))) + { + return true; + } + } + return false; + } + + if (custom == NULL) + { + return false; + } + + if (strcmp(VECTOR_AGG_NODE_NAME, custom->methods->CustomName) == 0) + { + return true; + } + + return false; +} + /* * Where possible, replace the partial aggregation plan nodes with our own * vectorized aggregation node. The replacement is done in-place. diff --git a/tsl/src/nodes/vector_agg/plan.h b/tsl/src/nodes/vector_agg/plan.h index 653d9d1e1d0..f8b336d5d0d 100644 --- a/tsl/src/nodes/vector_agg/plan.h +++ b/tsl/src/nodes/vector_agg/plan.h @@ -16,3 +16,4 @@ typedef struct VectorAggPlan extern void _vector_agg_init(void); Plan *try_insert_vector_agg_node(Plan *plan); +bool has_vector_agg_node(Plan *plan); diff --git a/tsl/src/planner.c b/tsl/src/planner.c index c92084923ee..7c29a5d6464 100644 --- a/tsl/src/planner.c +++ b/tsl/src/planner.c @@ -214,4 +214,14 @@ tsl_postprocess_plan(PlannedStmt *stmt) { stmt->planTree = try_insert_vector_agg_node(stmt->planTree); } + + if (ts_guc_debug_require_vector_agg != RVA_Allow) + { + const bool has_vector_agg = has_vector_agg_node(stmt->planTree); + const bool should_have_vector_agg = (ts_guc_debug_require_vector_agg == RVA_Require); + if (has_vector_agg != should_have_vector_agg) + { + elog(ERROR, "vector aggregation inconsistent with debug_require_vector_agg GUC"); + } + } } diff --git a/tsl/test/expected/vector_agg_default.out b/tsl/test/expected/vector_agg_default.out index 4c9e60d43e3..d1498d2bc29 100644 --- a/tsl/test/expected/vector_agg_default.out +++ b/tsl/test/expected/vector_agg_default.out @@ -55,7 +55,9 @@ select sum(c) from dvagg; 17982 (1 row) ----- Uncomment to generate reference. +set timescaledb.debug_require_vector_agg = 'require'; +---- Uncomment to generate reference +--set timescaledb.debug_require_vector_agg = 'forbid'; --set timescaledb.enable_vectorized_aggregation to off; -- Vectorized aggregation should work with vectorized filters. select sum(c) from dvagg where b >= 0; @@ -172,7 +174,16 @@ explain (costs off) select sum(c) from dvagg where a < stable_abs(1000); -> Seq Scan on compress_hyper_2_2_chunk (7 rows) +-- The case with HAVING can still be vectorized because it is applied after +-- final aggregation. +select sum(c) from dvagg having sum(c) > 0; + sum +------- + 17982 +(1 row) + -- Some negative cases. 
+set timescaledb.debug_require_vector_agg to 'forbid'; explain (costs off) select sum(c) from dvagg group by grouping sets ((), (a)); QUERY PLAN ----------------------------------------------------------------------------------- @@ -190,22 +201,6 @@ explain (costs off) select sum(c) from dvagg group by grouping sets ((), (a)); -> Seq Scan on compress_hyper_2_4_chunk (12 rows) -explain (costs off) select sum(c) from dvagg having sum(c) > 0; - QUERY PLAN ------------------------------------------------------------------------------ - Finalize Aggregate - Filter: (sum(_hyper_1_1_chunk.c) > 0) - -> Gather - Workers Planned: 2 - -> Parallel Append - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk - -> Parallel Seq Scan on compress_hyper_2_2_chunk - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk - -> Parallel Seq Scan on compress_hyper_2_4_chunk -(11 rows) - -- As a reference, the result on decompressed table. select decompress_chunk(show_chunks('dvagg')); decompress_chunk diff --git a/tsl/test/expected/vector_agg_segmentby.out b/tsl/test/expected/vector_agg_segmentby.out index 3127c5d070f..93534be71ee 100644 --- a/tsl/test/expected/vector_agg_segmentby.out +++ b/tsl/test/expected/vector_agg_segmentby.out @@ -27,22 +27,45 @@ select count(compress_chunk(x)) from show_chunks('svagg') x; analyze svagg; set max_parallel_workers_per_gather = 0; -explain (costs off) -select s, sum(t), count(*) from svagg group by s order by s; - QUERY PLAN ---------------------------------------------------------------------------------------------------------------------------- - Finalize GroupAggregate - Group Key: svagg.s - -> Custom Scan (ChunkAppend) on svagg - Order: svagg.s - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk - -> Index Scan using compress_hyper_2_3_chunk_s__ts_meta_sequence_num_idx on compress_hyper_2_3_chunk - -> Custom Scan (VectorAgg) - -> Custom Scan (DecompressChunk) on _hyper_1_2_chunk - -> Index Scan using compress_hyper_2_4_chunk_s__ts_meta_sequence_num_idx on compress_hyper_2_4_chunk -(10 rows) +-- Check that the debug GUC actually works. 
+\set ON_ERROR_STOP 0 +set timescaledb.debug_require_vector_agg = 'require'; +set timescaledb.enable_vectorized_aggregation to off; +select sum(t) from svagg; +ERROR: vector aggregation inconsistent with debug_require_vector_agg GUC +set timescaledb.debug_require_vector_agg = 'forbid'; +set timescaledb.enable_vectorized_aggregation to off; +select sum(t) from svagg; + sum +------------- + 11000100000 +(1 row) + +set timescaledb.debug_require_vector_agg = 'forbid'; +set timescaledb.enable_vectorized_aggregation to on; +select sum(t) from svagg; +ERROR: vector aggregation inconsistent with debug_require_vector_agg GUC +set timescaledb.debug_require_vector_agg = 'require'; +set timescaledb.enable_vectorized_aggregation to on; +select sum(t) from svagg; + sum +------------- + 11000100000 +(1 row) + +set timescaledb.debug_require_vector_agg = 'allow'; +set timescaledb.enable_vectorized_aggregation to on; +select sum(t) from svagg; + sum +------------- + 11000100000 +(1 row) +\set ON_ERROR_STOP 1 +set timescaledb.debug_require_vector_agg = 'require'; +---- Uncomment to generate reference +--set timescaledb.debug_require_vector_agg = 'forbid'; +--set timescaledb.enable_vectorized_aggregation to off; select s, sum(t), count(*) from svagg where f >= 0 group by s order by s; s | sum | count ---+------------+------- @@ -109,6 +132,7 @@ select s, sum(t), count(*) from svagg where f > 10 group by s order by s (0 rows) -- this should be vectorized as well but isn't because of the projection. +set timescaledb.debug_require_vector_agg to 'forbid'; explain (costs off) select sum(t), s, count(*) from svagg group by s order by s; QUERY PLAN diff --git a/tsl/test/sql/CMakeLists.txt b/tsl/test/sql/CMakeLists.txt index ada403e1110..2dcfab79a9a 100644 --- a/tsl/test/sql/CMakeLists.txt +++ b/tsl/test/sql/CMakeLists.txt @@ -37,9 +37,7 @@ set(TEST_FILES size_utils_tsl.sql skip_scan.sql transparent_decompression_join_index.sql - vector_agg_default.sql vector_agg_param.sql - vector_agg_segmentby.sql vectorized_aggregation.sql) if(USE_TELEMETRY) @@ -51,23 +49,11 @@ if(CMAKE_BUILD_TYPE MATCHES Debug) APPEND TEST_FILES bgw_db_scheduler.sql + bgw_db_scheduler_fixed.sql bgw_job_stat_history.sql bgw_job_stat_history_errors.sql bgw_job_stat_history_errors_permissions.sql - bgw_db_scheduler_fixed.sql bgw_reorder_drop_chunks.sql - scheduler_fixed.sql - compress_bgw_reorder_drop_chunks.sql - chunk_api.sql - chunk_merge.sql - chunk_utils_compression.sql - compression_algos.sql - compression_ddl.sql - compression_hypertable.sql - compression_merge.sql - compression_indexscan.sql - compression_segment_meta.sql - compression_sorted_merge_filter.sql cagg_bgw_drop_chunks.sql cagg_drop_chunks.sql cagg_dump.sql @@ -76,25 +62,38 @@ if(CMAKE_BUILD_TYPE MATCHES Debug) cagg_multi.sql cagg_on_cagg.sql cagg_on_cagg_joins.sql - cagg_tableam.sql cagg_policy_run.sql + cagg_tableam.sql + chunk_api.sql + chunk_merge.sql + chunk_utils_compression.sql + compress_bgw_reorder_drop_chunks.sql + compression_algos.sql + compression_ddl.sql + compression_hypertable.sql + compression_indexscan.sql + compression_merge.sql + compression_segment_meta.sql + compression_sorted_merge_filter.sql decompress_memory.sql decompress_vector_qual.sql exp_cagg_monthly.sql exp_cagg_next_gen.sql exp_cagg_origin.sql exp_cagg_timezone.sql + feature_flags.sql + fixed_schedules.sql hypertable_generalization.sql - insert_memory_usage.sql information_view_chunk_count.sql + insert_memory_usage.sql + license_tsl.sql read_only.sql + recompress_chunk_segmentwise.sql + 
scheduler_fixed.sql transparent_decompression_queries.sql tsl_tables.sql - license_tsl.sql - fixed_schedules.sql - recompress_chunk_segmentwise.sql - feature_flags.sql) - + vector_agg_default.sql + vector_agg_segmentby.sql) endif(CMAKE_BUILD_TYPE MATCHES Debug) if((${PG_VERSION_MAJOR} GREATER_EQUAL "14")) diff --git a/tsl/test/sql/vector_agg_default.sql b/tsl/test/sql/vector_agg_default.sql index 8cbe5f11b59..83617ca8746 100644 --- a/tsl/test/sql/vector_agg_default.sql +++ b/tsl/test/sql/vector_agg_default.sql @@ -23,7 +23,9 @@ explain (costs off) select sum(c) from dvagg; select sum(c) from dvagg; ----- Uncomment to generate reference. +set timescaledb.debug_require_vector_agg = 'require'; +---- Uncomment to generate reference +--set timescaledb.debug_require_vector_agg = 'forbid'; --set timescaledb.enable_vectorized_aggregation to off; -- Vectorized aggregation should work with vectorized filters. @@ -51,11 +53,16 @@ reset timescaledb.enable_vectorized_aggregation; -- The runtime chunk exclusion should work. explain (costs off) select sum(c) from dvagg where a < stable_abs(1000); +-- The case with HAVING can still be vectorized because it is applied after +-- final aggregation. +select sum(c) from dvagg having sum(c) > 0; + -- Some negative cases. +set timescaledb.debug_require_vector_agg to 'forbid'; + explain (costs off) select sum(c) from dvagg group by grouping sets ((), (a)); -explain (costs off) select sum(c) from dvagg having sum(c) > 0; -- As a reference, the result on decompressed table. diff --git a/tsl/test/sql/vector_agg_segmentby.sql b/tsl/test/sql/vector_agg_segmentby.sql index d0a19a59f7e..07b8d462440 100644 --- a/tsl/test/sql/vector_agg_segmentby.sql +++ b/tsl/test/sql/vector_agg_segmentby.sql @@ -24,8 +24,37 @@ analyze svagg; set max_parallel_workers_per_gather = 0; -explain (costs off) -select s, sum(t), count(*) from svagg group by s order by s; + +-- Check that the debug GUC actually works. +\set ON_ERROR_STOP 0 + +set timescaledb.debug_require_vector_agg = 'require'; +set timescaledb.enable_vectorized_aggregation to off; +select sum(t) from svagg; + +set timescaledb.debug_require_vector_agg = 'forbid'; +set timescaledb.enable_vectorized_aggregation to off; +select sum(t) from svagg; + +set timescaledb.debug_require_vector_agg = 'forbid'; +set timescaledb.enable_vectorized_aggregation to on; +select sum(t) from svagg; + +set timescaledb.debug_require_vector_agg = 'require'; +set timescaledb.enable_vectorized_aggregation to on; +select sum(t) from svagg; + +set timescaledb.debug_require_vector_agg = 'allow'; +set timescaledb.enable_vectorized_aggregation to on; +select sum(t) from svagg; + +\set ON_ERROR_STOP 1 + + +set timescaledb.debug_require_vector_agg = 'require'; +---- Uncomment to generate reference +--set timescaledb.debug_require_vector_agg = 'forbid'; +--set timescaledb.enable_vectorized_aggregation to off; select s, sum(t), count(*) from svagg where f >= 0 group by s order by s; select s, sum(t), count(*) from svagg where f = 0 group by s order by s; @@ -35,6 +64,7 @@ select s, sum(t), count(*) from svagg where f > 10 group by s order by s -- this should be vectorized as well but isn't because of the projection. 
+set timescaledb.debug_require_vector_agg to 'forbid'; explain (costs off) select sum(t), s, count(*) from svagg group by s order by s; From 8e1651c10c928fda31e9efd4268a70eea6622943 Mon Sep 17 00:00:00 2001 From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com> Date: Wed, 24 Apr 2024 16:44:19 +0200 Subject: [PATCH 81/81] typo --- tsl/src/nodes/vector_agg/plan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tsl/src/nodes/vector_agg/plan.c b/tsl/src/nodes/vector_agg/plan.c index 69b23c5fd94..17e553b85ee 100644 --- a/tsl/src/nodes/vector_agg/plan.c +++ b/tsl/src/nodes/vector_agg/plan.c @@ -251,7 +251,7 @@ can_vectorize_aggref(Aggref *aggref, CustomScan *custom) if (aggref->aggdirectargs != NIL) { - /* Can't process ordered-set agregates with direct arguments. */ + /* Can't process ordered-set aggregates with direct arguments. */ return false; }
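
For illustration only, and not part of the patch itself: the hunk above rejects ordered-set aggregates that take direct arguments. Assuming the dvagg test table used earlier in this series, such a query would look roughly like

    select percentile_cont(0.5) within group (order by c) from dvagg;

Here 0.5 is the direct argument, so can_vectorize_aggref() returns false and the plan keeps the normal partial Agg node for that aggregate instead of a VectorAgg node.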