Skip to content

Commit

Permalink
Support for segment_by filters on vector aggs
Browse files Browse the repository at this point in the history
This patch adds support for vectorized aggregation queries with filters
on the segment_by value of compressed data.
  • Loading branch information
jnidzwetzki committed Nov 1, 2023
1 parent bc5e39d commit 48f4296
Show file tree
Hide file tree
Showing 5 changed files with 678 additions and 32 deletions.
7 changes: 6 additions & 1 deletion tsl/src/nodes/decompress_chunk/exec.c
Original file line number Diff line number Diff line change
Expand Up @@ -514,6 +514,12 @@ perform_vectorized_sum_int4(DecompressChunkState *chunk_state, Aggref *aggref)
TupleTableSlot *decompressed_scan_slot = chunk_state->csstate.ss.ss_ScanTupleSlot;
Assert(decompressed_scan_slot->tts_tupleDescriptor->natts == 1);

/* Set all attributes of the result tuple to NULL. So, we return NULL if no data is processed
* by our implementation. In addition, the call marks the slot as being used (i.e., no
* ExecStoreVirtualTuple call is required). */
ExecStoreAllNullTuple(decompressed_scan_slot);
Assert(!TupIsNull(decompressed_scan_slot));

int64 result_sum = 0;

if (column_description->type == SEGMENTBY_COLUMN)
Expand Down Expand Up @@ -668,7 +674,6 @@ perform_vectorized_sum_int4(DecompressChunkState *chunk_state, Aggref *aggref)
* systems */
decompressed_scan_slot->tts_values[0] = Int64GetDatum(result_sum);

ExecStoreVirtualTuple(decompressed_scan_slot);
return decompressed_scan_slot;
}

Expand Down
9 changes: 2 additions & 7 deletions tsl/src/partialize_agg.c
Original file line number Diff line number Diff line change
Expand Up @@ -28,9 +28,6 @@
#include "utils.h"
#include "debug_assert.h"

#define is_restricted_path(path) \
(list_length(path->parent->baserestrictinfo) > 0 || path->parent->joininfo != NULL)

/*
* Are we able to optimize the path by applying vectorized aggregation?
*/
Expand All @@ -52,14 +49,12 @@ is_vectorizable_agg_path(PlannerInfo *root, AggPath *agg_path, Path *path)
DecompressChunkPath *decompress_path = (DecompressChunkPath *) path;
Assert(decompress_path->custom_path.custom_paths != NIL);

Path *compressed_path = linitial(decompress_path->custom_path.custom_paths);

/* Hypertable compression info is already fetched from the catalog */
Assert(decompress_path->info != NULL);
Assert(decompress_path->info->hypertable_compression_info != NULL);

/* No filters are supported at the moment */
if (is_restricted_path(path) || is_restricted_path(compressed_path))
/* No filters on the compressed attributes are supported at the moment */
if ((list_length(path->parent->baserestrictinfo) > 0 || path->parent->joininfo != NULL))
return false;

/* We currently handle only one agg function per node */
Expand Down
26 changes: 12 additions & 14 deletions tsl/test/expected/compression_qualpushdown.out
Original file line number Diff line number Diff line change
Expand Up @@ -289,24 +289,22 @@ SELECT compress_chunk(i) FROM show_chunks('deleteme') i;
(1 row)

EXPLAIN (costs off) SELECT sum(data) FROM deleteme WHERE segment::text like '%4%';
QUERY PLAN
---------------------------------------------------------------
QUERY PLAN
---------------------------------------------------------
Finalize Aggregate
-> Partial Aggregate
-> Custom Scan (DecompressChunk) on _hyper_7_8_chunk
-> Seq Scan on compress_hyper_8_9_chunk
Filter: ((segment)::text ~~ '%4%'::text)
(5 rows)
-> Custom Scan (DecompressChunk) on _hyper_7_8_chunk
-> Seq Scan on compress_hyper_8_9_chunk
Filter: ((segment)::text ~~ '%4%'::text)
(4 rows)

EXPLAIN (costs off) SELECT sum(data) FROM deleteme WHERE '4' = segment::text;
QUERY PLAN
---------------------------------------------------------------
QUERY PLAN
---------------------------------------------------------
Finalize Aggregate
-> Partial Aggregate
-> Custom Scan (DecompressChunk) on _hyper_7_8_chunk
-> Seq Scan on compress_hyper_8_9_chunk
Filter: ('4'::text = (segment)::text)
(5 rows)
-> Custom Scan (DecompressChunk) on _hyper_7_8_chunk
-> Seq Scan on compress_hyper_8_9_chunk
Filter: ('4'::text = (segment)::text)
(4 rows)

CREATE TABLE deleteme_with_bytea(time bigint NOT NULL, bdata bytea);
SELECT create_hypertable('deleteme_with_bytea', 'time', chunk_time_interval => 1000000);
Expand Down
Loading

0 comments on commit 48f4296

Please sign in to comment.