From 97c05fe0b51c8d609e716ac9a430a0019cdaf930 Mon Sep 17 00:00:00 2001
From: Ante Kresic
Date: Thu, 15 Dec 2022 09:59:42 +0100
Subject: [PATCH] Fix index att number calculation

Attribute offset was used by mistake where attribute number was needed
causing wrong values to be fetched when scanning compressed chunk index.
---
 tsl/src/compression/compression.c       |  4 +-
 tsl/test/expected/compression_merge.out | 52 +++++++++++++++++++++----
 tsl/test/sql/compression_merge.sql      | 19 ++++++---
 3 files changed, 61 insertions(+), 14 deletions(-)

diff --git a/tsl/src/compression/compression.c b/tsl/src/compression/compression.c
index 043533f9be1..5e674e2b549 100644
--- a/tsl/src/compression/compression.c
+++ b/tsl/src/compression/compression.c
@@ -632,8 +632,8 @@ index_scan_sequence_number(Relation table_rel, Oid index_oid, ScanKeyData *scank
 	if (index_getnext_tid(index_scan, BackwardScanDirection))
 	{
 		result = index_getattr(index_scan->xs_itup,
-							   index_scan->xs_itupdesc->natts -
-								   1, /* Last attribute of the index is sequence number. */
+							   index_scan->xs_itupdesc
+								   ->natts, /* Last attribute of the index is sequence number. */
 							   index_scan->xs_itupdesc,
 							   &is_null);
 		if (is_null)
diff --git a/tsl/test/expected/compression_merge.out b/tsl/test/expected/compression_merge.out
index 8815f1ef527..7b17277400a 100644
--- a/tsl/test/expected/compression_merge.out
+++ b/tsl/test/expected/compression_merge.out
@@ -253,11 +253,28 @@ NOTICE: adding not-null constraint to column "Time"
 INSERT INTO test5 SELECT t, 1, gen_rand_minstd() FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-03 0:59', '1 minute') t;
 -- Compression is set to merge those 24 chunks into 1 24 hour chunk
 ALTER TABLE test5 set (timescaledb.compress, timescaledb.compress_segmentby='i', timescaledb.compress_orderby='"Time"', timescaledb.compress_chunk_time_interval='24 hours');
-SELECT compress_chunk(i) FROM show_chunks('test5') i LIMIT 1;
+SELECT
+  $$
+  SELECT * FROM test5 ORDER BY i, "Time"
+  $$ AS "QUERY" \gset
+SELECT compress_chunk(i) FROM show_chunks('test5') i LIMIT 4;
              compress_chunk
 ------------------------------------------
  _timescaledb_internal._hyper_9_163_chunk
-(1 row)
+ _timescaledb_internal._hyper_9_163_chunk
+ _timescaledb_internal._hyper_9_163_chunk
+ _timescaledb_internal._hyper_9_163_chunk
+(4 rows)
+
+-- Make sure sequence numbers are correctly fetched from index.
+SELECT _ts_meta_sequence_num FROM _timescaledb_internal.compress_hyper_10_187_chunk where i = 1;
+ _ts_meta_sequence_num
+-----------------------
+                    10
+                    20
+                    30
+                    40
+(4 rows)
 
 SELECT schemaname || '.' || indexname AS "INDEXNAME"
 FROM pg_indexes i
@@ -267,10 +284,31 @@ LIMIT 1 \gset
 DROP INDEX :INDEXNAME;
 -- We dropped the index from compressed chunk thats needed to determine sequence numbers
 -- during merge, merging will fallback to doing heap scans and work just fine.
-SELECT
-  $$
-  SELECT * FROM test5 ORDER BY i, "Time"
-  $$ AS "QUERY" \gset
+SELECT compress_chunk(i, true) FROM show_chunks('test5') i LIMIT 5;
+NOTICE: chunk "_hyper_9_163_chunk" is already compressed
+              compress_chunk
+------------------------------------------
+ _timescaledb_internal._hyper_9_163_chunk
+ _timescaledb_internal._hyper_9_163_chunk
+ _timescaledb_internal._hyper_9_163_chunk
+ _timescaledb_internal._hyper_9_163_chunk
+ _timescaledb_internal._hyper_9_163_chunk
+(5 rows)
+
+-- Make sure sequence numbers are correctly fetched from heap.
+SELECT _ts_meta_sequence_num FROM _timescaledb_internal.compress_hyper_10_187_chunk where i = 1;
+ _ts_meta_sequence_num
+-----------------------
+                    10
+                    20
+                    30
+                    40
+                    50
+                    60
+                    70
+                    80
+(8 rows)
+
 SELECT 'test5' AS "HYPERTABLE_NAME" \gset
 \ir include/compression_test_merge.sql
 -- This file and its contents are licensed under the Timescale License.
@@ -280,7 +318,7 @@ SELECT 'test5' AS "HYPERTABLE_NAME" \gset
 psql:include/compression_test_merge.sql:12: NOTICE: chunk "_hyper_9_163_chunk" is already compressed
  count_compressed 
 ------------------
-               24
+               17
 (1 row)
 
  ?column? | count 
diff --git a/tsl/test/sql/compression_merge.sql b/tsl/test/sql/compression_merge.sql
index 3101c53d78b..6774eb87b57 100644
--- a/tsl/test/sql/compression_merge.sql
+++ b/tsl/test/sql/compression_merge.sql
@@ -105,7 +105,15 @@ INSERT INTO test5 SELECT t, 1, gen_rand_minstd() FROM generate_series('2018-03-0
 -- Compression is set to merge those 24 chunks into 1 24 hour chunk
 ALTER TABLE test5 set (timescaledb.compress, timescaledb.compress_segmentby='i', timescaledb.compress_orderby='"Time"', timescaledb.compress_chunk_time_interval='24 hours');
 
-SELECT compress_chunk(i) FROM show_chunks('test5') i LIMIT 1;
+SELECT
+  $$
+  SELECT * FROM test5 ORDER BY i, "Time"
+  $$ AS "QUERY" \gset
+
+SELECT compress_chunk(i) FROM show_chunks('test5') i LIMIT 4;
+
+-- Make sure sequence numbers are correctly fetched from index.
+SELECT _ts_meta_sequence_num FROM _timescaledb_internal.compress_hyper_10_187_chunk where i = 1;
 
 SELECT schemaname || '.' || indexname AS "INDEXNAME"
 FROM pg_indexes i
@@ -113,14 +121,15 @@ INNER JOIN _timescaledb_catalog.chunk cc ON i.schemaname = cc.schema_name and i.
 INNER JOIN _timescaledb_catalog.chunk c ON (cc.id = c.compressed_chunk_id)
 LIMIT 1 \gset
 
+
 DROP INDEX :INDEXNAME;
 -- We dropped the index from compressed chunk thats needed to determine sequence numbers
 -- during merge, merging will fallback to doing heap scans and work just fine.
 
-SELECT
-  $$
-  SELECT * FROM test5 ORDER BY i, "Time"
-  $$ AS "QUERY" \gset
+SELECT compress_chunk(i, true) FROM show_chunks('test5') i LIMIT 5;
+
+-- Make sure sequence numbers are correctly fetched from heap.
+SELECT _ts_meta_sequence_num FROM _timescaledb_internal.compress_hyper_10_187_chunk where i = 1;
 
 SELECT 'test5' AS "HYPERTABLE_NAME" \gset
 \ir include/compression_test_merge.sql
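
For context on the one-line change in compression.c: PostgreSQL's index_getattr() takes a 1-based attribute number, not a 0-based offset, so for an index tuple with natts attributes the last attribute is number natts, while natts - 1 silently reads the second-to-last column. The sketch below is not part of the patch; the helper name is hypothetical and it only illustrates the corrected call pattern.

/*
 * Minimal sketch: fetch the last attribute of the current index tuple.
 * index_getattr() expects a 1-based AttrNumber, so the last attribute of a
 * tuple with `natts` attributes is attribute number `natts`.
 */
#include "postgres.h"
#include "access/genam.h"
#include "access/itup.h"
#include "access/relscan.h"

/* Hypothetical helper; assumes the scan was opened with xs_want_itup set,
 * so scan->xs_itup and scan->xs_itupdesc are populated. */
static Datum
fetch_last_index_attr(IndexScanDesc scan, bool *is_null)
{
	return index_getattr(scan->xs_itup,
						 scan->xs_itupdesc->natts, /* last attribute, 1-based */
						 scan->xs_itupdesc,
						 is_null);
}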