Skip to content

Commit

Permalink
Clean up compression settings when deleting compressed cagg
Browse files Browse the repository at this point in the history
When deleting a cagg with compression enabled on the materialization
hypertable, the compression settings for that hypertable would not get
removed.
  • Loading branch information
svenklemm committed Apr 28, 2024
1 parent 183d309 commit a7890e6
Show file tree
Hide file tree
Showing 7 changed files with 126 additions and 0 deletions.
1 change: 1 addition & 0 deletions .unreleased/pr_6867
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Fixes: #6867 Clean up compression settings when deleting compressed cagg
2 changes: 2 additions & 0 deletions src/ts_catalog/continuous_agg.c
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@
#include "time_bucket.h"
#include "time_utils.h"
#include "ts_catalog/catalog.h"
#include "ts_catalog/compression_settings.h"
#include "ts_catalog/continuous_agg.h"
#include "ts_catalog/continuous_aggs_watermark.h"
#include "utils.h"
Expand Down Expand Up @@ -1017,6 +1018,7 @@ drop_continuous_agg(FormData_continuous_agg *cadata, bool drop_user_view)
if (OidIsValid(mat_hypertable.objectId))
{
performDeletion(&mat_hypertable, DROP_CASCADE, 0);
ts_compression_settings_delete(mat_hypertable.objectId);
ts_hypertable_delete_by_id(cadata->mat_hypertable_id);
}

Expand Down
28 changes: 28 additions & 0 deletions tsl/test/expected/cagg_ddl-13.out
Original file line number Diff line number Diff line change
Expand Up @@ -2074,3 +2074,31 @@ SELECT * FROM conditions_daily ORDER BY bucket, avg;
NYC | Thu Nov 01 17:00:00 2018 PDT | 15
(6 rows)

-- check compression settings are cleaned up when deleting a cagg with compression
CREATE TABLE cagg_cleanup(time timestamptz not null);
SELECT table_name FROM create_hypertable('cagg_cleanup','time');
table_name
--------------
cagg_cleanup
(1 row)

INSERT INTO cagg_cleanup SELECT '2020-01-01';
CREATE MATERIALIZED VIEW cagg1 WITH (timescaledb.continuous) AS SELECT time_bucket('1h',time) FROM cagg_cleanup GROUP BY 1;
NOTICE: refreshing continuous aggregate "cagg1"
ALTER MATERIALIZED VIEW cagg1 SET (timescaledb.compress);
NOTICE: defaulting compress_orderby to time_bucket
WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes
NOTICE: default segment by for hypertable "_materialized_hypertable_52" is set to ""
SELECT count(compress_chunk(ch)) FROM show_chunks('cagg1') ch;
count
-------
1
(1 row)

DROP MATERIALIZED VIEW cagg1;
NOTICE: drop cascades to table _timescaledb_internal._hyper_52_68_chunk
SELECT * FROM _timescaledb_catalog.compression_settings;
relid | segmentby | orderby | orderby_desc | orderby_nullsfirst
-------+-----------+---------+--------------+--------------------
(0 rows)

28 changes: 28 additions & 0 deletions tsl/test/expected/cagg_ddl-14.out
Original file line number Diff line number Diff line change
Expand Up @@ -2074,3 +2074,31 @@ SELECT * FROM conditions_daily ORDER BY bucket, avg;
NYC | Thu Nov 01 17:00:00 2018 PDT | 15
(6 rows)

-- check compression settings are cleaned up when deleting a cagg with compression
CREATE TABLE cagg_cleanup(time timestamptz not null);
SELECT table_name FROM create_hypertable('cagg_cleanup','time');
table_name
--------------
cagg_cleanup
(1 row)

INSERT INTO cagg_cleanup SELECT '2020-01-01';
CREATE MATERIALIZED VIEW cagg1 WITH (timescaledb.continuous) AS SELECT time_bucket('1h',time) FROM cagg_cleanup GROUP BY 1;
NOTICE: refreshing continuous aggregate "cagg1"
ALTER MATERIALIZED VIEW cagg1 SET (timescaledb.compress);
NOTICE: defaulting compress_orderby to time_bucket
WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes
NOTICE: default segment by for hypertable "_materialized_hypertable_52" is set to ""
SELECT count(compress_chunk(ch)) FROM show_chunks('cagg1') ch;
count
-------
1
(1 row)

DROP MATERIALIZED VIEW cagg1;
NOTICE: drop cascades to table _timescaledb_internal._hyper_52_68_chunk
SELECT * FROM _timescaledb_catalog.compression_settings;
relid | segmentby | orderby | orderby_desc | orderby_nullsfirst
-------+-----------+---------+--------------+--------------------
(0 rows)

28 changes: 28 additions & 0 deletions tsl/test/expected/cagg_ddl-15.out
Original file line number Diff line number Diff line change
Expand Up @@ -2074,3 +2074,31 @@ SELECT * FROM conditions_daily ORDER BY bucket, avg;
NYC | Thu Nov 01 17:00:00 2018 PDT | 15
(6 rows)

-- check compression settings are cleaned up when deleting a cagg with compression
CREATE TABLE cagg_cleanup(time timestamptz not null);
SELECT table_name FROM create_hypertable('cagg_cleanup','time');
table_name
--------------
cagg_cleanup
(1 row)

INSERT INTO cagg_cleanup SELECT '2020-01-01';
CREATE MATERIALIZED VIEW cagg1 WITH (timescaledb.continuous) AS SELECT time_bucket('1h',time) FROM cagg_cleanup GROUP BY 1;
NOTICE: refreshing continuous aggregate "cagg1"
ALTER MATERIALIZED VIEW cagg1 SET (timescaledb.compress);
NOTICE: defaulting compress_orderby to time_bucket
WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes
NOTICE: default segment by for hypertable "_materialized_hypertable_52" is set to ""
SELECT count(compress_chunk(ch)) FROM show_chunks('cagg1') ch;
count
-------
1
(1 row)

DROP MATERIALIZED VIEW cagg1;
NOTICE: drop cascades to table _timescaledb_internal._hyper_52_68_chunk
SELECT * FROM _timescaledb_catalog.compression_settings;
relid | segmentby | orderby | orderby_desc | orderby_nullsfirst
-------+-----------+---------+--------------+--------------------
(0 rows)

28 changes: 28 additions & 0 deletions tsl/test/expected/cagg_ddl-16.out
Original file line number Diff line number Diff line change
Expand Up @@ -2074,3 +2074,31 @@ SELECT * FROM conditions_daily ORDER BY bucket, avg;
NYC | Thu Nov 01 17:00:00 2018 PDT | 15
(6 rows)

-- check compression settings are cleaned up when deleting a cagg with compression
CREATE TABLE cagg_cleanup(time timestamptz not null);
SELECT table_name FROM create_hypertable('cagg_cleanup','time');
table_name
--------------
cagg_cleanup
(1 row)

INSERT INTO cagg_cleanup SELECT '2020-01-01';
CREATE MATERIALIZED VIEW cagg1 WITH (timescaledb.continuous) AS SELECT time_bucket('1h',time) FROM cagg_cleanup GROUP BY 1;
NOTICE: refreshing continuous aggregate "cagg1"
ALTER MATERIALIZED VIEW cagg1 SET (timescaledb.compress);
NOTICE: defaulting compress_orderby to time_bucket
WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes
NOTICE: default segment by for hypertable "_materialized_hypertable_52" is set to ""
SELECT count(compress_chunk(ch)) FROM show_chunks('cagg1') ch;
count
-------
1
(1 row)

DROP MATERIALIZED VIEW cagg1;
NOTICE: drop cascades to table _timescaledb_internal._hyper_52_68_chunk
SELECT * FROM _timescaledb_catalog.compression_settings;
relid | segmentby | orderby | orderby_desc | orderby_nullsfirst
-------+-----------+---------+--------------+--------------------
(0 rows)

11 changes: 11 additions & 0 deletions tsl/test/sql/cagg_ddl.sql.in
Original file line number Diff line number Diff line change
Expand Up @@ -1320,3 +1320,14 @@ SELECT _timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark

-- Should return ROWS because the watermark was reset by the TRUNCATE
SELECT * FROM conditions_daily ORDER BY bucket, avg;

-- check compression settings are cleaned up when deleting a cagg with compression
CREATE TABLE cagg_cleanup(time timestamptz not null);
SELECT table_name FROM create_hypertable('cagg_cleanup','time');
INSERT INTO cagg_cleanup SELECT '2020-01-01';
CREATE MATERIALIZED VIEW cagg1 WITH (timescaledb.continuous) AS SELECT time_bucket('1h',time) FROM cagg_cleanup GROUP BY 1;
ALTER MATERIALIZED VIEW cagg1 SET (timescaledb.compress);
SELECT count(compress_chunk(ch)) FROM show_chunks('cagg1') ch;
DROP MATERIALIZED VIEW cagg1;
SELECT * FROM _timescaledb_catalog.compression_settings;

0 comments on commit a7890e6

Please sign in to comment.