diff --git a/.unreleased/pr_6867 b/.unreleased/pr_6867
new file mode 100644
index 00000000000..03690e0920d
--- /dev/null
+++ b/.unreleased/pr_6867
@@ -0,0 +1 @@
+Fixes: #6867 Clean up compression settings when deleting compressed cagg
diff --git a/src/ts_catalog/continuous_agg.c b/src/ts_catalog/continuous_agg.c
index 2041020d909..d21d55c964c 100644
--- a/src/ts_catalog/continuous_agg.c
+++ b/src/ts_catalog/continuous_agg.c
@@ -38,6 +38,7 @@
 #include "time_bucket.h"
 #include "time_utils.h"
 #include "ts_catalog/catalog.h"
+#include "ts_catalog/compression_settings.h"
 #include "ts_catalog/continuous_agg.h"
 #include "ts_catalog/continuous_aggs_watermark.h"
 #include "utils.h"
@@ -1017,6 +1018,7 @@ drop_continuous_agg(FormData_continuous_agg *cadata, bool drop_user_view)
 	if (OidIsValid(mat_hypertable.objectId))
 	{
 		performDeletion(&mat_hypertable, DROP_CASCADE, 0);
+		ts_compression_settings_delete(mat_hypertable.objectId);
 		ts_hypertable_delete_by_id(cadata->mat_hypertable_id);
 	}
 
diff --git a/tsl/test/expected/cagg_ddl-13.out b/tsl/test/expected/cagg_ddl-13.out
index a3862ac53b2..0811f37f80a 100644
--- a/tsl/test/expected/cagg_ddl-13.out
+++ b/tsl/test/expected/cagg_ddl-13.out
@@ -2074,3 +2074,31 @@ SELECT * FROM conditions_daily ORDER BY bucket, avg;
  NYC | Thu Nov 01 17:00:00 2018 PDT | 15
 (6 rows)
 
+-- check compression settings are cleaned up when deleting a cagg with compression
+CREATE TABLE cagg_cleanup(time timestamptz not null);
+SELECT table_name FROM create_hypertable('cagg_cleanup','time');
+  table_name
+--------------
+ cagg_cleanup
+(1 row)
+
+INSERT INTO cagg_cleanup SELECT '2020-01-01';
+CREATE MATERIALIZED VIEW cagg1 WITH (timescaledb.continuous) AS SELECT time_bucket('1h',time) FROM cagg_cleanup GROUP BY 1;
+NOTICE:  refreshing continuous aggregate "cagg1"
+ALTER MATERIALIZED VIEW cagg1 SET (timescaledb.compress);
+NOTICE:  defaulting compress_orderby to time_bucket
+WARNING:  there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes
+NOTICE:  default segment by for hypertable "_materialized_hypertable_52" is set to ""
+SELECT count(compress_chunk(ch)) FROM show_chunks('cagg1') ch;
+ count
+-------
+     1
+(1 row)
+
+DROP MATERIALIZED VIEW cagg1;
+NOTICE:  drop cascades to table _timescaledb_internal._hyper_52_68_chunk
+SELECT * FROM _timescaledb_catalog.compression_settings;
+ relid | segmentby | orderby | orderby_desc | orderby_nullsfirst
+-------+-----------+---------+--------------+--------------------
+(0 rows)
+
diff --git a/tsl/test/expected/cagg_ddl-14.out b/tsl/test/expected/cagg_ddl-14.out
index a3862ac53b2..0811f37f80a 100644
--- a/tsl/test/expected/cagg_ddl-14.out
+++ b/tsl/test/expected/cagg_ddl-14.out
@@ -2074,3 +2074,31 @@ SELECT * FROM conditions_daily ORDER BY bucket, avg;
  NYC | Thu Nov 01 17:00:00 2018 PDT | 15
 (6 rows)
 
+-- check compression settings are cleaned up when deleting a cagg with compression
+CREATE TABLE cagg_cleanup(time timestamptz not null);
+SELECT table_name FROM create_hypertable('cagg_cleanup','time');
+  table_name
+--------------
+ cagg_cleanup
+(1 row)
+
+INSERT INTO cagg_cleanup SELECT '2020-01-01';
+CREATE MATERIALIZED VIEW cagg1 WITH (timescaledb.continuous) AS SELECT time_bucket('1h',time) FROM cagg_cleanup GROUP BY 1;
+NOTICE:  refreshing continuous aggregate "cagg1"
+ALTER MATERIALIZED VIEW cagg1 SET (timescaledb.compress);
+NOTICE:  defaulting compress_orderby to time_bucket
+WARNING:  there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes
+NOTICE:  default segment by for hypertable "_materialized_hypertable_52" is set to ""
+SELECT count(compress_chunk(ch)) FROM show_chunks('cagg1') ch;
+ count
+-------
+     1
+(1 row)
+
+DROP MATERIALIZED VIEW cagg1;
+NOTICE:  drop cascades to table _timescaledb_internal._hyper_52_68_chunk
+SELECT * FROM _timescaledb_catalog.compression_settings;
+ relid | segmentby | orderby | orderby_desc | orderby_nullsfirst
+-------+-----------+---------+--------------+--------------------
+(0 rows)
+
diff --git a/tsl/test/expected/cagg_ddl-15.out b/tsl/test/expected/cagg_ddl-15.out
index a3862ac53b2..0811f37f80a 100644
--- a/tsl/test/expected/cagg_ddl-15.out
+++ b/tsl/test/expected/cagg_ddl-15.out
@@ -2074,3 +2074,31 @@ SELECT * FROM conditions_daily ORDER BY bucket, avg;
  NYC | Thu Nov 01 17:00:00 2018 PDT | 15
 (6 rows)
 
+-- check compression settings are cleaned up when deleting a cagg with compression
+CREATE TABLE cagg_cleanup(time timestamptz not null);
+SELECT table_name FROM create_hypertable('cagg_cleanup','time');
+  table_name
+--------------
+ cagg_cleanup
+(1 row)
+
+INSERT INTO cagg_cleanup SELECT '2020-01-01';
+CREATE MATERIALIZED VIEW cagg1 WITH (timescaledb.continuous) AS SELECT time_bucket('1h',time) FROM cagg_cleanup GROUP BY 1;
+NOTICE:  refreshing continuous aggregate "cagg1"
+ALTER MATERIALIZED VIEW cagg1 SET (timescaledb.compress);
+NOTICE:  defaulting compress_orderby to time_bucket
+WARNING:  there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes
+NOTICE:  default segment by for hypertable "_materialized_hypertable_52" is set to ""
+SELECT count(compress_chunk(ch)) FROM show_chunks('cagg1') ch;
+ count
+-------
+     1
+(1 row)
+
+DROP MATERIALIZED VIEW cagg1;
+NOTICE:  drop cascades to table _timescaledb_internal._hyper_52_68_chunk
+SELECT * FROM _timescaledb_catalog.compression_settings;
+ relid | segmentby | orderby | orderby_desc | orderby_nullsfirst
+-------+-----------+---------+--------------+--------------------
+(0 rows)
+
diff --git a/tsl/test/expected/cagg_ddl-16.out b/tsl/test/expected/cagg_ddl-16.out
index cb8aabe1d05..65e5a1b8e40 100644
--- a/tsl/test/expected/cagg_ddl-16.out
+++ b/tsl/test/expected/cagg_ddl-16.out
@@ -2074,3 +2074,31 @@ SELECT * FROM conditions_daily ORDER BY bucket, avg;
  NYC | Thu Nov 01 17:00:00 2018 PDT | 15
 (6 rows)
 
+-- check compression settings are cleaned up when deleting a cagg with compression
+CREATE TABLE cagg_cleanup(time timestamptz not null);
+SELECT table_name FROM create_hypertable('cagg_cleanup','time');
+  table_name
+--------------
+ cagg_cleanup
+(1 row)
+
+INSERT INTO cagg_cleanup SELECT '2020-01-01';
+CREATE MATERIALIZED VIEW cagg1 WITH (timescaledb.continuous) AS SELECT time_bucket('1h',time) FROM cagg_cleanup GROUP BY 1;
+NOTICE:  refreshing continuous aggregate "cagg1"
+ALTER MATERIALIZED VIEW cagg1 SET (timescaledb.compress);
+NOTICE:  defaulting compress_orderby to time_bucket
+WARNING:  there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes
+NOTICE:  default segment by for hypertable "_materialized_hypertable_52" is set to ""
+SELECT count(compress_chunk(ch)) FROM show_chunks('cagg1') ch;
+ count
+-------
+     1
+(1 row)
+
+DROP MATERIALIZED VIEW cagg1;
+NOTICE:  drop cascades to table _timescaledb_internal._hyper_52_68_chunk
+SELECT * FROM _timescaledb_catalog.compression_settings;
+ relid | segmentby | orderby | orderby_desc | orderby_nullsfirst
+-------+-----------+---------+--------------+--------------------
+(0 rows)
+
diff --git a/tsl/test/sql/cagg_ddl.sql.in b/tsl/test/sql/cagg_ddl.sql.in
index 9ba0a7df0e4..0434e41276b 100644
--- a/tsl/test/sql/cagg_ddl.sql.in
+++ b/tsl/test/sql/cagg_ddl.sql.in
@@ -1320,3 +1320,14 @@ SELECT _timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark
 
 -- Should return ROWS because the watermark was reseted by the TRUNCATE
 SELECT * FROM conditions_daily ORDER BY bucket, avg;
+
+-- check compression settings are cleaned up when deleting a cagg with compression
+CREATE TABLE cagg_cleanup(time timestamptz not null);
+SELECT table_name FROM create_hypertable('cagg_cleanup','time');
+INSERT INTO cagg_cleanup SELECT '2020-01-01';
+CREATE MATERIALIZED VIEW cagg1 WITH (timescaledb.continuous) AS SELECT time_bucket('1h',time) FROM cagg_cleanup GROUP BY 1;
+ALTER MATERIALIZED VIEW cagg1 SET (timescaledb.compress);
+SELECT count(compress_chunk(ch)) FROM show_chunks('cagg1') ch;
+DROP MATERIALIZED VIEW cagg1;
+SELECT * FROM _timescaledb_catalog.compression_settings;
+
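Background on the one-line fix in drop_continuous_agg(): performDeletion() only cascades through PostgreSQL's pg_depend dependency graph, and the row in _timescaledb_catalog.compression_settings is TimescaleDB metadata that graph does not track, so DROP_CASCADE never reached it and the row was left orphaned. For databases that dropped a compressed cagg before this fix, a minimal sketch for spotting such leftovers (not part of the patch; it assumes the catalog layout shown in the expected output above, where relid references the materialized hypertable's relation):

-- Sketch only: list compression_settings rows whose backing relation no
-- longer exists; dropping a compressed cagg pre-fix could leave these behind.
SELECT cs.*
  FROM _timescaledb_catalog.compression_settings cs
 WHERE NOT EXISTS (SELECT 1 FROM pg_class c WHERE c.oid = cs.relid);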