diff --git a/tests/rp_cloud_cleanup.py b/tests/rp_cloud_cleanup.py
index 036c59015b6c..68b9744c4297 100644
--- a/tests/rp_cloud_cleanup.py
+++ b/tests/rp_cloud_cleanup.py
@@ -109,6 +109,7 @@ def __init__(self, log_level=logging.INFO):
 
         # Fugure out which time was 36h back
         self.back_36h = datetime.now() - timedelta(hours=36)
+        self.back_8h = datetime.now() - timedelta(hours=8)
 
     def load_globals(self, path):
         _gconfig = {}
@@ -488,7 +489,7 @@ def get_net_id_from_nat(nat):
 
         return
 
-    def clean_buckets(self, mask=None):
+    def clean_buckets(self, mask=None, eight_hours=False):
         """
             Function list buckets on S3 for cloud cluster storage
             and cleans them up if corresponding cluster is deleted/not exists.
@@ -508,16 +509,17 @@ def clean_buckets(self, mask=None):
                 "Will not delete all buckets for account")
             return
         else:
-            self.log.info(f"# Listing buckets usign '{mask}'")
+            self.log.info(f"# Listing buckets using '{mask}'")
            buckets = self.provider.list_buckets(mask=mask)
        self.log.info(f"# Found buckets: {len(buckets)}")
        for bucket in buckets:
            self.log.info(f"-> Processing '{bucket['Name']}'")
            # Check creation time
            created = bucket['CreationDate']
-           if created.timestamp() > self.back_36h.timestamp():
+           ts = self.back_8h if eight_hours else self.back_36h
+           if created.timestamp() > ts.timestamp():
                _d = created.strftime(ns_name_date_fmt)
-               self.log.info(f"...36h not passed; created at {_d}'; "
+               self.log.info(f"...8h not passed; created at {_d}'; "
                              "skipped")
                continue
            # Do not need to check the cluster status in CloudV2 API
@@ -602,7 +604,7 @@ def cleanup_entrypoint():
 
     # Clean buckets for deleted clusters and networks
     if clean_buckets:
-        cleaner.clean_buckets(mask="panda-bucket-")
+        cleaner.clean_buckets(mask="panda-bucket-", eight_hours=True)
         cleaner.clean_buckets(mask="redpanda-cloud-storage-")
         cleaner.clean_buckets(mask="redpanda-network-logs-")
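
The snippet below is a minimal, self-contained sketch (not part of the patch) of the age-cutoff selection this change introduces: "panda-bucket-" buckets are now eligible for cleanup after 8 hours instead of 36, chosen via the new eight_hours flag. The helper name bucket_too_young is hypothetical and only mirrors the comparison done in clean_buckets().

# Hypothetical standalone sketch of the cutoff logic added in this diff.
from datetime import datetime, timedelta

def bucket_too_young(created: datetime, eight_hours: bool = False) -> bool:
    # Pick the cutoff the same way the patched clean_buckets() does:
    # 8 hours back when eight_hours is set, otherwise 36 hours back.
    cutoff = datetime.now() - timedelta(hours=8 if eight_hours else 36)
    # Buckets created after the cutoff are skipped by the cleaner.
    return created.timestamp() > cutoff.timestamp()

# Example: a bucket created 10 hours ago passes the 8h check but not the 36h one.
created = datetime.now() - timedelta(hours=10)
assert not bucket_too_young(created, eight_hours=True)
assert bucket_too_young(created, eight_hours=False)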