diff --git a/myhoard/controller.py b/myhoard/controller.py
index 0abc372..7e1215d 100644
--- a/myhoard/controller.py
+++ b/myhoard/controller.py
@@ -247,6 +247,7 @@ def is_safe_to_reload(self) -> bool:
         with self.lock:
             for stream in self.backup_streams:
                 if stream.active_phase == BackupStream.ActivePhase.basebackup:
+                    self.log.info("Not safe to reload while taking basebackup")
                     return False
         return True
 
@@ -1293,7 +1294,7 @@ def _process_removed_binlogs(self, binlogs):
             stream.remove_binlogs(binlogs)
 
     def _purge_old_backups(self):
-        purgeable = [backup for backup in self.state["backups"] if backup["completed_at"]]
+        purgeable = [backup for backup in self.state["backups"] if backup["completed_at"] and not backup["recovery_site"]]
         broken_backups_count = sum(backup["broken_at"] is not None for backup in purgeable)
         # do not consider broken backups for the count, they will still be purged
         # but we should only purge when the count of non-broken backups has exceeded the limit.
diff --git a/test/test_controller.py b/test/test_controller.py
index 3ce6cce..9cfc759 100644
--- a/test/test_controller.py
+++ b/test/test_controller.py
@@ -1781,6 +1781,62 @@ def restoration_has_phase(phase):
     new_controller.stop()
 
 
+@patch.object(BackupStream, "remove", autospec=True)
+def test_purge_old_backups_should_not_remove_read_only_backups(
+    mocked_backup_stream_remove: MagicMock,
+    default_backup_site,
+    mysql_empty,
+    session_tmpdir,
+) -> None:
+    # pylint: disable=protected-access
+    controller = build_controller(
+        Controller,
+        default_backup_site=default_backup_site,
+        mysql_config=mysql_empty,
+        session_tmpdir=session_tmpdir,
+    )
+    controller.backup_settings["backup_count_min"] = 0
+    controller.backup_settings["backup_count_max"] = 1
+    # set it to 1 day
+    controller.backup_settings["backup_age_days_max"] = 1
+
+    def remove_backup(backup_stream) -> None:
+        controller.state["backups"] = [
+            backup for backup in controller.state["backups"] if backup["stream_id"] != backup_stream.stream_id
+        ]
+
+    mocked_backup_stream_remove.side_effect = remove_backup
+
+    ten_years_old = time.time() - 3650 * 24 * 60 * 60
+
+    def _append_backup(stream_id: str, ts: float, read_only: bool = True) -> None:
+        controller.state["backups"].append(
+            {
+                "basebackup_info": {"end_ts": ts},
+                "closed_at": ts,
+                "completed_at": ts,
+                "broken_at": None,
+                "preserve_until": None,
+                "recovery_site": read_only,
+                "stream_id": stream_id,
+                "resumable": True,
+                "site": "default",
+            }
+        )
+
+    _append_backup(stream_id="1", ts=ten_years_old, read_only=True)  # no matter how old, should be kept if read-only
+    _append_backup(stream_id="2", ts=ten_years_old, read_only=False)  # old backup, should be purged
+    _append_backup(stream_id="3", ts=time.time(), read_only=False)  # latest backup
+
+    # since _purge_old_backups only purges one backup per call, try to call maximum possible times
+    controller._purge_old_backups()
+    controller._purge_old_backups()
+    controller._purge_old_backups()
+    backup_list = controller.state["backups"]
+    assert len(backup_list) == 2
+    assert all(b["stream_id"] != "2" for b in backup_list)  # backup "2" should be deleted
+
+
 @patch.object(BackupStream, "remove", autospec=True)
 def test_purge_old_backups_exceeding_backup_age_days_max(
     mocked_backup_stream_remove: MagicMock,