From d1ba0ee788a242c046f828914f536299290edb89 Mon Sep 17 00:00:00 2001 From: Andrew Wong Date: Wed, 8 May 2024 14:00:51 -0700 Subject: [PATCH 1/9] storage: rename log_manager::collection_threshold I found the name a bit vague in trying to understand what this code was doing. This updates it to lowest_ts_to_retain(), and updates the disk_space_manager to use it rather than duplicating the logic. --- src/v/resource_mgmt/storage.cc | 15 +++------------ src/v/storage/log_manager.cc | 24 ++++++++++++------------ src/v/storage/log_manager.h | 4 ++++ 3 files changed, 19 insertions(+), 24 deletions(-) diff --git a/src/v/resource_mgmt/storage.cc b/src/v/resource_mgmt/storage.cc index 91d6d0df56d0..d6e01a97b4be 100644 --- a/src/v/resource_mgmt/storage.cc +++ b/src/v/resource_mgmt/storage.cc @@ -278,19 +278,10 @@ eviction_policy::collect_reclaimable_offsets() { /* * retention settings mirror settings found in housekeeping() */ - const auto collection_threshold = [this] { - const auto& lm = _storage->local().log_mgr(); - if (!lm.config().log_retention().has_value()) { - return model::timestamp(0); - } - const auto now = model::timestamp::now().value(); - const auto retention = lm.config().log_retention().value().count(); - return model::timestamp(now - retention); - }; - + const auto collection_ts + = _storage->local().log_mgr().lowest_ts_to_retain(); gc_config cfg( - collection_threshold(), - _storage->local().log_mgr().config().retention_bytes()); + collection_ts, _storage->local().log_mgr().config().retention_bytes()); /* * in smallish batches partitions are queried for their reclaimable diff --git a/src/v/storage/log_manager.cc b/src/v/storage/log_manager.cc index 2d59944fa4d5..9f95ddeedb77 100644 --- a/src/v/storage/log_manager.cc +++ b/src/v/storage/log_manager.cc @@ -310,19 +310,19 @@ ss::future<> log_manager::housekeeping() { } } +model::timestamp log_manager::lowest_ts_to_retain() const { + if (!_config.log_retention().has_value()) { + return model::timestamp(0); + } 
+ const auto now = model::timestamp::now().value(); + const auto retention = _config.log_retention().value().count(); + return model::timestamp(now - retention); +} + ss::future<> log_manager::housekeeping_loop() { /* * data older than this threshold may be garbage collected */ - const auto collection_threshold = [this] { - if (!_config.log_retention().has_value()) { - return model::timestamp(0); - } - const auto now = model::timestamp::now().value(); - const auto retention = _config.log_retention().value().count(); - return model::timestamp(now - retention); - }; - while (true) { try { const auto prev_jitter_base = _jitter.base_duration(); @@ -383,7 +383,7 @@ ss::future<> log_manager::housekeeping_loop() { auto ntp = log_meta.handle->config().ntp(); auto usage = co_await log_meta.handle->disk_usage( - gc_config(collection_threshold(), _config.retention_bytes())); + gc_config(lowest_ts_to_retain(), _config.retention_bytes())); /* * NOTE: this estimate is for local retention policy only. 
for a @@ -405,7 +405,7 @@ ss::future<> log_manager::housekeeping_loop() { continue; } co_await log->gc( - gc_config(collection_threshold(), _config.retention_bytes())); + gc_config(lowest_ts_to_retain(), _config.retention_bytes())); } } @@ -432,7 +432,7 @@ ss::future<> log_manager::housekeeping_loop() { auto prev_sg = co_await ss::coroutine::switch_to(_config.compaction_sg); try { - co_await housekeeping_scan(collection_threshold()); + co_await housekeeping_scan(lowest_ts_to_retain()); } catch (const std::exception& e) { vlog(stlog.info, "Error processing housekeeping(): {}", e); } diff --git a/src/v/storage/log_manager.h b/src/v/storage/log_manager.h index dd19868e3188..d03cb505563b 100644 --- a/src/v/storage/log_manager.h +++ b/src/v/storage/log_manager.h @@ -227,6 +227,10 @@ class log_manager { /// Returns all ntp's managed by this instance absl::flat_hash_set get_all_ntps() const; + // Returns the timestamp that should be retained via log.retention.ms, or 0 + // if not configured. + model::timestamp lowest_ts_to_retain() const; + int64_t compaction_backlog() const; storage_resources& resources() { return _resources; } From 0597b477c747593f12afc29285b43459df207c09 Mon Sep 17 00:00:00 2001 From: Andrew Wong Date: Wed, 8 May 2024 14:03:35 -0700 Subject: [PATCH 2/9] resource_mgmt: clarify variable names in manage_disk_space target_excess -> adjusted_target_excess: I find the name more intuitive, despite being more verbose at call sites, as a reader I prefer the reminder that the value has been tweaked --- src/v/resource_mgmt/storage.cc | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/src/v/resource_mgmt/storage.cc b/src/v/resource_mgmt/storage.cc index d6e01a97b4be..8ffcbc181e2b 100644 --- a/src/v/resource_mgmt/storage.cc +++ b/src/v/resource_mgmt/storage.cc @@ -557,10 +557,10 @@ ss::future<> disk_space_manager::manage_data_disk(uint64_t target_size) { * generally we may want to consider a smoother function as well as * 
dynamically adjusting the control loop frequency. */ - const auto target_excess = static_cast( + const auto adjusted_target_excess = static_cast( real_target_excess * config::shard_local_cfg().retention_local_trim_overage_coeff()); - _probe.set_target_excess(target_excess); + _probe.set_target_excess(adjusted_target_excess); /* * when log storage has exceeded the target usage, then there are some knobs @@ -580,7 +580,7 @@ ss::future<> disk_space_manager::manage_data_disk(uint64_t target_size) { * targets for cloud-enabled topics, removing data that has been backed up * into the cloud. */ - if (target_excess > usage.reclaim.retention) { + if (adjusted_target_excess > usage.reclaim.retention) { vlog( rlog.info, "Log storage usage {} > target size {} by {} (adjusted {}). Garbage " @@ -589,34 +589,34 @@ ss::future<> disk_space_manager::manage_data_disk(uint64_t target_size) { human::bytes(usage.usage.total()), human::bytes(target_size), human::bytes(real_target_excess), - human::bytes(target_excess), + human::bytes(adjusted_target_excess), human::bytes(usage.reclaim.retention), - human::bytes(target_excess - usage.reclaim.retention), + human::bytes(adjusted_target_excess - usage.reclaim.retention), human::bytes(usage.reclaim.available)); auto schedule = co_await _policy.create_new_schedule(); if (schedule.sched_size > 0) { auto estimate = _policy.evict_until_local_retention( - schedule, target_excess); + schedule, adjusted_target_excess); _probe.set_reclaim_local(estimate); - if (estimate < target_excess) { + if (estimate < adjusted_target_excess) { const auto amount = _policy.evict_until_low_space_non_hinted( - schedule, target_excess - estimate); + schedule, adjusted_target_excess - estimate); _probe.set_reclaim_low_non_hinted(amount); estimate += amount; } - if (estimate < target_excess) { + if (estimate < adjusted_target_excess) { const auto amount = _policy.evict_until_low_space_hinted( - schedule, target_excess - estimate); + schedule, adjusted_target_excess - 
estimate); _probe.set_reclaim_low_hinted(amount); estimate += amount; } - if (estimate < target_excess) { + if (estimate < adjusted_target_excess) { const auto amount = _policy.evict_until_active_segment( - schedule, target_excess - estimate); + schedule, adjusted_target_excess - estimate); _probe.set_reclaim_active_segment(amount); estimate += amount; } @@ -645,7 +645,7 @@ ss::future<> disk_space_manager::manage_data_disk(uint64_t target_size) { human::bytes(usage.usage.total()), human::bytes(target_size), human::bytes(real_target_excess), - human::bytes(target_excess), + human::bytes(adjusted_target_excess), human::bytes(usage.reclaim.retention)); } From c8eb9e7c734c1b3f5ced14feacd8d8a122ca25d3 Mon Sep 17 00:00:00 2001 From: Andrew Wong Date: Wed, 8 May 2024 14:56:34 -0700 Subject: [PATCH 3/9] resource_mgmt: tweak info log in space management I found the existing logging a bit confusing because "tiered storage retention" isn't widely used verbiage. This tweaks it to refer to this as "space management of tiered storage topics". Also s/recover/remove: it feels a bit clearer, segment removal isn't referred to as "recovering" in other contexts. --- src/v/resource_mgmt/storage.cc | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/v/resource_mgmt/storage.cc b/src/v/resource_mgmt/storage.cc index 8ffcbc181e2b..74e9e0347cb2 100644 --- a/src/v/resource_mgmt/storage.cc +++ b/src/v/resource_mgmt/storage.cc @@ -584,8 +584,9 @@ ss::future<> disk_space_manager::manage_data_disk(uint64_t target_size) { vlog( rlog.info, "Log storage usage {} > target size {} by {} (adjusted {}). Garbage " - "collection expected to recover {}. Overriding tiered storage " - "retention to recover {}. Total estimated available to recover {}", + "collection expected to remove {}. Space management of tiered " + "storage topics to remove {}. 
Total estimated available to remove " + "{}", human::bytes(usage.usage.total()), human::bytes(target_size), human::bytes(real_target_excess), @@ -641,7 +642,8 @@ ss::future<> disk_space_manager::manage_data_disk(uint64_t target_size) { vlog( rlog.info, "Log storage usage {} > target size {} by {} (adjusted {}). Garbage " - "collection expected to recover {}.", + "collection expected to remove {}. No additional space management " + "required", human::bytes(usage.usage.total()), human::bytes(target_size), human::bytes(real_target_excess), From 986264e365c554e7d8599cc3fc5ed392f2c2a15c Mon Sep 17 00:00:00 2001 From: Andrew Wong Date: Wed, 8 May 2024 14:38:48 -0700 Subject: [PATCH 4/9] storage: rename maybe_override_retention_config() The previous name is pretty vague: as a reader it's unclear what it's actually doing other than modifying the config in some way. Updates it to explicitly mention that it is handling local storage overrides. IMO this is easier to reason about when reading the method and its call sites. 
--- src/v/storage/disk_log_impl.cc | 7 ++++--- src/v/storage/disk_log_impl.h | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/v/storage/disk_log_impl.cc b/src/v/storage/disk_log_impl.cc index 4e179acce977..9ccc219a475b 100644 --- a/src/v/storage/disk_log_impl.cc +++ b/src/v/storage/disk_log_impl.cc @@ -1031,7 +1031,8 @@ bool disk_log_impl::has_local_retention_override() const { return false; } -gc_config disk_log_impl::maybe_override_retention_config(gc_config cfg) const { +gc_config +disk_log_impl::maybe_apply_local_storage_overrides(gc_config cfg) const { // Read replica topics have a different default retention if (config().is_read_replica_mode_enabled()) { cfg.eviction_time = std::max( @@ -1162,7 +1163,7 @@ gc_config disk_log_impl::apply_base_overrides(gc_config defaults) const { gc_config disk_log_impl::apply_overrides(gc_config defaults) const { auto ret = apply_base_overrides(defaults); - return maybe_override_retention_config(ret); + return maybe_apply_local_storage_overrides(ret); } ss::future<> disk_log_impl::housekeeping(housekeeping_config cfg) { @@ -3226,7 +3227,7 @@ disk_log_impl::disk_usage_target(gc_config cfg, usage usage) { */ } else { // applies local retention overrides for cloud storage - cfg = maybe_override_retention_config(cfg); + cfg = maybe_apply_local_storage_overrides(cfg); } /* diff --git a/src/v/storage/disk_log_impl.h b/src/v/storage/disk_log_impl.h index 654a59349fd4..50eb62896a93 100644 --- a/src/v/storage/disk_log_impl.h +++ b/src/v/storage/disk_log_impl.h @@ -289,7 +289,7 @@ class disk_log_impl final : public log { // overrides, such as custom topic configs. 
bool has_local_retention_override() const; - gc_config maybe_override_retention_config(gc_config) const; + gc_config maybe_apply_local_storage_overrides(gc_config) const; gc_config override_retention_config(gc_config) const; bool is_cloud_retention_active() const; From fded43a132f54796e7459784004e1c3570c1d13d Mon Sep 17 00:00:00 2001 From: Andrew Wong Date: Wed, 8 May 2024 14:39:45 -0700 Subject: [PATCH 5/9] storage: rename override_retention_config Similar to maybe_override_retention_config(), updates override_retention_config() to be called apply_local_storage_overrides(). --- src/v/storage/disk_log_impl.cc | 8 ++++---- src/v/storage/disk_log_impl.h | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/v/storage/disk_log_impl.cc b/src/v/storage/disk_log_impl.cc index 9ccc219a475b..adb6aec5cae8 100644 --- a/src/v/storage/disk_log_impl.cc +++ b/src/v/storage/disk_log_impl.cc @@ -1064,7 +1064,7 @@ disk_log_impl::maybe_apply_local_storage_overrides(gc_config cfg) const { return cfg; } - cfg = override_retention_config(cfg); + cfg = apply_local_storage_overrides(cfg); vlog( gclog.trace, @@ -1075,7 +1075,7 @@ disk_log_impl::maybe_apply_local_storage_overrides(gc_config cfg) const { return cfg; } -gc_config disk_log_impl::override_retention_config(gc_config cfg) const { +gc_config disk_log_impl::apply_local_storage_overrides(gc_config cfg) const { tristate local_retention_bytes{std::nullopt}; tristate local_retention_ms{std::nullopt}; @@ -3046,7 +3046,7 @@ disk_log_impl::disk_usage_and_reclaimable_space(gc_config input_cfg) { * retention above and takes into account local retention advisory flag. 
*/ auto local_retention_cfg = apply_base_overrides(input_cfg); - local_retention_cfg = override_retention_config(local_retention_cfg); + local_retention_cfg = apply_local_storage_overrides(local_retention_cfg); const auto local_retention_offset = co_await maybe_adjusted_retention_offset(local_retention_cfg); @@ -3508,7 +3508,7 @@ disk_log_impl::get_reclaimable_offsets(gc_config cfg) { * only when local retention is non-advisory. */ cfg = apply_base_overrides(cfg); - cfg = override_retention_config(cfg); + cfg = apply_local_storage_overrides(cfg); const auto local_retention_offset = co_await maybe_adjusted_retention_offset(cfg); diff --git a/src/v/storage/disk_log_impl.h b/src/v/storage/disk_log_impl.h index 50eb62896a93..5cb7367352b2 100644 --- a/src/v/storage/disk_log_impl.h +++ b/src/v/storage/disk_log_impl.h @@ -290,7 +290,7 @@ class disk_log_impl final : public log { bool has_local_retention_override() const; gc_config maybe_apply_local_storage_overrides(gc_config) const; - gc_config override_retention_config(gc_config) const; + gc_config apply_local_storage_overrides(gc_config) const; bool is_cloud_retention_active() const; From de686ce87f9b0b626fb0cca2df3634b246221b5c Mon Sep 17 00:00:00 2001 From: Andrew Wong Date: Wed, 8 May 2024 14:40:35 -0700 Subject: [PATCH 6/9] storage: rename apply_base_overrides Renames apply_base_overrides() to apply_kafka_retention_overrides(). I think it's a bit easier to infer what the Kafka retention overrides are (i.e. the ones that are Kafka compatible: retention.ms, retention.bytes). 
Others considered but not taken: - total_retention - full_retention - consumable_retention - topic_retention --- src/v/storage/disk_log_impl.cc | 11 ++++++----- src/v/storage/disk_log_impl.h | 2 +- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/src/v/storage/disk_log_impl.cc b/src/v/storage/disk_log_impl.cc index adb6aec5cae8..4d328b847ad3 100644 --- a/src/v/storage/disk_log_impl.cc +++ b/src/v/storage/disk_log_impl.cc @@ -1128,7 +1128,8 @@ bool disk_log_impl::is_cloud_retention_active() const { /* * applies overrides for non-cloud storage settings */ -gc_config disk_log_impl::apply_base_overrides(gc_config defaults) const { +gc_config +disk_log_impl::apply_kafka_retention_overrides(gc_config defaults) const { if (!config().has_overrides()) { return defaults; } @@ -1162,7 +1163,7 @@ gc_config disk_log_impl::apply_base_overrides(gc_config defaults) const { } gc_config disk_log_impl::apply_overrides(gc_config defaults) const { - auto ret = apply_base_overrides(defaults); + auto ret = apply_kafka_retention_overrides(defaults); return maybe_apply_local_storage_overrides(ret); } @@ -3045,7 +3046,7 @@ disk_log_impl::disk_usage_and_reclaimable_space(gc_config input_cfg) { * offset calculation for local retention reclaimable is different than the * retention above and takes into account local retention advisory flag. 
*/ - auto local_retention_cfg = apply_base_overrides(input_cfg); + auto local_retention_cfg = apply_kafka_retention_overrides(input_cfg); local_retention_cfg = apply_local_storage_overrides(local_retention_cfg); const auto local_retention_offset = co_await maybe_adjusted_retention_offset(local_retention_cfg); @@ -3198,7 +3199,7 @@ disk_log_impl::disk_usage_target(gc_config cfg, usage usage) { = config::shard_local_cfg().storage_reserve_min_segments() * max_segment_size(); - cfg = apply_base_overrides(cfg); + cfg = apply_kafka_retention_overrides(cfg); /* * compacted topics are always stored whole on local storage such that local @@ -3507,7 +3508,7 @@ disk_log_impl::get_reclaimable_offsets(gc_config cfg) { * override in contrast to housekeeping GC where the overrides are applied * only when local retention is non-advisory. */ - cfg = apply_base_overrides(cfg); + cfg = apply_kafka_retention_overrides(cfg); cfg = apply_local_storage_overrides(cfg); const auto local_retention_offset = co_await maybe_adjusted_retention_offset(cfg); diff --git a/src/v/storage/disk_log_impl.h b/src/v/storage/disk_log_impl.h index 5cb7367352b2..42eea3b6db69 100644 --- a/src/v/storage/disk_log_impl.h +++ b/src/v/storage/disk_log_impl.h @@ -281,7 +281,7 @@ class disk_log_impl final : public log { retention_adjust_timestamps(std::chrono::seconds ignore_in_future); gc_config apply_overrides(gc_config) const; - gc_config apply_base_overrides(gc_config) const; + gc_config apply_kafka_retention_overrides(gc_config) const; void wrote_stm_bytes(size_t); From e81fc1328146352e872d89e80d56c34cac8796bf Mon Sep 17 00:00:00 2001 From: Andrew Wong Date: Wed, 8 May 2024 14:46:42 -0700 Subject: [PATCH 7/9] storage: refactor retention_adjust_timestamps Pulls out a couple instances of duplicate code, in an effort to clarify naming of various overrides and adjustments done around retention. 
--- src/v/storage/disk_log_impl.cc | 33 +++++++++++++-------------------- src/v/storage/disk_log_impl.h | 3 +-- 2 files changed, 14 insertions(+), 22 deletions(-) diff --git a/src/v/storage/disk_log_impl.cc b/src/v/storage/disk_log_impl.cc index 4d328b847ad3..3c8e0e2a643f 100644 --- a/src/v/storage/disk_log_impl.cc +++ b/src/v/storage/disk_log_impl.cc @@ -1313,10 +1313,18 @@ ss::future> disk_log_impl::do_gc(gc_config cfg) { co_return std::nullopt; } -ss::future<> disk_log_impl::retention_adjust_timestamps( - std::chrono::seconds ignore_in_future) { +ss::future<> disk_log_impl::maybe_adjust_retention_timestamps() { + // Correct any timestamps too far in the future, meant to be called before + // calculating the retention offset for garbage collection. + // It's expected that this will be used only for segments pre v23.3, + // without a proper broker_timestamps + auto ignore_in_future + = config::shard_local_cfg().storage_ignore_timestamps_in_future_sec(); + if (!ignore_in_future.has_value()) { + co_return; + } auto ignore_threshold = model::timestamp( - model::timestamp::now().value() + ignore_in_future / 1ms); + model::timestamp::now().value() + ignore_in_future.value() / 1ms); auto retention_cfg = time_based_retention_cfg::make( _feature_table.local()); // this will retrieve cluster cfgs @@ -1383,17 +1391,7 @@ ss::future<> disk_log_impl::retention_adjust_timestamps( ss::future> disk_log_impl::maybe_adjusted_retention_offset(gc_config cfg) { - auto ignore_timestamps_in_future - = config::shard_local_cfg().storage_ignore_timestamps_in_future_sec(); - if (ignore_timestamps_in_future.has_value()) { - // Correct any timestamps too far in future, before calculating the - // retention offset. 
- // It's expected that this will be used only for segments pre v23.3, - // without a proper broker_timestamps - co_await retention_adjust_timestamps( - ignore_timestamps_in_future.value()); - } - + co_await maybe_adjust_retention_timestamps(); co_return retention_offset(cfg); } @@ -3284,12 +3282,7 @@ disk_log_impl::disk_usage_target_time_retention(gc_config cfg) { * take the opportunity to maybe fixup janky weird timestamps. also done in * normal garbage collection path and should be idempotent. */ - if (auto ignore_timestamps_in_future - = config::shard_local_cfg().storage_ignore_timestamps_in_future_sec(); - ignore_timestamps_in_future.has_value()) { - co_await retention_adjust_timestamps( - ignore_timestamps_in_future.value()); - } + co_await maybe_adjust_retention_timestamps(); /* * we are going to use whole segments for the data we'll use to extrapolate diff --git a/src/v/storage/disk_log_impl.h b/src/v/storage/disk_log_impl.h index 42eea3b6db69..dfdc72bca881 100644 --- a/src/v/storage/disk_log_impl.h +++ b/src/v/storage/disk_log_impl.h @@ -277,8 +277,7 @@ class disk_log_impl final : public log { /// Conditionally adjust retention timestamp on any segment that appears /// to have invalid timestamps, to ensure retention can proceed. - ss::future<> - retention_adjust_timestamps(std::chrono::seconds ignore_in_future); + ss::future<> maybe_adjust_retention_timestamps(); gc_config apply_overrides(gc_config) const; gc_config apply_kafka_retention_overrides(gc_config) const; From 6705a4a3d0f584311fdec0a2183e13b962eb2583 Mon Sep 17 00:00:00 2001 From: Andrew Wong Date: Wed, 8 May 2024 14:51:17 -0700 Subject: [PATCH 8/9] storage: tweak strict_retention condition for clarity With the original code, it was easy to get tripped up by the nots and ors. Tweaks the implementation and adds a comment to make this easier to read. No functional changes. 
--- src/v/storage/disk_log_impl.cc | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/v/storage/disk_log_impl.cc b/src/v/storage/disk_log_impl.cc index 3c8e0e2a643f..9a876cb9aa90 100644 --- a/src/v/storage/disk_log_impl.cc +++ b/src/v/storage/disk_log_impl.cc @@ -1051,13 +1051,17 @@ disk_log_impl::maybe_apply_local_storage_overrides(gc_config cfg) const { /* * don't override with local retention settings--let partition data expand * up to standard retention settings. + * NOTE: both retention_local_strict and retention_local_strict_override + * need to be set to true to honor strict local retention. Otherwise, local + * retention is advisory. */ - if ( - !config::shard_local_cfg().retention_local_strict() - || !config::shard_local_cfg().retention_local_strict_override()) { + bool strict_local_retention + = config::shard_local_cfg().retention_local_strict() + && config::shard_local_cfg().retention_local_strict_override(); + if (!strict_local_retention) { vlog( gclog.trace, - "[{}] Skipped retention override for topic with remote write " + "[{}] Skipped local retention override for topic with remote write " "enabled: {}", config().ntp(), cfg); From 9425c187e81ad5d0ebc7633b44d52db399c5c3b3 Mon Sep 17 00:00:00 2001 From: Andrew Wong Date: Fri, 10 May 2024 08:36:37 -0700 Subject: [PATCH 9/9] resource_mgmt: add clarity to info+ logs We have seen issues where the lack of tiered storage resulted in disk filling up. The info logs didn't make it clear what was happening and it was assumed to be a bug in code, rather than a misconfiguration issue. This commit bumps a log line to warn level so that it would have been clear that a specific error path wasn't being taken. Also tweaks wording to make a stronger connection to tiered storage. 
--- src/v/resource_mgmt/storage.cc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/v/resource_mgmt/storage.cc b/src/v/resource_mgmt/storage.cc index 74e9e0347cb2..02ef2dcf267c 100644 --- a/src/v/resource_mgmt/storage.cc +++ b/src/v/resource_mgmt/storage.cc @@ -303,7 +303,7 @@ eviction_policy::collect_reclaimable_offsets() { .handle_exception_type([](const ss::gate_closed_exception&) {}) .handle_exception([ntp = p->ntp()](std::exception_ptr e) { vlog( - rlog.debug, + rlog.warn, "Error collecting reclaimable offsets from {}: {}", ntp, e); @@ -636,7 +636,9 @@ ss::future<> disk_space_manager::manage_data_disk(uint64_t target_size) { rlog.info, "Scheduling {} for reclaim", human::bytes(estimate)); co_await _policy.install_schedule(std::move(schedule)); } else { - vlog(rlog.info, "No partitions eligible for reclaim were found"); + vlog( + rlog.info, + "No tiered storage partitions were found, unable to reclaim"); } } else { vlog(