From 5ca7672c6b56db587891732d0493152b1112a116 Mon Sep 17 00:00:00 2001 From: lidezhu <47731263+lidezhu@users.noreply.github.com> Date: Mon, 17 Jul 2023 21:00:45 +0800 Subject: [PATCH] store raft data in dedicated blob file (#7719) ref pingcap/tiflash#6827 --- dbms/src/Interpreters/Settings.h | 1 + dbms/src/Storages/Page/Config.h | 7 +- dbms/src/Storages/Page/PageStorage.cpp | 2 +- dbms/src/Storages/Page/V3/Blob/BlobConfig.h | 8 +- dbms/src/Storages/Page/V3/Blob/BlobStat.cpp | 45 +-- dbms/src/Storages/Page/V3/Blob/BlobStat.h | 18 +- dbms/src/Storages/Page/V3/BlobStore.cpp | 85 +++-- dbms/src/Storages/Page/V3/BlobStore.h | 30 +- .../tests/gtest_file_read_write.cpp | 8 +- dbms/src/Storages/Page/V3/GCDefines.cpp | 14 +- dbms/src/Storages/Page/V3/PageDirectory.cpp | 16 + dbms/src/Storages/Page/V3/PageDirectory.h | 4 + dbms/src/Storages/Page/V3/PageStorageImpl.cpp | 9 +- dbms/src/Storages/Page/V3/PageType.h | 65 ++++ .../V3/Universal/UniversalPageStorage.cpp | 11 +- .../Page/V3/Universal/UniversalPageStorage.h | 6 +- .../V3/Universal/tests/gtest_remote_read.cpp | 8 +- .../Page/V3/tests/gtest_blob_stat.cpp | 159 +++++---- .../Page/V3/tests/gtest_blob_store.cpp | 330 +++++++++++------- .../Page/V3/tests/gtest_page_storage.cpp | 12 +- dbms/src/Storages/Transaction/ProxyFFI.cpp | 2 +- .../tests/gtest_kvstore_fast_add_peer.cpp | 2 +- 22 files changed, 567 insertions(+), 275 deletions(-) create mode 100644 dbms/src/Storages/Page/V3/PageType.h diff --git a/dbms/src/Interpreters/Settings.h b/dbms/src/Interpreters/Settings.h index 8ffdcaf112b..1e66bb17921 100644 --- a/dbms/src/Interpreters/Settings.h +++ b/dbms/src/Interpreters/Settings.h @@ -215,6 +215,7 @@ struct Settings M(SettingUInt64, dt_checksum_frame_size, DBMS_DEFAULT_BUFFER_SIZE, "Frame size for delta tree stable storage") \ \ M(SettingDouble, dt_page_gc_threshold, 0.5, "Max valid rate of deciding to do a GC in PageStorage") \ + M(SettingDouble, dt_page_gc_threshold_raft_data, 0.05, "Max valid rate of deciding to do a 
GC for BlobFile storing PageData in PageStorage") \ M(SettingBool, dt_enable_read_thread, true, "Enable storage read thread or not") \ M(SettingBool, dt_enable_bitmap_filter, true, "Use bitmap filter to read data or not") \ M(SettingDouble, dt_read_thread_count_scale, 1.0, "Number of read thread = number of logical cpu cores * dt_read_thread_count_scale. Only has meaning at server startup.") \ diff --git a/dbms/src/Storages/Page/Config.h b/dbms/src/Storages/Page/Config.h index 5e5b13c1159..09ac84f9bcb 100644 --- a/dbms/src/Storages/Page/Config.h +++ b/dbms/src/Storages/Page/Config.h @@ -99,6 +99,7 @@ struct PageStorageConfig SettingUInt64 blob_file_limit_size = BLOBFILE_LIMIT_SIZE; SettingUInt64 blob_spacemap_type = 2; SettingDouble blob_heavy_gc_valid_rate = 0.5; + SettingDouble blob_heavy_gc_valid_rate_raft_data = 0.05; SettingUInt64 blob_block_alignment_bytes = 0; SettingUInt64 wal_roll_size = PAGE_META_ROLL_SIZE; @@ -122,6 +123,7 @@ struct PageStorageConfig blob_file_limit_size = rhs.blob_file_limit_size; blob_spacemap_type = rhs.blob_spacemap_type; blob_heavy_gc_valid_rate = rhs.blob_heavy_gc_valid_rate; + blob_heavy_gc_valid_rate_raft_data = rhs.blob_heavy_gc_valid_rate_raft_data; blob_block_alignment_bytes = rhs.blob_block_alignment_bytes; wal_roll_size = rhs.wal_roll_size; @@ -150,11 +152,12 @@ struct PageStorageConfig return fmt::format( "PageStorageConfig {{" "blob_file_limit_size: {}, blob_spacemap_type: {}, " - "blob_heavy_gc_valid_rate: {:.3f}, blob_block_alignment_bytes: {}, " - "wal_roll_size: {}, wal_max_persisted_log_files: {}}}", + "blob_heavy_gc_valid_rate: {:.3f}, blob_heavy_gc_valid_rate_raft_data: {:.3f}, " + "blob_block_alignment_bytes: {}, wal_roll_size: {}, wal_max_persisted_log_files: {}}}", blob_file_limit_size.get(), blob_spacemap_type.get(), blob_heavy_gc_valid_rate.get(), + blob_heavy_gc_valid_rate_raft_data.get(), blob_block_alignment_bytes.get(), wal_roll_size.get(), wal_max_persisted_log_files.get()); diff --git 
a/dbms/src/Storages/Page/PageStorage.cpp b/dbms/src/Storages/Page/PageStorage.cpp index 7f78fba68cd..4a16e255d91 100644 --- a/dbms/src/Storages/Page/PageStorage.cpp +++ b/dbms/src/Storages/Page/PageStorage.cpp @@ -740,7 +740,7 @@ void PageWriter::writeIntoMixMode(WriteBatch && write_batch, WriteLimiterPtr wri void PageWriter::writeIntoUni(UniversalWriteBatch && write_batch, WriteLimiterPtr write_limiter) const { - uni_ps->write(std::move(write_batch), write_limiter); + uni_ps->write(std::move(write_batch), PageType::Normal, write_limiter); } PageStorageConfig PageWriter::getSettings() const diff --git a/dbms/src/Storages/Page/V3/Blob/BlobConfig.h b/dbms/src/Storages/Page/V3/Blob/BlobConfig.h index 303bc881d68..c227e848a6f 100644 --- a/dbms/src/Storages/Page/V3/Blob/BlobConfig.h +++ b/dbms/src/Storages/Page/V3/Blob/BlobConfig.h @@ -28,17 +28,20 @@ struct BlobConfig SettingUInt64 spacemap_type = SpaceMap::SpaceMapType::SMAP64_STD_MAP; SettingUInt64 block_alignment_bytes = 0; SettingDouble heavy_gc_valid_rate = 0.2; + SettingDouble heavy_gc_valid_rate_raft_data = 0.05; String toString() { return fmt::format("BlobStore Config Info: " "[file_limit_size={}] [spacemap_type={}] " "[block_alignment_bytes={}] " - "[heavy_gc_valid_rate={}]", + "[heavy_gc_valid_rate={}]" + "[heavy_gc_valid_rate_raft_data={}]", file_limit_size, spacemap_type, block_alignment_bytes, - heavy_gc_valid_rate); + heavy_gc_valid_rate, + heavy_gc_valid_rate_raft_data); } static BlobConfig from(const PageStorageConfig & config) @@ -48,6 +51,7 @@ struct BlobConfig blob_config.file_limit_size = config.blob_file_limit_size; blob_config.spacemap_type = config.blob_spacemap_type; blob_config.heavy_gc_valid_rate = config.blob_heavy_gc_valid_rate; + blob_config.heavy_gc_valid_rate_raft_data = config.blob_heavy_gc_valid_rate_raft_data; blob_config.block_alignment_bytes = config.blob_block_alignment_bytes; return blob_config; diff --git a/dbms/src/Storages/Page/V3/Blob/BlobStat.cpp 
b/dbms/src/Storages/Page/V3/Blob/BlobStat.cpp index 9fded181527..9aafaa16147 100644 --- a/dbms/src/Storages/Page/V3/Blob/BlobStat.cpp +++ b/dbms/src/Storages/Page/V3/Blob/BlobStat.cpp @@ -35,7 +35,10 @@ namespace DB::PS::V3 * BlobStats methods * *********************/ -BlobStats::BlobStats(LoggerPtr log_, PSDiskDelegatorPtr delegator_, BlobConfig & config_) +BlobStats::BlobStats( + LoggerPtr log_, + PSDiskDelegatorPtr delegator_, + BlobConfig & config_) : log(std::move(log_)) , delegator(delegator_) , config(config_) @@ -90,20 +93,15 @@ std::pair BlobStats::getBlobIdFromName(String blob_name) void BlobStats::restore() { - BlobFileId max_restored_file_id = 0; - for (auto & [path, stats] : stats_map) { (void)path; for (const auto & stat : stats) { stat->recalculateSpaceMap(); - max_restored_file_id = std::max(stat->id, max_restored_file_id); + cur_max_id = std::max(stat->id, cur_max_id); } } - - // restore `roll_id` - roll_id = max_restored_file_id + 1; } std::lock_guard BlobStats::lock() const @@ -113,15 +111,6 @@ std::lock_guard BlobStats::lock() const BlobStats::BlobStatPtr BlobStats::createStat(BlobFileId blob_file_id, UInt64 max_caps, const std::lock_guard & guard) { - // New blob file id won't bigger than roll_id - if (blob_file_id > roll_id) - { - throw Exception(fmt::format("BlobStats won't create [blob_id={}], which is bigger than [roll_id={}]", - blob_file_id, - roll_id), - ErrorCodes::LOGICAL_ERROR); - } - for (auto & [path, stats] : stats_map) { (void)path; @@ -137,15 +126,7 @@ BlobStats::BlobStatPtr BlobStats::createStat(BlobFileId blob_file_id, UInt64 max } // Create a stat without checking the file_id exist or not - auto stat = createStatNotChecking(blob_file_id, max_caps, guard); - - // Roll to the next new blob id - if (blob_file_id == roll_id) - { - roll_id++; - } - - return stat; + return createStatNotChecking(blob_file_id, max_caps, guard); } BlobStats::BlobStatPtr BlobStats::createStatNotChecking(BlobFileId blob_file_id, UInt64 max_caps, const 
std::lock_guard &) @@ -203,14 +184,16 @@ void BlobStats::eraseStat(BlobFileId blob_file_id, const std::lock_guard BlobStats::chooseStat(size_t buf_size, const std::lock_guard &) +std::pair BlobStats::chooseStat(size_t buf_size, PageType page_type, const std::lock_guard &) { BlobStatPtr stat_ptr = nullptr; // No stats exist if (stats_map.empty()) { - return std::make_pair(nullptr, roll_id); + auto next_id = PageTypeUtils::nextFileID(page_type, cur_max_id); + cur_max_id = next_id; + return std::make_pair(nullptr, next_id); } // If the stats_map size changes, or stats_map_path_index is out of range, @@ -226,6 +209,9 @@ std::pair BlobStats::chooseStat(size_t buf_s // Try to find a suitable stat under current path (path=`stats_iter->first`) for (const auto & stat : stats_iter->second) { + if (PageTypeUtils::getPageType(stat->id) != page_type) + continue; + auto defer_lock = stat->defer_lock(); if (defer_lock.try_lock() && stat->isNormal() && stat->sm_max_caps >= buf_size) { @@ -245,7 +231,9 @@ std::pair BlobStats::chooseStat(size_t buf_s stats_map_path_index += path_iter_idx + 1; // Can not find a suitable stat under all paths - return std::make_pair(nullptr, roll_id); + auto next_id = PageTypeUtils::nextFileID(page_type, cur_max_id); + cur_max_id = next_id; + return std::make_pair(nullptr, next_id); } BlobStats::BlobStatPtr BlobStats::blobIdToStat(BlobFileId file_id, bool ignore_not_exist) @@ -358,5 +346,4 @@ void BlobStats::BlobStat::recalculateCapacity() { sm_max_caps = smap->updateAccurateMaxCapacity(); } - } // namespace DB::PS::V3 diff --git a/dbms/src/Storages/Page/V3/Blob/BlobStat.h b/dbms/src/Storages/Page/V3/Blob/BlobStat.h index 1d01f818839..f0745e33e9f 100644 --- a/dbms/src/Storages/Page/V3/Blob/BlobStat.h +++ b/dbms/src/Storages/Page/V3/Blob/BlobStat.h @@ -15,8 +15,10 @@ #pragma once #include +#include #include #include +#include #include #include #include @@ -58,7 +60,11 @@ class BlobStats double sm_valid_rate = 0.0; public: - BlobStat(BlobFileId id_, 
SpaceMap::SpaceMapType sm_type, UInt64 sm_max_caps_, BlobStatType type_) + BlobStat( + BlobFileId id_, + SpaceMap::SpaceMapType sm_type, + UInt64 sm_max_caps_, + BlobStatType type_) : id(id_) , type(type_) , smap(SpaceMap::createSpaceMap(sm_type, 0, sm_max_caps_)) @@ -119,7 +125,10 @@ class BlobStats using BlobStatPtr = std::shared_ptr; public: - BlobStats(LoggerPtr log_, PSDiskDelegatorPtr delegator_, BlobConfig & config); + BlobStats( + LoggerPtr log_, + PSDiskDelegatorPtr delegator_, + BlobConfig & config); // Don't require a lock from BlobStats When you already hold a BlobStat lock // @@ -155,7 +164,7 @@ class BlobStats * The `INVALID_BLOBFILE_ID` means that you don't need create a new `BlobFile`. * */ - std::pair chooseStat(size_t buf_size, const std::lock_guard &); + std::pair chooseStat(size_t buf_size, PageType page_type, const std::lock_guard &); BlobStatPtr blobIdToStat(BlobFileId file_id, bool ignore_not_exist = false); @@ -184,7 +193,8 @@ class BlobStats BlobConfig & config; mutable std::mutex lock_stats; - BlobFileId roll_id = 1; + const PageTypeAndConfig page_type_and_config; + BlobFileId cur_max_id = 1; // Index for selecting next path for creating new blobfile UInt32 stats_map_path_index = 0; std::map> stats_map; diff --git a/dbms/src/Storages/Page/V3/BlobStore.cpp b/dbms/src/Storages/Page/V3/BlobStore.cpp index f2f3d52684d..0c38034f991 100644 --- a/dbms/src/Storages/Page/V3/BlobStore.cpp +++ b/dbms/src/Storages/Page/V3/BlobStore.cpp @@ -76,10 +76,16 @@ static_assert(!std::is_same_v, "The checksum mus *********************/ template -BlobStore::BlobStore(const String & storage_name, const FileProviderPtr & file_provider_, PSDiskDelegatorPtr delegator_, const BlobConfig & config_) +BlobStore::BlobStore( + const String & storage_name, + const FileProviderPtr & file_provider_, + PSDiskDelegatorPtr delegator_, + const BlobConfig & config_, + const PageTypeAndConfig & page_type_and_config_) : delegator(std::move(delegator_)) , 
file_provider(file_provider_) , config(config_) + , page_type_and_config(page_type_and_config_) , log(Logger::get(storage_name)) , blob_stats(log, delegator, config) { @@ -132,6 +138,16 @@ void BlobStore::reloadConfig(const BlobConfig & rhs) config.spacemap_type = rhs.spacemap_type; config.block_alignment_bytes = rhs.block_alignment_bytes; config.heavy_gc_valid_rate = rhs.heavy_gc_valid_rate; + config.heavy_gc_valid_rate_raft_data = rhs.heavy_gc_valid_rate_raft_data; + auto reload_page_type_config = [this](PageType page_type, const PageTypeConfig & config) { + auto iter = page_type_and_config.find(page_type); + if (iter != page_type_and_config.end()) + { + iter->second = config; + } + }; + reload_page_type_config(PageType::Normal, PageTypeConfig{.heavy_gc_valid_rate = config.heavy_gc_valid_rate}); + reload_page_type_config(PageType::RaftData, PageTypeConfig{.heavy_gc_valid_rate = config.heavy_gc_valid_rate_raft_data}); } template @@ -169,7 +185,7 @@ FileUsageStatistics BlobStore::getFileUsageStatistics() const template typename BlobStore::PageEntriesEdit -BlobStore::handleLargeWrite(typename Trait::WriteBatch && wb, const WriteLimiterPtr & write_limiter) +BlobStore::handleLargeWrite(typename Trait::WriteBatch && wb, PageType page_type, const WriteLimiterPtr & write_limiter) { PageEntriesEdit edit; for (auto & write : wb.getMutWrites()) @@ -179,7 +195,7 @@ BlobStore::handleLargeWrite(typename Trait::WriteBatch && wb, const Write case WriteBatchWriteType::PUT: case WriteBatchWriteType::UPDATE_DATA_FROM_REMOTE: { - const auto [blob_id, offset_in_file] = getPosFromStats(write.size); + const auto [blob_id, offset_in_file] = getPosFromStats(write.size, page_type); auto blob_file = getBlobFile(blob_id); ChecksumClass digest; // swap from WriteBatch instead of copying @@ -351,7 +367,7 @@ BlobStore::handleLargeWrite(typename Trait::WriteBatch && wb, const Write template typename BlobStore::PageEntriesEdit -BlobStore::write(typename Trait::WriteBatch && wb, const 
WriteLimiterPtr & write_limiter) +BlobStore::write(typename Trait::WriteBatch && wb, PageType page_type, const WriteLimiterPtr & write_limiter) { ProfileEvents::increment(ProfileEvents::PSMWritePages, wb.putWriteCount()); @@ -425,7 +441,7 @@ BlobStore::write(typename Trait::WriteBatch && wb, const WriteLimiterPtr if (all_page_data_size > config.file_limit_size) { LOG_INFO(log, "handling large write, all_page_data_size={}", all_page_data_size); - return handleLargeWrite(std::move(wb), write_limiter); + return handleLargeWrite(std::move(wb), page_type, write_limiter); } char * buffer = static_cast(alloc(all_page_data_size)); @@ -443,7 +459,7 @@ BlobStore::write(typename Trait::WriteBatch && wb, const WriteLimiterPtr size_t actually_allocated_size = all_page_data_size + replenish_size; - auto [blob_id, offset_in_file] = getPosFromStats(actually_allocated_size); + auto [blob_id, offset_in_file] = getPosFromStats(actually_allocated_size, page_type); size_t offset_in_allocated = 0; @@ -637,15 +653,15 @@ void BlobStore::remove(const PageEntries & del_entries) } template -std::pair BlobStore::getPosFromStats(size_t size) +std::pair BlobStore::getPosFromStats(size_t size, PageType page_type) { Stopwatch watch; BlobStatPtr stat; - auto lock_stat = [size, this, &stat]() { + auto lock_stat = [size, this, &stat, &page_type]() { auto lock_stats = blob_stats.lock(); BlobFileId blob_file_id = INVALID_BLOBFILE_ID; - std::tie(stat, blob_file_id) = blob_stats.chooseStat(size, lock_stats); + std::tie(stat, blob_file_id) = blob_stats.chooseStat(size, page_type, lock_stats); if (stat == nullptr) { // No valid stat for putting data with `size`, create a new one @@ -1062,11 +1078,11 @@ BlobFilePtr BlobStore::read(const typename BlobStore::PageId & pag template -std::vector BlobStore::getGCStats() +typename BlobStore::PageTypeAndBlobIds BlobStore::getGCStats() { // Get a copy of stats map to avoid the big lock on stats map const auto stats_list = blob_stats.getStats(); - std::vector 
blob_need_gc; + PageTypeAndBlobIds blob_need_gc; BlobStoreGCInfo blobstore_gc_info; fiu_do_on(FailPoints::force_change_all_blobs_to_read_only, @@ -1132,10 +1148,25 @@ std::vector BlobStore::getGCStats() } // Check if GC is required - if (stat->sm_valid_rate <= config.heavy_gc_valid_rate) + // Raft related data contains raft log and other meta data. + // Raft log is written and deleted in a faster pace which is not suitable for full gc. + // But other meta data is written and deleted in a slower pace and cannot be completely deleted even if the region is removed, and full gc is needed. + // So we choose to also do full gc for raft related data but with a smaller threshold. + PageType page_type = PageTypeUtils::getPageType(stat->id); + auto heavy_gc_threhold = config.heavy_gc_valid_rate; + if (auto iter = page_type_and_config.find(page_type); iter != page_type_and_config.end()) + { + heavy_gc_threhold = iter->second.heavy_gc_valid_rate; + } + bool do_full_gc = stat->sm_valid_rate <= heavy_gc_threhold; + if (do_full_gc) { LOG_TRACE(log, "Current [blob_id={}] valid rate is {:.2f}, full GC", stat->id, stat->sm_valid_rate); - blob_need_gc.emplace_back(stat->id); + if (blob_need_gc.find(page_type) == blob_need_gc.end()) + { + blob_need_gc.emplace(page_type, std::vector()); + } + blob_need_gc[page_type].emplace_back(stat->id); // Change current stat to read only stat->changeToReadOnly(); @@ -1166,14 +1197,15 @@ std::vector BlobStore::getGCStats() } template -typename BlobStore::PageEntriesEdit -BlobStore::gc(GcEntriesMap & entries_need_gc, - const PageSize & total_page_size, - const WriteLimiterPtr & write_limiter, - const ReadLimiterPtr & read_limiter) +void BlobStore::gc( + PageType page_type, + const GcEntriesMap & entries_need_gc, + const PageSize & total_page_size, + PageEntriesEdit & edit, + const WriteLimiterPtr & write_limiter, + const ReadLimiterPtr & read_limiter) { std::vector> written_blobs; - PageEntriesEdit edit; if (total_page_size == 0) { @@ -1251,7 +1283,7 
@@ BlobStore::gc(GcEntriesMap & entries_need_gc, BlobFileOffset offset_in_data = 0; BlobFileId blobfile_id; BlobFileOffset file_offset_begin; - std::tie(blobfile_id, file_offset_begin) = getPosFromStats(alloc_size); + std::tie(blobfile_id, file_offset_begin) = getPosFromStats(alloc_size, page_type); // blob_file_0, [, // , @@ -1288,7 +1320,7 @@ BlobStore::gc(GcEntriesMap & entries_need_gc, // Acquire a span from stats for remaining data auto next_alloc_size = (remaining_page_size > alloc_size ? alloc_size : remaining_page_size); remaining_page_size -= next_alloc_size; - std::tie(blobfile_id, file_offset_begin) = getPosFromStats(next_alloc_size); + std::tie(blobfile_id, file_offset_begin) = getPosFromStats(next_alloc_size, page_type); } assert(offset_in_data + entry.size <= alloc_size); @@ -1314,7 +1346,18 @@ BlobStore::gc(GcEntriesMap & entries_need_gc, { write_blob(blobfile_id, data_buf, file_offset_begin, offset_in_data); } +} +template +typename BlobStore::PageEntriesEdit BlobStore::gc(const PageTypeAndGcInfo & page_type_and_gc_info, + const WriteLimiterPtr & write_limiter, + const ReadLimiterPtr & read_limiter) +{ + PageEntriesEdit edit; + for (const auto & [page_type, entries_need_gc, total_page_size] : page_type_and_gc_info) + { + gc(page_type, entries_need_gc, total_page_size, edit, write_limiter, read_limiter); + } return edit; } diff --git a/dbms/src/Storages/Page/V3/BlobStore.h b/dbms/src/Storages/Page/V3/BlobStore.h index 76bc04144fe..8f657844a91 100644 --- a/dbms/src/Storages/Page/V3/BlobStore.h +++ b/dbms/src/Storages/Page/V3/BlobStore.h @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -56,7 +57,12 @@ class BlobStore : private Allocator using PageMap = typename Trait::PageMap; public: - BlobStore(const String & storage_name, const FileProviderPtr & file_provider_, PSDiskDelegatorPtr delegator_, const BlobConfig & config); + BlobStore( + const String & storage_name, + const FileProviderPtr & file_provider_, + 
PSDiskDelegatorPtr delegator_, + const BlobConfig & config, + const PageTypeAndConfig & page_type_and_config_); void registerPaths(); @@ -64,14 +70,23 @@ class BlobStore : private Allocator FileUsageStatistics getFileUsageStatistics() const; - std::vector getGCStats(); + using PageTypeAndBlobIds = std::map>; + PageTypeAndBlobIds getGCStats(); - PageEntriesEdit gc(GcEntriesMap & entries_need_gc, - const PageSize & total_page_size, + void gc( + PageType page_type, + const GcEntriesMap & entries_need_gc, + const PageSize & total_page_size, + PageEntriesEdit & edit, + const WriteLimiterPtr & write_limiter = nullptr, + const ReadLimiterPtr & read_limiter = nullptr); + + using PageTypeAndGcInfo = std::vector>; + PageEntriesEdit gc(const PageTypeAndGcInfo & page_type_and_gc_info, const WriteLimiterPtr & write_limiter = nullptr, const ReadLimiterPtr & read_limiter = nullptr); - PageEntriesEdit write(typename Trait::WriteBatch && wb, const WriteLimiterPtr & write_limiter = nullptr); + PageEntriesEdit write(typename Trait::WriteBatch && wb, PageType page_type = PageType::Normal, const WriteLimiterPtr & write_limiter = nullptr); void remove(const PageEntries & del_entries); @@ -98,7 +113,7 @@ class BlobStore : private Allocator private: #endif - PageEntriesEdit handleLargeWrite(typename Trait::WriteBatch && wb, const WriteLimiterPtr & write_limiter = nullptr); + PageEntriesEdit handleLargeWrite(typename Trait::WriteBatch && wb, PageType page_type, const WriteLimiterPtr & write_limiter = nullptr); BlobFilePtr read(const PageId & page_id_v3, BlobFileId blob_id, BlobFileOffset offset, char * buffers, size_t size, const ReadLimiterPtr & read_limiter = nullptr, bool background = false); @@ -107,7 +122,7 @@ class BlobStore : private Allocator * We will lock BlobStats until we get a BlobStat that can hold the size. * Then lock the BlobStat to get the span. 
*/ - std::pair getPosFromStats(size_t size); + std::pair getPosFromStats(size_t size, PageType page_type); /** * Request a specific BlobStat to delete a certain span. @@ -132,6 +147,7 @@ class BlobStore : private Allocator FileProviderPtr file_provider; BlobConfig config; + PageTypeAndConfig page_type_and_config; LoggerPtr log; diff --git a/dbms/src/Storages/Page/V3/CheckpointFile/tests/gtest_file_read_write.cpp b/dbms/src/Storages/Page/V3/CheckpointFile/tests/gtest_file_read_write.cpp index d036daeeee1..9df34a0084e 100644 --- a/dbms/src/Storages/Page/V3/CheckpointFile/tests/gtest_file_read_write.cpp +++ b/dbms/src/Storages/Page/V3/CheckpointFile/tests/gtest_file_read_write.cpp @@ -385,7 +385,11 @@ try { const auto delegator = std::make_shared(std::vector{dir}); const auto file_provider = DB::tests::TiFlashTestEnv::getDefaultFileProvider(); - auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, BlobConfig{}); + PageTypeAndConfig page_type_and_config{ + {PageType::Normal, PageTypeConfig{.heavy_gc_valid_rate = 0.5}}, + {PageType::RaftData, PageTypeConfig{.heavy_gc_valid_rate = 0.01}}, + }; + auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, BlobConfig{}, page_type_and_config); auto edits = universal::PageEntriesEdit{}; { @@ -393,7 +397,7 @@ try wb.putPage("page_foo", 0, "The flower carriage rocked", {4, 10, 12}); wb.delPage("id_bar"); wb.putPage("page_abc", 0, "Dreamed of the day that she was born"); - auto blob_store_edits = blob_store.write(std::move(wb), nullptr); + auto blob_store_edits = blob_store.write(std::move(wb)); ASSERT_EQ(blob_store_edits.size(), 3); diff --git a/dbms/src/Storages/Page/V3/GCDefines.cpp b/dbms/src/Storages/Page/V3/GCDefines.cpp index 004ce245c4e..3ebbfa09dcb 100644 --- a/dbms/src/Storages/Page/V3/GCDefines.cpp +++ b/dbms/src/Storages/Page/V3/GCDefines.cpp @@ -20,6 +20,7 @@ #include #include +#include #include namespace DB @@ -281,13 +282,18 @@ GCTimeStatistics 
ExternalPageCallbacksManager::doGC( } // Execute full gc - GET_METRIC(tiflash_storage_page_gc_count, type_v3_bs_full_gc).Increment(blob_ids_need_gc.size()); + GET_METRIC(tiflash_storage_page_gc_count, type_v3_bs_full_gc).Increment(std::accumulate(blob_ids_need_gc.begin(), blob_ids_need_gc.end(), 0, [&](Int64 acc, const auto & page_type_and_blob_ids) { + return acc + page_type_and_blob_ids.second.size(); + })); // 4. Filter out entries in MVCC by BlobId. // We also need to filter the version of the entry. // So that the `gc_apply` can proceed smoothly. - auto [blob_gc_info, total_page_size] = page_directory.getEntriesByBlobIds(blob_ids_need_gc); + auto page_type_gc_infos = page_directory.getEntriesByBlobIdsForDifferentPageTypes(blob_ids_need_gc); statistics.full_gc_get_entries_ms = gc_watch.elapsedMillisecondsFromLastTime(); - if (blob_gc_info.empty()) + auto entries_size_to_move = std::accumulate(page_type_gc_infos.begin(), page_type_gc_infos.end(), 0, [&](UInt64 acc, const auto & page_type_and_gc_info) { + return acc + std::get<2>(page_type_and_gc_info); + }); + if (entries_size_to_move == 0) { cleanExternalPage(page_directory, gc_watch, statistics); statistics.stage = GCStageType::FullGCNothingMoved; @@ -300,7 +306,7 @@ GCTimeStatistics ExternalPageCallbacksManager::doGC( // 5. Do the BlobStore GC // After BlobStore GC, these entries will be migrated to a new blob. // Then we should notify MVCC apply the change. 
- PageEntriesEdit gc_edit = blob_store.gc(blob_gc_info, total_page_size, write_limiter, read_limiter); + PageEntriesEdit gc_edit = blob_store.gc(page_type_gc_infos, write_limiter, read_limiter); statistics.full_gc_blobstore_copy_ms = gc_watch.elapsedMillisecondsFromLastTime(); GET_METRIC(tiflash_storage_page_gc_duration_seconds, type_fullgc_rewrite).Observe( // (statistics.full_gc_prepare_ms + statistics.full_gc_get_entries_ms + statistics.full_gc_blobstore_copy_ms) / 1000.0); diff --git a/dbms/src/Storages/Page/V3/PageDirectory.cpp b/dbms/src/Storages/Page/V3/PageDirectory.cpp index 71cc621621a..2519e4166b9 100644 --- a/dbms/src/Storages/Page/V3/PageDirectory.cpp +++ b/dbms/src/Storages/Page/V3/PageDirectory.cpp @@ -1814,6 +1814,22 @@ PageDirectory::getEntriesByBlobIds(const std::vector & blob_i return std::make_pair(std::move(blob_versioned_entries), total_page_size); } +template +typename PageDirectory::PageTypeAndGcInfo +PageDirectory::getEntriesByBlobIdsForDifferentPageTypes(const typename PageDirectory::PageTypeAndBlobIds & page_type_and_blob_ids) const +{ + PageDirectory::PageTypeAndGcInfo page_type_and_gc_info; + // Because raft related data should do full gc less frequently, so we get the gc info for different page types separately. 
+ // TODO: get entries in a single traverse of PageDirectory + for (const auto & [page_type, blob_ids] : page_type_and_blob_ids) + { + auto [blob_versioned_entries, total_page_size] = getEntriesByBlobIds(blob_ids); + page_type_and_gc_info.emplace_back(page_type, std::move(blob_versioned_entries), total_page_size); + } + + return page_type_and_gc_info; +} + template bool PageDirectory::tryDumpSnapshot(const WriteLimiterPtr & write_limiter, bool force) { diff --git a/dbms/src/Storages/Page/V3/PageDirectory.h b/dbms/src/Storages/Page/V3/PageDirectory.h index abcdabf369b..668836b9d7b 100644 --- a/dbms/src/Storages/Page/V3/PageDirectory.h +++ b/dbms/src/Storages/Page/V3/PageDirectory.h @@ -490,6 +490,10 @@ class PageDirectory std::pair getEntriesByBlobIds(const std::vector & blob_ids) const; + using PageTypeAndBlobIds = std::map>; + using PageTypeAndGcInfo = std::vector>; + PageTypeAndGcInfo getEntriesByBlobIdsForDifferentPageTypes(const PageTypeAndBlobIds & page_type_and_blob_ids) const; + void gcApply(PageEntriesEdit && migrated_edit, const WriteLimiterPtr & write_limiter = nullptr); bool tryDumpSnapshot(const WriteLimiterPtr & write_limiter = nullptr, bool force = false); diff --git a/dbms/src/Storages/Page/V3/PageStorageImpl.cpp b/dbms/src/Storages/Page/V3/PageStorageImpl.cpp index 9757b82cd41..35079cd7531 100644 --- a/dbms/src/Storages/Page/V3/PageStorageImpl.cpp +++ b/dbms/src/Storages/Page/V3/PageStorageImpl.cpp @@ -51,7 +51,12 @@ PageStorageImpl::PageStorageImpl( const FileProviderPtr & file_provider_) : DB::PageStorage(name, delegator_, config_, file_provider_) , log(Logger::get(name)) - , blob_store(name, file_provider_, delegator, BlobConfig::from(config_)) + , blob_store( + name, + file_provider_, + delegator, + BlobConfig::from(config_), + PageTypeAndConfig{{PageType::Normal, PageTypeConfig{.heavy_gc_valid_rate = config.blob_heavy_gc_valid_rate}}}) { LOG_INFO(log, "PageStorageImpl start. 
Config{{ {} }}", config.toDebugStringV3()); } @@ -135,7 +140,7 @@ void PageStorageImpl::writeImpl(DB::WriteBatch && write_batch, const WriteLimite SCOPE_EXIT({ GET_METRIC(tiflash_storage_page_write_duration_seconds, type_total).Observe(watch.elapsedSeconds()); }); // Persist Page data to BlobStore - auto edit = blob_store.write(std::move(write_batch), write_limiter); + auto edit = blob_store.write(std::move(write_batch), PageType::Normal, write_limiter); page_directory->apply(std::move(edit), write_limiter); } diff --git a/dbms/src/Storages/Page/V3/PageType.h b/dbms/src/Storages/Page/V3/PageType.h new file mode 100644 index 00000000000..7bf3018025c --- /dev/null +++ b/dbms/src/Storages/Page/V3/PageType.h @@ -0,0 +1,65 @@ +// Copyright 2022 PingCAP, Ltd. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once + +#include +#include +#include + +namespace DB::PS::V3 +{ + +enum class PageType : UInt8 +{ + Normal = 0, + RaftData = 1, + + // Support at most 10 types + TypeCountLimit = 10, +}; + +struct PageTypeConfig +{ + SettingDouble heavy_gc_valid_rate = 0.5; +}; + +using PageTypes = std::vector; +using PageTypeAndConfig = std::unordered_map; + +struct PageTypeUtils +{ + static inline PageType getPageType(UInt64 file_id, bool treat_unknown_as_normal = true) + { + using T = std::underlying_type_t; + switch (file_id % static_cast(PageType::TypeCountLimit)) + { + case static_cast(PageType::Normal): + return PageType::Normal; + case static_cast(PageType::RaftData): + return PageType::RaftData; + default: + return treat_unknown_as_normal ? PageType::Normal : PageType::TypeCountLimit; + } + } + + static inline UInt64 nextFileID(PageType page_type, UInt64 cur_max_id) + { + using T = std::underlying_type_t; + auto step = static_cast(PageType::TypeCountLimit); + auto id = cur_max_id + step; + return id - (id % step) + static_cast(page_type); + } +}; +} // namespace DB::PS::V3 diff --git a/dbms/src/Storages/Page/V3/Universal/UniversalPageStorage.cpp b/dbms/src/Storages/Page/V3/Universal/UniversalPageStorage.cpp index 1db85a3698d..fe03d8cd0c8 100644 --- a/dbms/src/Storages/Page/V3/Universal/UniversalPageStorage.cpp +++ b/dbms/src/Storages/Page/V3/Universal/UniversalPageStorage.cpp @@ -48,12 +48,17 @@ UniversalPageStoragePtr UniversalPageStorage::create( const PageStorageConfig & config, const FileProviderPtr & file_provider) { + PageTypeAndConfig page_type_and_config{ + {PageType::Normal, PageTypeConfig{.heavy_gc_valid_rate = config.blob_heavy_gc_valid_rate}}, + {PageType::RaftData, PageTypeConfig{.heavy_gc_valid_rate = config.blob_heavy_gc_valid_rate_raft_data}}, + }; UniversalPageStoragePtr storage = std::make_shared(name, delegator, config, file_provider); storage->blob_store = std::make_unique( name, file_provider, delegator, - PS::V3::BlobConfig::from(config)); 
+ PS::V3::BlobConfig::from(config), + page_type_and_config); if (S3::ClientFactory::instance().isEnabled()) { storage->remote_reader = std::make_unique(); @@ -79,7 +84,7 @@ size_t UniversalPageStorage::getNumberOfPages(const String & prefix) const return page_directory->numPagesWithPrefix(prefix); } -void UniversalPageStorage::write(UniversalWriteBatch && write_batch, const WriteLimiterPtr & write_limiter) const +void UniversalPageStorage::write(UniversalWriteBatch && write_batch, PageType page_type, const WriteLimiterPtr & write_limiter) const { if (unlikely(write_batch.empty())) return; @@ -96,7 +101,7 @@ void UniversalPageStorage::write(UniversalWriteBatch && write_batch, const Write // Note that if `remote_locks_local_mgr`'s store_id is not inited, it will blocks until inited remote_locks_local_mgr->createS3LockForWriteBatch(write_batch); } - auto edit = blob_store->write(std::move(write_batch), write_limiter); + auto edit = blob_store->write(std::move(write_batch), page_type, write_limiter); auto applied_lock_ids = page_directory->apply(std::move(edit), write_limiter); if (has_writes_from_remote) { diff --git a/dbms/src/Storages/Page/V3/Universal/UniversalPageStorage.h b/dbms/src/Storages/Page/V3/Universal/UniversalPageStorage.h index 5a840ce7ec8..3c3088d8aa5 100644 --- a/dbms/src/Storages/Page/V3/Universal/UniversalPageStorage.h +++ b/dbms/src/Storages/Page/V3/Universal/UniversalPageStorage.h @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -57,6 +58,9 @@ namespace PS::V3 class S3LockLocalManager; using S3LockLocalManagerPtr = std::unique_ptr; } // namespace PS::V3 +using PS::V3::PageType; +using PS::V3::PageTypeAndConfig; +using PS::V3::PageTypeConfig; class UniversalPageStorage; using UniversalPageStoragePtr = std::shared_ptr; @@ -115,7 +119,7 @@ class UniversalPageStorage final size_t getNumberOfPages(const String & prefix) const; - void write(UniversalWriteBatch && write_batch, const WriteLimiterPtr & write_limiter = 
nullptr) const; + void write(UniversalWriteBatch && write_batch, PageType page_type = PageType::Normal, const WriteLimiterPtr & write_limiter = nullptr) const; Page read(const UniversalPageId & page_id, const ReadLimiterPtr & read_limiter = nullptr, SnapshotPtr snapshot = {}, bool throw_on_not_exist = true) const; diff --git a/dbms/src/Storages/Page/V3/Universal/tests/gtest_remote_read.cpp b/dbms/src/Storages/Page/V3/Universal/tests/gtest_remote_read.cpp index a293268b558..edfd424e2ea 100644 --- a/dbms/src/Storages/Page/V3/Universal/tests/gtest_remote_read.cpp +++ b/dbms/src/Storages/Page/V3/Universal/tests/gtest_remote_read.cpp @@ -426,14 +426,18 @@ CATCH TEST_F(UniPageStorageRemoteReadTest, WriteReadWithFields) try { - auto blob_store = PS::V3::BlobStore(getCurrentTestName(), file_provider, delegator, PS::V3::BlobConfig{}); + PageTypeAndConfig page_type_and_config{ + {PageType::Normal, PageTypeConfig{.heavy_gc_valid_rate = 0.5}}, + {PageType::RaftData, PageTypeConfig{.heavy_gc_valid_rate = 0.1}}, + }; + auto blob_store = PS::V3::BlobStore(getCurrentTestName(), file_provider, delegator, PS::V3::BlobConfig{}, page_type_and_config); auto edits = PS::V3::universal::PageEntriesEdit{}; { UniversalWriteBatch wb; wb.disableRemoteLock(); wb.putPage("page_foo", 0, "The flower carriage rocked", {4, 10, 12}); - auto blob_store_edits = blob_store.write(std::move(wb), nullptr); + auto blob_store_edits = blob_store.write(std::move(wb)); edits.appendRecord({.type = PS::V3::EditRecordType::VAR_ENTRY, .page_id = "page_foo", .entry = blob_store_edits.getRecords()[0].entry}); edits.appendRecord({.type = PS::V3::EditRecordType::VAR_REF, .page_id = "page_foo2", .ori_page_id = "page_foo"}); diff --git a/dbms/src/Storages/Page/V3/tests/gtest_blob_stat.cpp b/dbms/src/Storages/Page/V3/tests/gtest_blob_stat.cpp index d0d0f22cf68..36fb3bd421a 100644 --- a/dbms/src/Storages/Page/V3/tests/gtest_blob_stat.cpp +++ b/dbms/src/Storages/Page/V3/tests/gtest_blob_stat.cpp @@ -69,8 +69,8 @@ 
TEST_F(BlobStoreStatsTest, RestoreEmpty) auto stats_copy = stats.getStats(); ASSERT_TRUE(stats_copy.empty()); - EXPECT_EQ(stats.roll_id, 1); - EXPECT_NO_THROW(stats.createStat(stats.roll_id, config.file_limit_size, stats.lock())); + EXPECT_EQ(stats.cur_max_id, 1); + EXPECT_NO_THROW(stats.createStat(stats.cur_max_id, config.file_limit_size, stats.lock())); } TEST_F(BlobStoreStatsTest, Restore) @@ -78,13 +78,20 @@ try { BlobStats stats(logger, delegator, config); - BlobFileId file_id1 = 10; - BlobFileId file_id2 = 12; + BlobFileId file_id1 = 1; + BlobFileId file_id2 = PageTypeUtils::nextFileID(PageType::Normal, file_id1); + BlobFileId file_id3 = PageTypeUtils::nextFileID(PageType::RaftData, file_id2); + BlobFileId file_id4 = PageTypeUtils::nextFileID(PageType::Normal, file_id3); + ASSERT_EQ(file_id2, 10); + ASSERT_EQ(file_id3, 21); + ASSERT_EQ(file_id4, 30); { const auto & lock = stats.lock(); stats.createStatNotChecking(file_id1, config.file_limit_size, lock); stats.createStatNotChecking(file_id2, config.file_limit_size, lock); + stats.createStatNotChecking(file_id3, config.file_limit_size, lock); + stats.createStatNotChecking(file_id4, config.file_limit_size, lock); } { @@ -112,14 +119,31 @@ try .offset = 2048, .checksum = 0x4567, }); + stats.restoreByEntry(PageEntryV3{ + .file_id = file_id3, + .size = 512, + .padded_size = 0, + .tag = 0, + .offset = 2048, + .checksum = 0x4567, + }); + stats.restoreByEntry(PageEntryV3{ + .file_id = file_id4, + .size = 512, + .padded_size = 0, + .tag = 0, + .offset = 2048, + .checksum = 0x4567, + }); stats.restore(); } auto stats_copy = stats.getStats(); ASSERT_EQ(stats_copy.size(), std::min(getTotalStatsNum(stats_copy), path_num)); - ASSERT_EQ(getTotalStatsNum(stats_copy), 2); - EXPECT_EQ(stats.roll_id, 13); + ASSERT_EQ(getTotalStatsNum(stats_copy), 4); + EXPECT_EQ(stats.cur_max_id, file_id4); + auto stat1 = stats.blobIdToStat(file_id1); EXPECT_EQ(stat1->sm_total_size, 2048 + 512); @@ -127,38 +151,45 @@ try auto stat2 = 
stats.blobIdToStat(file_id2); EXPECT_EQ(stat2->sm_total_size, 2048 + 512); EXPECT_EQ(stat2->sm_valid_size, 512); - - // This will throw exception since we try to create - // a new file bigger than restored `roll_id` - EXPECT_ANY_THROW({ stats.createStat(14, config.file_limit_size, stats.lock()); }); + auto stat3 = stats.blobIdToStat(file_id3); + EXPECT_EQ(stat2->sm_total_size, 2048 + 512); + EXPECT_EQ(stat2->sm_valid_size, 512); + auto stat4 = stats.blobIdToStat(file_id4); + EXPECT_EQ(stat2->sm_total_size, 2048 + 512); + EXPECT_EQ(stat2->sm_valid_size, 512); EXPECT_ANY_THROW({ stats.createStat(file_id1, config.file_limit_size, stats.lock()); }); EXPECT_ANY_THROW({ stats.createStat(file_id2, config.file_limit_size, stats.lock()); }); - EXPECT_ANY_THROW({ stats.createStat(stats.roll_id + 1, config.file_limit_size, stats.lock()); }); + EXPECT_ANY_THROW({ stats.createStat(file_id3, config.file_limit_size, stats.lock()); }); + EXPECT_ANY_THROW({ stats.createStat(file_id4, config.file_limit_size, stats.lock()); }); } CATCH TEST_F(BlobStoreStatsTest, testStats) { BlobStats stats(logger, delegator, config); - - auto stat = stats.createStat(0, config.file_limit_size, stats.lock()); + BlobFileId file_id0 = 10; + auto stat = stats.createStat(file_id0, config.file_limit_size, stats.lock()); ASSERT_TRUE(stat); ASSERT_TRUE(stat->smap); - stats.createStat(1, config.file_limit_size, stats.lock()); - stats.createStat(2, config.file_limit_size, stats.lock()); + BlobFileId file_id1 = PageTypeUtils::nextFileID(PageType::Normal, file_id0); + BlobFileId file_id2 = PageTypeUtils::nextFileID(PageType::Normal, file_id1); + { + auto lock = stats.lock(); + stats.createStat(file_id1, config.file_limit_size, lock); + stats.createStat(file_id2, config.file_limit_size, lock); + } + auto stats_copy = stats.getStats(); ASSERT_EQ(stats_copy.size(), std::min(getTotalStatsNum(stats_copy), path_num)); ASSERT_EQ(getTotalStatsNum(stats_copy), 3); - ASSERT_EQ(stats.roll_id, 3); - stats.eraseStat(0, 
stats.lock()); - stats.eraseStat(1, stats.lock()); + stats.eraseStat(10, stats.lock()); + stats.eraseStat(20, stats.lock()); ASSERT_EQ(getTotalStatsNum(stats.getStats()), 1); - ASSERT_EQ(stats.roll_id, 3); } @@ -169,20 +200,26 @@ TEST_F(BlobStoreStatsTest, testStat) BlobStats stats(logger, delegator, config); - std::tie(stat, blob_file_id) = stats.chooseStat(10, stats.lock()); - ASSERT_EQ(blob_file_id, 1); + std::tie(stat, blob_file_id) = stats.chooseStat(10, PageType::Normal, stats.lock()); + ASSERT_EQ(blob_file_id, 10); ASSERT_FALSE(stat); - // still 0 - std::tie(stat, blob_file_id) = stats.chooseStat(10, stats.lock()); - ASSERT_EQ(blob_file_id, 1); + std::tie(stat, blob_file_id) = stats.chooseStat(10, PageType::Normal, stats.lock()); + ASSERT_EQ(blob_file_id, 20); ASSERT_FALSE(stat); stats.createStat(0, config.file_limit_size, stats.lock()); - std::tie(stat, blob_file_id) = stats.chooseStat(10, stats.lock()); + std::tie(stat, blob_file_id) = stats.chooseStat(10, PageType::Normal, stats.lock()); ASSERT_EQ(blob_file_id, INVALID_BLOBFILE_ID); ASSERT_TRUE(stat); + // PageType::RaftData should not use the same stat with PageType::Normal + BlobStats::BlobStatPtr raft_stat; + std::tie(raft_stat, blob_file_id) = stats.chooseStat(10, PageType::RaftData, stats.lock()); + ASSERT_EQ(blob_file_id, 31); + ASSERT_FALSE(raft_stat); + + auto offset = stat->getPosFromStat(10, stat->lock()); ASSERT_EQ(offset, 0); @@ -236,46 +273,50 @@ TEST_F(BlobStoreStatsTest, testStat) TEST_F(BlobStoreStatsTest, testFullStats) { - BlobFileId blob_file_id = 0; - BlobStats::BlobStatPtr stat; - BlobFileOffset offset = 0; - BlobStats stats(logger, delegator, config); - stat = stats.createStat(1, config.file_limit_size, stats.lock()); - offset = stat->getPosFromStat(BLOBFILE_LIMIT_SIZE - 1, stat->lock()); - ASSERT_EQ(offset, 0); - - // Can't get pos from a full stat - offset = stat->getPosFromStat(100, stat->lock()); - ASSERT_EQ(offset, INVALID_BLOBFILE_OFFSET); - - // Stat internal property should 
not changed - ASSERT_EQ(stat->sm_total_size, BLOBFILE_LIMIT_SIZE - 1); - ASSERT_EQ(stat->sm_valid_size, BLOBFILE_LIMIT_SIZE - 1); - ASSERT_LE(stat->sm_valid_rate, 1); + { + auto lock = stats.lock(); + BlobFileId file_id = 10; + BlobStats::BlobStatPtr stat = stats.createStat(file_id, config.file_limit_size, lock); + auto offset = stat->getPosFromStat(BLOBFILE_LIMIT_SIZE - 1, stat->lock()); + ASSERT_EQ(offset, 0); + stats.cur_max_id = file_id; + + // Can't get pos from a full stat + offset = stat->getPosFromStat(100, stat->lock()); + ASSERT_EQ(offset, INVALID_BLOBFILE_OFFSET); + + // Stat internal property should not changed + ASSERT_EQ(stat->sm_total_size, BLOBFILE_LIMIT_SIZE - 1); + ASSERT_EQ(stat->sm_valid_size, BLOBFILE_LIMIT_SIZE - 1); + ASSERT_LE(stat->sm_valid_rate, 1); + } // Won't choose full one - std::tie(stat, blob_file_id) = stats.chooseStat(100, stats.lock()); - ASSERT_EQ(blob_file_id, 2); - ASSERT_FALSE(stat); + { + auto [stat, blob_file_id] = stats.chooseStat(100, PageType::Normal, stats.lock()); + ASSERT_EQ(blob_file_id, 20); + ASSERT_FALSE(stat); + } // A new stat can use - stat = stats.createStat(blob_file_id, config.file_limit_size, stats.lock()); - offset = stat->getPosFromStat(100, stat->lock()); - ASSERT_EQ(offset, 0); - - // Remove the stat which id is 0 , now remain the stat which id is 1 - stats.eraseStat(1, stats.lock()); - - // Then full the stat which id 2 - offset = stat->getPosFromStat(BLOBFILE_LIMIT_SIZE - 100, stat->lock()); - ASSERT_EQ(offset, 100); + { + stats.cur_max_id = PageTypeUtils::nextFileID(PageType::Normal, stats.cur_max_id); + ASSERT_EQ(stats.cur_max_id, 30); + auto stat = stats.createStat(stats.cur_max_id, config.file_limit_size, stats.lock()); + ASSERT_EQ(stat->getPosFromStat(100, stat->lock()), 0); + + // Then full the stat which id 2 + auto offset = stat->getPosFromStat(BLOBFILE_LIMIT_SIZE - 100, stat->lock()); + ASSERT_EQ(offset, 100); + } - // Then choose stat , it should return the stat id 3 - // Stat which id is 2 
is full. - std::tie(stat, blob_file_id) = stats.chooseStat(100, stats.lock()); - ASSERT_EQ(blob_file_id, 3); - ASSERT_FALSE(stat); + { + // Then choose stat, it should return a new blob_file_id + auto [stat, blob_file_id] = stats.chooseStat(100, PageType::Normal, stats.lock()); + ASSERT_EQ(blob_file_id, 40); + ASSERT_FALSE(stat); + } } } // namespace DB::PS::V3::tests diff --git a/dbms/src/Storages/Page/V3/tests/gtest_blob_store.cpp b/dbms/src/Storages/Page/V3/tests/gtest_blob_store.cpp index 64c20a5ce92..d789ece40d3 100644 --- a/dbms/src/Storages/Page/V3/tests/gtest_blob_store.cpp +++ b/dbms/src/Storages/Page/V3/tests/gtest_blob_store.cpp @@ -75,6 +75,11 @@ class BlobStoreTest : public DB::base::TiFlashStorageTestBasic protected: BlobConfig config; + PageTypeAndConfig page_type_and_config{ + {PageType::Normal, PageTypeConfig{.heavy_gc_valid_rate = config.heavy_gc_valid_rate}}, + {PageType::RaftData, PageTypeConfig{.heavy_gc_valid_rate = config.heavy_gc_valid_rate_raft_data}}, + }; + PSDiskDelegatorPtr delegator; char fixed_buffer[1024]{}; @@ -86,7 +91,7 @@ try { const auto file_provider = DB::tests::TiFlashTestEnv::getDefaultFileProvider(); config.file_limit_size = 2560; - auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config); + auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config, page_type_and_config); BlobFileId file_id1 = 10; BlobFileId file_id2 = 12; @@ -170,17 +175,17 @@ try // write blob 1 write_batch.putPage(page_id, /* tag */ 0, std::make_shared(const_cast(c_buff), buff_size), buff_size); - blob_store.write(std::move(write_batch), nullptr); + blob_store.write(std::move(write_batch)); write_batch.clear(); // write blob 2 write_batch.putPage(page_id + 1, /* tag */ 0, std::make_shared(const_cast(c_buff), buff_size), buff_size); - blob_store.write(std::move(write_batch), nullptr); + blob_store.write(std::move(write_batch)); write_batch.clear(); // write blob 3 write_batch.putPage(page_id + 2, /* tag 
*/ 0, std::make_shared(const_cast(c_buff), buff_size), buff_size); - blob_store.write(std::move(write_batch), nullptr); + blob_store.write(std::move(write_batch)); write_batch.clear(); }; @@ -223,17 +228,18 @@ try // Case 1, all of blob been restored { auto test_paths = delegator->listPaths(); - auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config); + + auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config, page_type_and_config); write_blob_datas(blob_store); - ASSERT_TRUE(check_in_disk_file(test_paths, {1, 2, 3})); + ASSERT_TRUE(check_in_disk_file(test_paths, {10, 20, 30})); - auto blob_store_check = BlobStore(getCurrentTestName(), file_provider, delegator, config); - restore_blobs(blob_store_check, {1, 2, 3}); + auto blob_store_check = BlobStore(getCurrentTestName(), file_provider, delegator, config, page_type_and_config); + restore_blobs(blob_store_check, {10, 20, 30}); blob_store_check.blob_stats.restore(); - ASSERT_TRUE(check_in_disk_file(test_paths, {1, 2, 3})); + ASSERT_TRUE(check_in_disk_file(test_paths, {10, 20, 30})); for (const auto & path : test_paths) { DB::tests::TiFlashTestEnv::tryRemovePath(path); @@ -244,17 +250,17 @@ try // Case 2, only recover blob 1 { auto test_paths = delegator->listPaths(); - auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config); + auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config, page_type_and_config); write_blob_datas(blob_store); - ASSERT_TRUE(check_in_disk_file(test_paths, {1, 2, 3})); + ASSERT_TRUE(check_in_disk_file(test_paths, {10, 20, 30})); - auto blob_store_check = BlobStore(getCurrentTestName(), file_provider, delegator, config); - restore_blobs(blob_store_check, {1}); + auto blob_store_check = BlobStore(getCurrentTestName(), file_provider, delegator, config, page_type_and_config); + restore_blobs(blob_store_check, {10}); blob_store_check.blob_stats.restore(); - 
ASSERT_TRUE(check_in_disk_file(test_paths, {1})); + ASSERT_TRUE(check_in_disk_file(test_paths, {10})); for (const auto & path : test_paths) { DB::tests::TiFlashTestEnv::tryRemovePath(path); @@ -265,17 +271,17 @@ try // Case 3, only recover blob 2 { auto test_paths = delegator->listPaths(); - auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config); + auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config, page_type_and_config); write_blob_datas(blob_store); - ASSERT_TRUE(check_in_disk_file(test_paths, {1, 2, 3})); + ASSERT_TRUE(check_in_disk_file(test_paths, {10, 20, 30})); - auto blob_store_check = BlobStore(getCurrentTestName(), file_provider, delegator, config); - restore_blobs(blob_store_check, {2}); + auto blob_store_check = BlobStore(getCurrentTestName(), file_provider, delegator, config, page_type_and_config); + restore_blobs(blob_store_check, {20}); blob_store_check.blob_stats.restore(); - ASSERT_TRUE(check_in_disk_file(test_paths, {2})); + ASSERT_TRUE(check_in_disk_file(test_paths, {20})); for (const auto & path : test_paths) { DB::tests::TiFlashTestEnv::tryRemovePath(path); @@ -286,17 +292,17 @@ try // Case 4, only recover blob 3 { auto test_paths = delegator->listPaths(); - auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config); + auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config, page_type_and_config); write_blob_datas(blob_store); - ASSERT_TRUE(check_in_disk_file(test_paths, {1, 2, 3})); + ASSERT_TRUE(check_in_disk_file(test_paths, {10, 20, 30})); - auto blob_store_check = BlobStore(getCurrentTestName(), file_provider, delegator, config); - restore_blobs(blob_store_check, {3}); + auto blob_store_check = BlobStore(getCurrentTestName(), file_provider, delegator, config, page_type_and_config); + restore_blobs(blob_store_check, {30}); blob_store_check.blob_stats.restore(); - ASSERT_TRUE(check_in_disk_file(test_paths, {3})); + 
ASSERT_TRUE(check_in_disk_file(test_paths, {30})); for (const auto & path : test_paths) { DB::tests::TiFlashTestEnv::tryRemovePath(path); @@ -307,13 +313,13 @@ try // Case 5, recover a not exist blob { auto test_paths = delegator->listPaths(); - auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config); + auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config, page_type_and_config); write_blob_datas(blob_store); - ASSERT_TRUE(check_in_disk_file(test_paths, {1, 2, 3})); + ASSERT_TRUE(check_in_disk_file(test_paths, {10, 20, 30})); - auto blob_store_check = BlobStore(getCurrentTestName(), file_provider, delegator, config); - ASSERT_THROW(restore_blobs(blob_store_check, {4}), DB::Exception); + auto blob_store_check = BlobStore(getCurrentTestName(), file_provider, delegator, config, page_type_and_config); + ASSERT_THROW(restore_blobs(blob_store_check, {40}), DB::Exception); } } CATCH @@ -321,12 +327,11 @@ CATCH TEST_F(BlobStoreTest, testWriteRead) { const auto file_provider = DB::tests::TiFlashTestEnv::getDefaultFileProvider(); - PageIdU64 page_id = 50; size_t buff_nums = 21; size_t buff_size = 123; - auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config); + auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config, page_type_and_config); char c_buff[buff_size * buff_nums]; WriteBatch wb; @@ -343,7 +348,7 @@ TEST_F(BlobStoreTest, testWriteRead) } ASSERT_EQ(wb.getTotalDataSize(), buff_nums * buff_size); - PageEntriesEdit edit = blob_store.write(std::move(wb), nullptr); + PageEntriesEdit edit = blob_store.write(std::move(wb)); ASSERT_EQ(edit.size(), buff_nums); char c_buff_read[buff_size * buff_nums]; @@ -354,7 +359,7 @@ TEST_F(BlobStoreTest, testWriteRead) ASSERT_EQ(record.type, EditRecordType::PUT); ASSERT_EQ(record.entry.offset, index * buff_size); ASSERT_EQ(record.entry.size, buff_size); - ASSERT_EQ(record.entry.file_id, 1); + ASSERT_EQ(record.entry.file_id, 
10); // Read directly from the file blob_store.read(buildV3Id(TEST_NAMESPACE_ID, page_id), @@ -410,8 +415,11 @@ TEST_F(BlobStoreTest, testWriteReadWithIOLimiter) size_t wb_nums = 5; size_t buff_size = 10ul * 1024; const size_t rate_target = buff_size - 1; - - auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config); + PageTypeAndConfig page_type_and_config{ + {PageType::Normal, PageTypeConfig{.heavy_gc_valid_rate = config.heavy_gc_valid_rate}}, + {PageType::RaftData, PageTypeConfig{.heavy_gc_valid_rate = config.heavy_gc_valid_rate_raft_data}}, + }; + auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config, page_type_and_config); char c_buff[wb_nums * buff_size]; WriteBatch wbs[wb_nums]; @@ -433,7 +441,7 @@ TEST_F(BlobStoreTest, testWriteReadWithIOLimiter) AtomicStopwatch write_watch; for (size_t i = 0; i < wb_nums; ++i) { - edits[i] = blob_store.write(std::move(wbs[i]), write_limiter); + edits[i] = blob_store.write(std::move(wbs[i]), PageType::Normal, write_limiter); } auto write_elapsed = write_watch.elapsedSeconds(); auto write_actual_rate = write_limiter->getTotalBytesThrough() / write_elapsed; @@ -516,7 +524,6 @@ TEST_F(BlobStoreTest, testWriteReadWithFiled) try { const auto file_provider = DB::tests::TiFlashTestEnv::getDefaultFileProvider(); - PageIdU64 page_id1 = 50; PageIdU64 page_id2 = 51; PageIdU64 page_id3 = 53; @@ -524,7 +531,7 @@ try size_t buff_size = 120; WriteBatch wb; - auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config); + auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config, page_type_and_config); char c_buff[buff_size]; for (size_t j = 0; j < buff_size; ++j) @@ -538,7 +545,7 @@ try wb.putPage(page_id1, /* tag */ 0, buff1, buff_size, {20, 40, 40, 20}); wb.putPage(page_id2, /* tag */ 0, buff2, buff_size, {10, 50, 20, 20, 20}); wb.putPage(page_id3, /* tag */ 0, buff3, buff_size, {10, 5, 20, 20, 15, 5, 15, 30}); - PageEntriesEdit edit = 
blob_store.write(std::move(wb), nullptr); + PageEntriesEdit edit = blob_store.write(std::move(wb)); ASSERT_EQ(edit.size(), 3); BlobStore::FieldReadInfo read_info1(buildV3Id(TEST_NAMESPACE_ID, page_id1), edit.getRecords()[0].entry, {0, 1, 2, 3}); @@ -583,7 +590,6 @@ CATCH TEST_F(BlobStoreTest, testFeildOffsetWriteRead) { const auto file_provider = DB::tests::TiFlashTestEnv::getDefaultFileProvider(); - PageIdU64 page_id = 50; size_t buff_size = 20; size_t buff_nums = 5; @@ -597,7 +603,7 @@ TEST_F(BlobStoreTest, testFeildOffsetWriteRead) off += data_sz; } - auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config); + auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config, page_type_and_config); char c_buff[buff_size * buff_nums]; WriteBatch wb; @@ -614,7 +620,7 @@ TEST_F(BlobStoreTest, testFeildOffsetWriteRead) } ASSERT_EQ(wb.getTotalDataSize(), buff_nums * buff_size); - PageEntriesEdit edit = blob_store.write(std::move(wb), nullptr); + PageEntriesEdit edit = blob_store.write(std::move(wb)); ASSERT_EQ(edit.size(), buff_nums); char c_buff_read[buff_size * buff_nums]; @@ -625,7 +631,7 @@ TEST_F(BlobStoreTest, testFeildOffsetWriteRead) ASSERT_EQ(record.type, EditRecordType::PUT); ASSERT_EQ(record.entry.offset, index * buff_size); ASSERT_EQ(record.entry.size, buff_size); - ASSERT_EQ(record.entry.file_id, 1); + ASSERT_EQ(record.entry.file_id, 10); PageFieldSizes check_field_sizes; for (const auto & [field_offset, crc] : record.entry.field_offsets) @@ -654,7 +660,7 @@ TEST_F(BlobStoreTest, testWrite) try { const auto file_provider = DB::tests::TiFlashTestEnv::getDefaultFileProvider(); - auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config); + auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config, page_type_and_config); PageIdU64 page_id = 50; const size_t buff_size = 1024; @@ -675,7 +681,7 @@ try wb.putPage(page_id, /*tag*/ 0, buff1, buff_size); 
wb.putPage(page_id, /*tag*/ 0, buff2, buff_size); - PageEntriesEdit edit = blob_store.write(std::move(wb), nullptr); + PageEntriesEdit edit = blob_store.write(std::move(wb)); ASSERT_EQ(edit.size(), 2); auto records = edit.getRecords(); @@ -685,14 +691,14 @@ try ASSERT_EQ(record.page_id.low, page_id); ASSERT_EQ(record.entry.offset, 0); ASSERT_EQ(record.entry.size, buff_size); - ASSERT_EQ(record.entry.file_id, 1); + ASSERT_EQ(record.entry.file_id, 10); record = records[1]; ASSERT_EQ(record.type, EditRecordType::PUT); ASSERT_EQ(record.page_id.low, page_id); ASSERT_EQ(record.entry.offset, buff_size); ASSERT_EQ(record.entry.size, buff_size); - ASSERT_EQ(record.entry.file_id, 1); + ASSERT_EQ(record.entry.file_id, 10); } @@ -702,7 +708,7 @@ try wb.delPage(page_id + 1); wb.delPage(page_id); - PageEntriesEdit edit = blob_store.write(std::move(wb), nullptr); + PageEntriesEdit edit = blob_store.write(std::move(wb)); ASSERT_EQ(edit.size(), 3); auto records = edit.getRecords(); @@ -735,7 +741,7 @@ try wb.putRefPage(page_id + 1, page_id); wb.delPage(page_id); - PageEntriesEdit edit = blob_store.write(std::move(wb), nullptr); + PageEntriesEdit edit = blob_store.write(std::move(wb)); auto records = edit.getRecords(); auto record = records[0]; @@ -743,7 +749,7 @@ try ASSERT_EQ(record.page_id.low, page_id); ASSERT_EQ(record.entry.offset, buff_size * 2); ASSERT_EQ(record.entry.size, buff_size); - ASSERT_EQ(record.entry.file_id, 1); + ASSERT_EQ(record.entry.file_id, 10); record = records[1]; ASSERT_EQ(record.type, EditRecordType::REF); @@ -765,7 +771,7 @@ TEST_F(BlobStoreTest, DISABLED_testWriteOutOfLimitSize) { config.file_limit_size = buff_size - 1; - auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config); + auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config, page_type_and_config); WriteBatch wb; char c_buff[buff_size]; @@ -775,7 +781,7 @@ TEST_F(BlobStoreTest, DISABLED_testWriteOutOfLimitSize) bool catch_exception = 
false; try { - blob_store.write(std::move(wb), nullptr); + blob_store.write(std::move(wb)); } catch (DB::Exception & e) { @@ -789,7 +795,7 @@ TEST_F(BlobStoreTest, DISABLED_testWriteOutOfLimitSize) size_t buffer_sizes[] = {buff_size, buff_size - 1, buff_size / 2 + 1}; for (const auto & buf_size : buffer_sizes) { - auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config); + auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config, page_type_and_config); WriteBatch wb; char c_buff1[buf_size]; @@ -800,7 +806,7 @@ TEST_F(BlobStoreTest, DISABLED_testWriteOutOfLimitSize) wb.putPage(50, /*tag*/ 0, buff1, buf_size); - auto edit = blob_store.write(std::move(wb), nullptr); + auto edit = blob_store.write(std::move(wb)); ASSERT_EQ(edit.size(), 1); auto records = edit.getRecords(); @@ -813,7 +819,7 @@ TEST_F(BlobStoreTest, DISABLED_testWriteOutOfLimitSize) wb.clear(); wb.putPage(51, /*tag*/ 0, buff2, buf_size); - edit = blob_store.write(std::move(wb), nullptr); + edit = blob_store.write(std::move(wb)); ASSERT_EQ(edit.size(), 1); records = edit.getRecords(); @@ -832,7 +838,7 @@ TEST_F(BlobStoreTest, testBlobStoreGcStats) size_t buff_size = 1024; size_t buff_nums = 10; PageIdU64 page_id = 50; - auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config); + auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config, page_type_and_config); std::list remove_entries_idx1 = {1, 3, 4, 7, 9}; std::list remove_entries_idx2 = {6, 8}; @@ -850,7 +856,7 @@ TEST_F(BlobStoreTest, testBlobStoreGcStats) } } - auto edit = blob_store.write(std::move(wb), nullptr); + auto edit = blob_store.write(std::move(wb)); size_t idx = 0; PageEntriesV3 entries_del1, entries_del2, remain_entries; @@ -887,9 +893,9 @@ TEST_F(BlobStoreTest, testBlobStoreGcStats) // After remove `entries_del1`. 
// Remain entries index [0, 2, 5, 6, 8] blob_store.remove(entries_del1); - ASSERT_EQ(entries_del1.begin()->file_id, 1); + ASSERT_EQ(entries_del1.begin()->file_id, 10); - auto stat = blob_store.blob_stats.blobIdToStat(1); + auto stat = blob_store.blob_stats.blobIdToStat(10); ASSERT_EQ(stat->sm_valid_rate, 0.5); ASSERT_EQ(stat->sm_total_size, buff_size * buff_nums); @@ -912,7 +918,7 @@ TEST_F(BlobStoreTest, testBlobStoreGcStats) ASSERT_EQ(stat->sm_valid_size, buff_size * 3); // Check disk file have been truncate to right margin - String path = blob_store.getBlobFile(1)->getPath(); + String path = blob_store.getBlobFile(10)->getPath(); Poco::File blob_file_in_disk(path); ASSERT_EQ(blob_file_in_disk.getSize(), stat->sm_total_size); @@ -928,7 +934,7 @@ TEST_F(BlobStoreTest, testBlobStoreGcStats2) size_t buff_size = 1024; size_t buff_nums = 10; PageIdU64 page_id = 50; - auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config); + auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config, page_type_and_config); std::list remove_entries_idx = {0, 1, 2, 3, 4, 5, 6, 7}; WriteBatch wb; @@ -945,7 +951,7 @@ TEST_F(BlobStoreTest, testBlobStoreGcStats2) } } - auto edit = blob_store.write(std::move(wb), nullptr); + auto edit = blob_store.write(std::move(wb)); size_t idx = 0; PageEntriesV3 entries_del; @@ -967,7 +973,7 @@ TEST_F(BlobStoreTest, testBlobStoreGcStats2) // Remain entries index [8, 9]. 
blob_store.remove(entries_del); - auto stat = blob_store.blob_stats.blobIdToStat(1); + auto stat = blob_store.blob_stats.blobIdToStat(10); const auto & gc_stats = blob_store.getGCStats(); ASSERT_FALSE(gc_stats.empty()); @@ -977,7 +983,66 @@ TEST_F(BlobStoreTest, testBlobStoreGcStats2) ASSERT_EQ(stat->sm_valid_size, buff_size * 2); // Then we must do heavy GC - ASSERT_EQ(*gc_stats.begin(), 1); + ASSERT_EQ(*(gc_stats.begin()->second.begin()), 10); +} + +TEST_F(BlobStoreTest, testBlobStoreRaftDataGcStats) +{ + const auto file_provider = DB::tests::TiFlashTestEnv::getDefaultFileProvider(); + size_t buff_size = 1024; + size_t buff_nums = 10; + PageIdU64 page_id = 50; + PageTypeAndConfig page_type_and_config; + page_type_and_config.emplace(PageType::Normal, PageTypeConfig{.heavy_gc_valid_rate = 0.5}); + page_type_and_config.emplace(PageType::RaftData, PageTypeConfig{.heavy_gc_valid_rate = 0.05}); + auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config, page_type_and_config); + std::list remove_entries_idx = {0, 1, 2, 3, 4, 5, 6, 7}; + + WriteBatch wb; + char c_buff[buff_size * buff_nums]; + { + for (size_t i = 0; i < buff_nums; ++i) + { + for (size_t j = 0; j < buff_size; ++j) + { + c_buff[j + i * buff_size] = static_cast((j & 0xff) + i); + } + ReadBufferPtr buff = std::make_shared(const_cast(c_buff + i * buff_size), buff_size); + wb.putPage(page_id, /* tag */ 0, buff, buff_size); + } + } + + auto edit = blob_store.write(std::move(wb), PageType::RaftData, nullptr); + + size_t idx = 0; + PageEntriesV3 entries_del; + for (const auto & record : edit.getRecords()) + { + for (size_t index : remove_entries_idx) + { + if (idx == index) + { + entries_del.emplace_back(record.entry); + break; + } + } + + idx++; + } + + // After remove `entries_del`. + // Remain entries index [8, 9]. 
+ blob_store.remove(entries_del); + + auto stat = blob_store.blob_stats.blobIdToStat(PageTypeUtils::nextFileID(PageType::RaftData, 1)); + ASSERT_NE(stat, nullptr); + const auto & gc_stats = blob_store.getGCStats(); + // No full gc for raft data + ASSERT_TRUE(gc_stats.empty()); + + ASSERT_EQ(stat->sm_valid_rate, 0.2); + ASSERT_EQ(stat->sm_total_size, buff_size * buff_nums); + ASSERT_EQ(stat->sm_valid_size, buff_size * 2); } @@ -988,7 +1053,7 @@ TEST_F(BlobStoreTest, GC) size_t buff_nums = 21; size_t buff_size = 123; - auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config); + auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config, page_type_and_config); char c_buff[buff_size * buff_nums]; WriteBatch wb; @@ -1005,7 +1070,7 @@ TEST_F(BlobStoreTest, GC) } ASSERT_EQ(wb.getTotalDataSize(), buff_nums * buff_size); - PageEntriesEdit edit = blob_store.write(std::move(wb), nullptr); + PageEntriesEdit edit = blob_store.write(std::move(wb)); ASSERT_EQ(edit.size(), buff_nums); PageDirectory::GcEntries versioned_pageid_entries; @@ -1014,13 +1079,16 @@ TEST_F(BlobStoreTest, GC) versioned_pageid_entries.emplace_back(buildV3Id(TEST_NAMESPACE_ID, page_id), 1, record.entry); } PageDirectory::GcEntriesMap gc_context; - gc_context[1] = versioned_pageid_entries; + gc_context[10] = versioned_pageid_entries; // Before we do BlobStore we need change BlobFile0 to Read-Only - auto stat = blob_store.blob_stats.blobIdToStat(1); + auto stat = blob_store.blob_stats.blobIdToStat(10); stat->changeToReadOnly(); - const auto & gc_edit = blob_store.gc(gc_context, static_cast(buff_size * buff_nums)); + using PageTypeAndGcInfo = typename u128::PageDirectoryType::PageTypeAndGcInfo; + PageTypeAndGcInfo page_type_and_gc_info; + page_type_and_gc_info.emplace_back(PageType::Normal, gc_context, static_cast(buff_size * buff_nums)); + const auto & gc_edit = blob_store.gc(page_type_and_gc_info); // Check copy_list which will apply for Mvcc 
ASSERT_EQ(gc_edit.size(), buff_nums); @@ -1029,19 +1097,19 @@ TEST_F(BlobStoreTest, GC) { ASSERT_EQ(record.page_id.low, page_id); auto it_entry = std::get<2>(*it); - ASSERT_EQ(record.entry.file_id, 2); + ASSERT_EQ(record.entry.file_id, 20); ASSERT_EQ(record.entry.checksum, it_entry.checksum); ASSERT_EQ(record.entry.size, it_entry.size); it++; } // Check blobfile1 - Poco::File file1(blob_store.getBlobFile(1)->getPath()); - Poco::File file2(blob_store.getBlobFile(2)->getPath()); + Poco::File file1(blob_store.getBlobFile(10)->getPath()); + Poco::File file2(blob_store.getBlobFile(20)->getPath()); ASSERT_TRUE(file1.exists()); ASSERT_TRUE(file2.exists()); ASSERT_EQ(file1.getSize(), file2.getSize()); - ASSERT_EQ(blob_store.blob_stats.blobIdToStat(2)->sm_total_size, file2.getSize()); + ASSERT_EQ(blob_store.blob_stats.blobIdToStat(20)->sm_total_size, file2.getSize()); } @@ -1056,7 +1124,7 @@ try BlobConfig config_with_small_file_limit_size; config_with_small_file_limit_size.file_limit_size = 100; - auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config_with_small_file_limit_size); + auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config_with_small_file_limit_size, page_type_and_config); char c_buff[buff_size * buff_nums]; WriteBatch wb; @@ -1072,7 +1140,7 @@ try ReadBufferPtr buff = std::make_shared(const_cast(c_buff + i * buff_size), buff_size); wb.putPage(page_id, /* tag */ 0, buff, buff_size); - PageEntriesEdit edit = blob_store.write(std::move(wb), nullptr); + PageEntriesEdit edit = blob_store.write(std::move(wb)); const auto & records = edit.getRecords(); ASSERT_EQ(records.size(), 1); @@ -1091,7 +1159,10 @@ try wb.clear(); } - const auto & edit = blob_store.gc(gc_context, static_cast(buff_size * buff_nums)); + using PageTypeAndGcInfo = typename u128::PageDirectoryType::PageTypeAndGcInfo; + PageTypeAndGcInfo page_type_and_gc_info; + page_type_and_gc_info.emplace_back(PageType::Normal, gc_context, 
static_cast(buff_size * buff_nums)); + const auto & edit = blob_store.gc(page_type_and_gc_info); ASSERT_EQ(edit.size(), buff_nums); } CATCH @@ -1107,7 +1178,7 @@ try BlobConfig config_with_small_file_limit_size; config_with_small_file_limit_size.file_limit_size = 100; - auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config_with_small_file_limit_size); + auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config_with_small_file_limit_size, page_type_and_config); char c_buff[buff_size * buff_nums]; WriteBatch wb; @@ -1123,7 +1194,7 @@ try ReadBufferPtr buff = std::make_shared(const_cast(c_buff + i * buff_size), buff_size); PageFieldSizes field_sizes{1, 2, 4, 8, (buff_size - 1 - 2 - 4 - 8)}; wb.putPage(page_id, /* tag */ 0, buff, buff_size, field_sizes); - PageEntriesEdit edit = blob_store.write(std::move(wb), nullptr); + PageEntriesEdit edit = blob_store.write(std::move(wb)); const auto & records = edit.getRecords(); ASSERT_EQ(records.size(), 1); @@ -1152,7 +1223,7 @@ try BlobConfig test_config; test_config.file_limit_size = 4 * MB; - auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, test_config); + auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, test_config, page_type_and_config); // PUT page_id 50 into blob 1 (normal write) { @@ -1163,15 +1234,15 @@ try WriteBatch wb; ReadBufferPtr read_buff = buffer.tryGetReadBuffer(); wb.putPage(page_id, /* tag */ 0, read_buff, serialized_size); - PageEntriesEdit edit = blob_store.write(std::move(wb), nullptr); + PageEntriesEdit edit = blob_store.write(std::move(wb)); const auto & records = edit.getRecords(); ASSERT_EQ(records.size(), 1); - ASSERT_EQ(records[0].entry.file_id, 1); + ASSERT_EQ(records[0].entry.file_id, 10); ASSERT_EQ(records[0].entry.offset, 0); ASSERT_EQ(records[0].entry.size, serialized_size); - const auto & stat = blob_store.blob_stats.blobIdToStat(1); + const auto & stat = 
blob_store.blob_stats.blobIdToStat(10); ASSERT_TRUE(stat->isNormal()); ASSERT_EQ(stat->sm_max_caps, test_config.file_limit_size - serialized_size); ASSERT_DOUBLE_EQ(stat->sm_valid_rate, 1.0); @@ -1199,16 +1270,16 @@ try WriteBatch wb; ReadBufferPtr read_buff = buffer.tryGetReadBuffer(); wb.putPage(page_id, /* tag */ 0, read_buff, serialized_size); - PageEntriesEdit edit = blob_store.write(std::move(wb), nullptr); + PageEntriesEdit edit = blob_store.write(std::move(wb)); const auto & records = edit.getRecords(); ASSERT_EQ(records.size(), 1); - ASSERT_EQ(records[0].entry.file_id, 2); + ASSERT_EQ(records[0].entry.file_id, 20); ASSERT_EQ(records[0].entry.offset, 0); ASSERT_EQ(records[0].entry.size, serialized_size); // verify blobstat - const auto & stat = blob_store.blob_stats.blobIdToStat(2); + const auto & stat = blob_store.blob_stats.blobIdToStat(20); ASSERT_TRUE(stat->isReadOnly()); // large write, this stat is read only ASSERT_EQ(stat->sm_max_caps, 0); ASSERT_DOUBLE_EQ(stat->sm_valid_rate, 1.0); @@ -1232,15 +1303,15 @@ try WriteBatch wb; ReadBufferPtr read_buff = buffer.tryGetReadBuffer(); wb.putPage(page_id, /* tag */ 0, read_buff, serialized_size); - PageEntriesEdit edit = blob_store.write(std::move(wb), nullptr); + PageEntriesEdit edit = blob_store.write(std::move(wb)); const auto & records = edit.getRecords(); ASSERT_EQ(records.size(), 1); - ASSERT_EQ(records[0].entry.file_id, 1); + ASSERT_EQ(records[0].entry.file_id, 10); ASSERT_EQ(records[0].entry.offset, 200); ASSERT_EQ(records[0].entry.size, 100); - const auto & stat = blob_store.blob_stats.blobIdToStat(1); + const auto & stat = blob_store.blob_stats.blobIdToStat(10); ASSERT_TRUE(stat->isNormal()); ASSERT_EQ(stat->sm_max_caps, test_config.file_limit_size - 200 - 100); ASSERT_DOUBLE_EQ(stat->sm_valid_rate, 1.0); @@ -1268,15 +1339,15 @@ try WriteBatch wb; ReadBufferPtr read_buff = buffer.tryGetReadBuffer(); wb.putPage(page_id, /* tag */ 0, read_buff, serialized_size); - PageEntriesEdit edit = 
blob_store.write(std::move(wb), nullptr); + PageEntriesEdit edit = blob_store.write(std::move(wb)); const auto & records = edit.getRecords(); ASSERT_EQ(records.size(), 1); - ASSERT_EQ(records[0].entry.file_id, 3); + ASSERT_EQ(records[0].entry.file_id, 30); ASSERT_EQ(records[0].entry.offset, 0); ASSERT_EQ(records[0].entry.size, serialized_size); - const auto & stat = blob_store.blob_stats.blobIdToStat(3); + const auto & stat = blob_store.blob_stats.blobIdToStat(30); ASSERT_TRUE(stat->isReadOnly()); // large write, this stat is read only ASSERT_EQ(stat->sm_max_caps, 0); ASSERT_DOUBLE_EQ(stat->sm_valid_rate, 1.0); @@ -1311,30 +1382,30 @@ try } ASSERT_EQ(test_scales.size(), actual_size.size()); - PageEntriesEdit edit = blob_store.write(std::move(wb), nullptr); + PageEntriesEdit edit = blob_store.write(std::move(wb)); const auto & records = edit.getRecords(); ASSERT_EQ(records.size(), 4); - // PUT page_id 54 into blob 4 (large write) + // PUT page_id 54 into blob 40 (large write) ASSERT_EQ(records[0].page_id.low, 54); - ASSERT_EQ(records[0].entry.file_id, 4); + ASSERT_EQ(records[0].entry.file_id, 40); ASSERT_EQ(records[0].entry.offset, 0); ASSERT_EQ(records[0].entry.size, actual_size[0]); - // PUT page_id 55 into blob 1 or 3 + // PUT page_id 55 into blob 10 or 30 ASSERT_EQ(records[1].page_id.low, 55); - ASSERT_TRUE(records[1].entry.file_id == 1 || records[1].entry.file_id == 3); + ASSERT_TRUE(records[1].entry.file_id == 10 || records[1].entry.file_id == 30); ASSERT_EQ(records[1].entry.size, actual_size[1]); // PUT page_id 56 into blob 10 or 30 ASSERT_EQ(records[2].page_id.low, 56); - ASSERT_TRUE(records[2].entry.file_id == 1 || records[2].entry.file_id == 3); + ASSERT_TRUE(records[2].entry.file_id == 10 || records[2].entry.file_id == 30); ASSERT_EQ(records[2].entry.size, actual_size[2]); // PUT page_id 57 into blob 50 (large write) ASSERT_EQ(records[3].page_id.low, 57); - ASSERT_EQ(records[3].entry.file_id, 5); + ASSERT_EQ(records[3].entry.file_id, 50);
ASSERT_EQ(records[3].entry.offset, 0); ASSERT_EQ(records[3].entry.size, actual_size[3]); } @@ -1350,7 +1421,7 @@ try BlobConfig test_config; test_config.file_limit_size = 4 * MB; - auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, test_config); + auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, test_config, page_type_and_config); // PUT page_id 50 into blob 1 (large write) { @@ -1371,16 +1442,16 @@ try return sizes; }(); wb.putPage(page_id, /* tag */ 0, read_buff, serialized_size, field_sizes); - PageEntriesEdit edit = blob_store.write(std::move(wb), nullptr); + PageEntriesEdit edit = blob_store.write(std::move(wb)); const auto & records = edit.getRecords(); ASSERT_EQ(records.size(), 1); - ASSERT_EQ(records[0].entry.file_id, 1); + ASSERT_EQ(records[0].entry.file_id, 10); ASSERT_EQ(records[0].entry.offset, 0); ASSERT_EQ(records[0].entry.size, serialized_size); // verify blobstat - const auto & stat = blob_store.blob_stats.blobIdToStat(1); + const auto & stat = blob_store.blob_stats.blobIdToStat(10); ASSERT_TRUE(stat->isReadOnly()); // large write, this stat is read only ASSERT_EQ(stat->sm_max_caps, 0); ASSERT_DOUBLE_EQ(stat->sm_valid_rate, 1.0); @@ -1439,7 +1510,7 @@ try BlobConfig test_config; test_config.file_limit_size = 4 * MB; - auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, test_config); + auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, test_config, page_type_and_config); // PUT page_id 50 into blob 1 (normal write) { @@ -1450,15 +1521,15 @@ try WriteBatch wb; ReadBufferPtr read_buff = buffer.tryGetReadBuffer(); wb.putPage(page_id, /* tag */ 0, read_buff, serialized_size); - PageEntriesEdit edit = blob_store.write(std::move(wb), nullptr); + PageEntriesEdit edit = blob_store.write(std::move(wb)); const auto & records = edit.getRecords(); ASSERT_EQ(records.size(), 1); - ASSERT_EQ(records[0].entry.file_id, 1); + ASSERT_EQ(records[0].entry.file_id, 10); 
ASSERT_EQ(records[0].entry.offset, 0); ASSERT_EQ(records[0].entry.size, serialized_size); - const auto & stat = blob_store.blob_stats.blobIdToStat(1); + const auto & stat = blob_store.blob_stats.blobIdToStat(10); ASSERT_TRUE(stat->isNormal()); ASSERT_EQ(stat->sm_max_caps, test_config.file_limit_size - serialized_size); ASSERT_DOUBLE_EQ(stat->sm_valid_rate, 1.0); @@ -1494,7 +1565,7 @@ try FailPointHelper::enableFailPoint(FailPoints::exception_after_large_write_exceed, static_cast(test_config.file_limit_size)); try { - blob_store.write(std::move(wb), nullptr); + blob_store.write(std::move(wb)); } catch (DB::Exception & e) { @@ -1502,7 +1573,7 @@ try } // no new-added stat - ASSERT_EQ(blob_store.blob_stats.blobIdToStat(2, /*ignore_not_exist*/ true), nullptr); + ASSERT_EQ(blob_store.blob_stats.blobIdToStat(20, /*ignore_not_exist*/ true), nullptr); page_id++; } @@ -1518,7 +1589,7 @@ try BlobConfig config_with_small_file_limit_size; config_with_small_file_limit_size.file_limit_size = 400; - auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config_with_small_file_limit_size); + auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config_with_small_file_limit_size, page_type_and_config); { size_t size_500 = 500; @@ -1527,7 +1598,7 @@ try WriteBatch wb; ReadBufferPtr buff = std::make_shared(const_cast(c_buff), size_500); wb.putPage(page_id, /* tag */ 0, buff, size_500); - PageEntriesEdit edit = blob_store.write(std::move(wb), nullptr); + PageEntriesEdit edit = blob_store.write(std::move(wb)); const auto & gc_info = blob_store.getGCStats(); ASSERT_TRUE(gc_info.empty()); @@ -1553,26 +1624,26 @@ try PageEntryV3 entry_from_write; { - auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config_with_small_file_limit_size); + auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config_with_small_file_limit_size, page_type_and_config); size_t size_500 = 500; char c_buff[size_500]; 
WriteBatch wb; ReadBufferPtr buff = std::make_shared(const_cast(c_buff), size_500); wb.putPage(page_id, /* tag */ 0, buff, size_500); - auto edit = blob_store.write(std::move(wb), nullptr); + auto edit = blob_store.write(std::move(wb)); const auto & records = edit.getRecords(); ASSERT_EQ(records.size(), 1); entry_from_write = records[0].entry; } { - auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config_with_small_file_limit_size); + auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config_with_small_file_limit_size, page_type_and_config); blob_store.registerPaths(); blob_store.blob_stats.restoreByEntry(entry_from_write); blob_store.blob_stats.restore(); - const auto & stat = blob_store.blob_stats.blobIdToStat(1); + const auto & stat = blob_store.blob_stats.blobIdToStat(10); ASSERT_EQ(stat->sm_max_caps, 0); ASSERT_DOUBLE_EQ(stat->sm_valid_rate, 1.0); ASSERT_EQ(stat->sm_valid_size, 500); @@ -1593,7 +1664,7 @@ try PageEntryV3 entry_from_write1; PageEntryV3 entry_from_write2; { - auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config_with_small_file_limit_size); + auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config_with_small_file_limit_size, page_type_and_config); size_t size_500 = 500; size_t size_200 = 200; char c_buff1[size_500]; @@ -1604,7 +1675,7 @@ try ReadBufferPtr buff2 = std::make_shared(const_cast(c_buff2), size_200); wb.putPage(page_id, /* tag */ 0, buff1, size_500); wb.putPage(page_id + 1, /* tag */ 0, buff2, size_200); - auto edit = blob_store.write(std::move(wb), nullptr); + auto edit = blob_store.write(std::move(wb)); const auto & records = edit.getRecords(); ASSERT_EQ(records.size(), 2); entry_from_write1 = records[0].entry; @@ -1612,17 +1683,17 @@ try ASSERT_EQ(entry_from_write1.size, 500); ASSERT_EQ(entry_from_write2.size, 200); - ASSERT_TRUE(blob_store.blob_stats.blobIdToStat(1)->isNormal()); + 
ASSERT_TRUE(blob_store.blob_stats.blobIdToStat(10)->isNormal()); } config_with_small_file_limit_size.file_limit_size = 400; { - auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config_with_small_file_limit_size); + auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config_with_small_file_limit_size, page_type_and_config); blob_store.registerPaths(); blob_store.blob_stats.restoreByEntry(entry_from_write1); blob_store.blob_stats.restoreByEntry(entry_from_write2); blob_store.blob_stats.restore(); - const auto & stat = blob_store.blob_stats.blobIdToStat(1); + const auto & stat = blob_store.blob_stats.blobIdToStat(10); ASSERT_EQ(stat->sm_max_caps, 0); ASSERT_DOUBLE_EQ(stat->sm_valid_rate, 1.0); ASSERT_EQ(stat->sm_valid_size, 700); @@ -1638,10 +1709,10 @@ try WriteBatch wb; ReadBufferPtr buff = std::make_shared(const_cast(c_buff), size_100); wb.putPage(page_id, /* tag */ 0, buff, size_100); - auto edit = blob_store.write(std::move(wb), nullptr); + auto edit = blob_store.write(std::move(wb)); const auto & records = edit.getRecords(); ASSERT_EQ(records.size(), 1); - ASSERT_EQ(records[0].entry.file_id, 2); + ASSERT_EQ(records[0].entry.file_id, 20); ASSERT_EQ(getTotalStatsNum(blob_store.blob_stats.getStats()), 2); // remove one shot blob file @@ -1665,7 +1736,7 @@ try PageEntryV3 entry_from_write1; PageEntryV3 entry_from_write2; PageEntryV3 entry_from_write3; - auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config_with_small_file_limit_size); + auto blob_store = BlobStore(getCurrentTestName(), file_provider, delegator, config_with_small_file_limit_size, page_type_and_config); { size_t size_100 = 100; size_t size_500 = 500; @@ -1681,14 +1752,14 @@ try wb.putPage(page_id1, /* tag */ 0, buff1, size_100); wb.putPage(page_id2, /* tag */ 0, buff2, size_500); wb.putPage(page_id3, /* tag */ 0, buff3, size_200); - auto edit = blob_store.write(std::move(wb), nullptr); + auto edit = 
blob_store.write(std::move(wb)); const auto & records = edit.getRecords(); ASSERT_EQ(records.size(), 3); entry_from_write1 = records[0].entry; entry_from_write2 = records[1].entry; entry_from_write3 = records[2].entry; - ASSERT_TRUE(blob_store.blob_stats.blobIdToStat(1)->isNormal()); + ASSERT_TRUE(blob_store.blob_stats.blobIdToStat(10)->isNormal()); } config_with_small_file_limit_size.file_limit_size = 400; @@ -1696,9 +1767,9 @@ try { blob_store.reloadConfig(config_with_small_file_limit_size); - Poco::File file1(blob_store.getBlobFile(1)->getPath()); + Poco::File file1(blob_store.getBlobFile(10)->getPath()); ASSERT_EQ(file1.getSize(), 800); - ASSERT_TRUE(blob_store.blob_stats.blobIdToStat(1)->isNormal()); // BlobStat type doesn't change after reload + ASSERT_TRUE(blob_store.blob_stats.blobIdToStat(10)->isNormal()); // BlobStat type doesn't change after reload blob_store.remove({entry_from_write3}); auto blob_need_gc = blob_store.getGCStats(); ASSERT_EQ(blob_need_gc.size(), 0); @@ -1711,11 +1782,14 @@ try PageDirectory::GcEntriesMap gc_context; PageDirectory::GcEntries versioned_pageid_entries; versioned_pageid_entries.emplace_back(page_id2, 1, entry_from_write2); - gc_context[1] = versioned_pageid_entries; - PageEntriesEdit gc_edit = blob_store.gc(gc_context, 500); + gc_context[10] = versioned_pageid_entries; + using PageTypeAndGcInfo = typename u128::PageDirectoryType::PageTypeAndGcInfo; + PageTypeAndGcInfo page_type_and_gc_info; + page_type_and_gc_info.emplace_back(PageType::Normal, gc_context, 500); + PageEntriesEdit gc_edit = blob_store.gc(page_type_and_gc_info); const auto & records = gc_edit.getRecords(); ASSERT_EQ(records.size(), 1); - ASSERT_EQ(records[0].entry.file_id, 2); + ASSERT_EQ(records[0].entry.file_id, 20); ASSERT_EQ(records[0].entry.size, 500); } } diff --git a/dbms/src/Storages/Page/V3/tests/gtest_page_storage.cpp b/dbms/src/Storages/Page/V3/tests/gtest_page_storage.cpp index c975bcf6e2d..eed9e4535c7 100644 --- 
a/dbms/src/Storages/Page/V3/tests/gtest_page_storage.cpp +++ b/dbms/src/Storages/Page/V3/tests/gtest_page_storage.cpp @@ -350,7 +350,7 @@ try // Make sure in-disk data is encrypted. - RandomAccessFilePtr file_read = std::make_shared(fmt::format("{}/{}{}", getTemporaryPath(), BlobFile::BLOB_PREFIX_NAME, 1), + RandomAccessFilePtr file_read = std::make_shared(fmt::format("{}/{}{}", getTemporaryPath(), BlobFile::BLOB_PREFIX_NAME, PageTypeUtils::nextFileID(PageType::Normal, 1)), -1, nullptr); file_read->pread(c_buff_read, buf_sz, 0); @@ -1093,8 +1093,8 @@ TEST_F(PageStorageWith2PagesTest, RemoveReadOnlyFile) cfg.blob_heavy_gc_valid_rate = 1.0; page_storage = reopenWithConfig(cfg); - auto blob_file1 = Poco::File(getTemporaryPath() + "/blobfile_1"); - auto blob_file2 = Poco::File(getTemporaryPath() + "/blobfile_2"); + auto blob_file1 = Poco::File(getTemporaryPath() + "/blobfile_10"); + auto blob_file2 = Poco::File(getTemporaryPath() + "/blobfile_20"); ASSERT_EQ(blob_file1.exists(), true); ASSERT_EQ(blob_file2.exists(), false); @@ -1125,8 +1125,8 @@ TEST_F(PageStorageWith2PagesTest, ReuseEmptyFileAfterRestart) cfg.blob_heavy_gc_valid_rate = 1.0; page_storage = reopenWithConfig(cfg); - auto blob_file1 = Poco::File(getTemporaryPath() + "/blobfile_1"); - auto blob_file2 = Poco::File(getTemporaryPath() + "/blobfile_2"); + auto blob_file1 = Poco::File(getTemporaryPath() + "/blobfile_10"); + auto blob_file2 = Poco::File(getTemporaryPath() + "/blobfile_20"); ASSERT_EQ(blob_file1.exists(), true); ASSERT_EQ(blob_file2.exists(), false); @@ -1592,7 +1592,7 @@ try page_storage->write(std::move(batch)); } - auto blob_file = Poco::File(getTemporaryPath() + "/blobfile_1"); + auto blob_file = Poco::File(getTemporaryPath() + "/blobfile_10"); page_storage = reopenWithConfig(config); EXPECT_GT(blob_file.getSize(), 0); diff --git a/dbms/src/Storages/Transaction/ProxyFFI.cpp b/dbms/src/Storages/Transaction/ProxyFFI.cpp index 4f4c0968c53..abec6febbcb 100644 --- 
a/dbms/src/Storages/Transaction/ProxyFFI.cpp +++ b/dbms/src/Storages/Transaction/ProxyFFI.cpp @@ -273,7 +273,7 @@ void HandleConsumeWriteBatch(const EngineStoreServerWrap * server, RawVoidPtr pt auto uni_ps = server->tmt->getContext().getWriteNodePageStorage(); auto * wb = reinterpret_cast(ptr); LOG_TRACE(&Poco::Logger::get("ProxyFFI"), fmt::format("FFI consume write batch {}", wb->toString())); - uni_ps->write(std::move(*wb)); + uni_ps->write(std::move(*wb), DB::PS::V3::PageType::RaftData, nullptr); wb->clear(); } catch (...) diff --git a/dbms/src/Storages/Transaction/tests/gtest_kvstore_fast_add_peer.cpp b/dbms/src/Storages/Transaction/tests/gtest_kvstore_fast_add_peer.cpp index b81ff68b808..405aa05ef38 100644 --- a/dbms/src/Storages/Transaction/tests/gtest_kvstore_fast_add_peer.cpp +++ b/dbms/src/Storages/Transaction/tests/gtest_kvstore_fast_add_peer.cpp @@ -174,7 +174,7 @@ void persistAfterWrite(Context & ctx, KVStore & kvs, std::unique_ptrdoApply(kvs, ctx.getTMTContext(), cond, region_id, index); auto region = proxy_instance->getRegion(region_id); auto wb = region->persistMeta(); - page_storage->write(std::move(wb), nullptr); + page_storage->write(std::move(wb)); // There shall be data to flush. ASSERT_EQ(kvs.needFlushRegionData(region_id, ctx.getTMTContext()), true); ASSERT_EQ(kvs.tryFlushRegionData(region_id, false, false, ctx.getTMTContext(), 0, 0), true);