diff --git a/contrib/tipb b/contrib/tipb
index f85f989ac0d..72b51a7f16e 160000
--- a/contrib/tipb
+++ b/contrib/tipb
@@ -1 +1 @@
-Subproject commit f85f989ac0d73c53f36369f9f2d0707590e3b917
+Subproject commit 72b51a7f16e9b979c04abbe0cb6ff5d1dc548e6a
diff --git a/dbms/src/Storages/StorageDisaggregated.cpp b/dbms/src/Storages/StorageDisaggregated.cpp
index 33e553e67ea..d3038fe829b 100644
--- a/dbms/src/Storages/StorageDisaggregated.cpp
+++ b/dbms/src/Storages/StorageDisaggregated.cpp
@@ -64,9 +64,10 @@ void StorageDisaggregated::read(
     return readThroughS3(exec_context, group_builder, db_context, num_streams);
 }
 
-std::vector<StorageDisaggregated::RemoteTableRange> StorageDisaggregated::buildRemoteTableRanges()
+std::tuple<std::vector<StorageDisaggregated::RemoteTableRange>, UInt64> StorageDisaggregated::buildRemoteTableRanges()
 {
     std::unordered_map all_remote_regions;
+    UInt64 region_num = 0;
     for (auto physical_table_id : table_scan.getPhysicalTableIDs())
     {
         const auto & table_regions_info = context.getDAGContext()->getTableRegionsInfoByTableID(physical_table_id);
@@ -74,6 +75,7 @@ std::vector<StorageDisaggregated::RemoteTableRange> StorageDisaggregated::buildR
         RUNTIME_CHECK_MSG(
             table_regions_info.local_regions.empty(),
             "in disaggregated_compute_mode, local_regions should be empty");
+        region_num += table_regions_info.remote_regions.size();
         for (const auto & reg : table_regions_info.remote_regions)
             all_remote_regions[physical_table_id].emplace_back(std::cref(reg));
     }
@@ -87,7 +89,7 @@ std::vector<StorageDisaggregated::RemoteTableRange> StorageDisaggregated::buildR
         auto key_ranges = RemoteRequest::buildKeyRanges(remote_regions);
         remote_table_ranges.emplace_back(RemoteTableRange{physical_table_id, key_ranges});
     }
-    return remote_table_ranges;
+    return std::make_tuple(std::move(remote_table_ranges), region_num);
 }
 
 std::vector StorageDisaggregated::buildBatchCopTasks(
diff --git a/dbms/src/Storages/StorageDisaggregated.h b/dbms/src/Storages/StorageDisaggregated.h
index 5b6fe907900..70a7753e23c 100644
--- a/dbms/src/Storages/StorageDisaggregated.h
+++ b/dbms/src/Storages/StorageDisaggregated.h
@@ -125,7 +125,7 @@ class StorageDisaggregated : public IStorage
         size_t num_streams);
 
     using RemoteTableRange = std::pair;
-    std::vector<RemoteTableRange> buildRemoteTableRanges();
+    std::tuple<std::vector<RemoteTableRange>, UInt64> buildRemoteTableRanges();
     std::vector buildBatchCopTasks(
         const std::vector<RemoteTableRange> & remote_table_ranges,
         const pingcap::kv::LabelFilter & label_filter);
diff --git a/dbms/src/Storages/StorageDisaggregatedRemote.cpp b/dbms/src/Storages/StorageDisaggregatedRemote.cpp
index 883c276eec1..f0b8c492f0f 100644
--- a/dbms/src/Storages/StorageDisaggregatedRemote.cpp
+++ b/dbms/src/Storages/StorageDisaggregatedRemote.cpp
@@ -199,7 +199,8 @@ DM::SegmentReadTasks StorageDisaggregated::buildReadTask(
     // First split the read task for different write nodes.
     // For each write node, a BatchCopTask is built.
     {
-        auto remote_table_ranges = buildRemoteTableRanges();
+        auto [remote_table_ranges, region_num] = buildRemoteTableRanges();
+        scan_context->setRegionNumOfCurrentInstance(region_num);
        // only send to tiflash node with label [{"engine":"tiflash"}, {"engine-role":"write"}]
         const auto label_filter = pingcap::kv::labelFilterOnlyTiFlashWriteNode;
         batch_cop_tasks = buildBatchCopTasks(remote_table_ranges, label_filter);
@@ -227,7 +228,7 @@ DM::SegmentReadTasks StorageDisaggregated::buildReadTask(
 
     {
         // TODO
     }
-
+    scan_context->num_segments = output_seg_tasks.size();
     return output_seg_tasks;
 }
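
The patch calls scan_context->setRegionNumOfCurrentInstance(region_num) and assigns scan_context->num_segments, but the ScanContext side of the change is not part of this diff. Below is a minimal standalone sketch of what those two members could look like, assuming DM::ScanContext keeps simple counters; the getter, the private field name region_num_of_instance, and the use of std::atomic/uint64_t are illustrative assumptions, not the actual TiFlash definition.

#include <atomic>
#include <cstdint>

namespace DB::DM
{
// Hypothetical stand-in for the real ScanContext; only the two members the
// patch touches are modelled here, as plain counters.
class ScanContext
{
public:
    // Total number of remote regions read by this compute node, as counted
    // in buildRemoteTableRanges() above.
    void setRegionNumOfCurrentInstance(uint64_t region_num) { region_num_of_instance = region_num; }
    uint64_t getRegionNumOfCurrentInstance() const { return region_num_of_instance; }

    // Number of segment read tasks produced by buildReadTask(); the patch
    // assigns it directly, so it is modelled as a public member.
    std::atomic<uint64_t> num_segments{0};

private:
    std::atomic<uint64_t> region_num_of_instance{0};
};
} // namespace DB::DM

With counters shaped like this, the region count can be accumulated in the same loop that collects remote regions (no second pass over table_regions_info), and the segment count is taken once from output_seg_tasks.size() after all read tasks are built.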