From b62e8fa975508bc010cefe4e7c4a981edaf6d7b7 Mon Sep 17 00:00:00 2001
From: "sean.liu"
Date: Thu, 22 Dec 2022 16:52:15 +0800
Subject: [PATCH] fix conflict

---
 store/copr/BUILD.bazel          |  4 --
 store/copr/batch_coprocessor.go | 85 ++-------------------------------
 2 files changed, 3 insertions(+), 86 deletions(-)

diff --git a/store/copr/BUILD.bazel b/store/copr/BUILD.bazel
index 0160ba63ce6c5..1de1a6ba66af6 100644
--- a/store/copr/BUILD.bazel
+++ b/store/copr/BUILD.bazel
@@ -78,11 +78,7 @@ go_test(
         "//util/paging",
         "@com_github_pingcap_errors//:errors",
         "@com_github_pingcap_kvproto//pkg/coprocessor",
-<<<<<<< HEAD
-=======
         "@com_github_pingcap_kvproto//pkg/mpp",
-        "@com_github_stathat_consistent//:consistent",
->>>>>>> aeccf77637 (*: optimize mpp probe (#39932))
         "@com_github_stretchr_testify//require",
         "@com_github_tikv_client_go_v2//config",
         "@com_github_tikv_client_go_v2//testutils",
diff --git a/store/copr/batch_coprocessor.go b/store/copr/batch_coprocessor.go
index 8dfc7a203d188..9bc53d3aabc45 100644
--- a/store/copr/batch_coprocessor.go
+++ b/store/copr/batch_coprocessor.go
@@ -502,43 +502,12 @@ func balanceBatchCopTask(ctx context.Context, kvStore *kvStore, originalTasks []
 	return ret
 }
 
-<<<<<<< HEAD
-func buildBatchCopTasksForNonPartitionedTable(bo *backoff.Backoffer, store *kvStore, ranges *KeyRanges, storeType kv.StoreType, mppStoreLastFailTime *sync.Map, ttl time.Duration, balanceWithContinuity bool, balanceContinuousRegionCount int64) ([]*batchCopTask, error) {
-	return buildBatchCopTasksCore(bo, store, []*KeyRanges{ranges}, storeType, mppStoreLastFailTime, ttl, balanceWithContinuity, balanceContinuousRegionCount)
-}
-
-func buildBatchCopTasksForPartitionedTable(bo *backoff.Backoffer, store *kvStore, rangesForEachPhysicalTable []*KeyRanges, storeType kv.StoreType, mppStoreLastFailTime *sync.Map, ttl time.Duration, balanceWithContinuity bool, balanceContinuousRegionCount int64, partitionIDs []int64) ([]*batchCopTask, error) {
-	batchTasks, err := buildBatchCopTasksCore(bo, store, rangesForEachPhysicalTable, storeType, mppStoreLastFailTime, ttl, balanceWithContinuity, balanceContinuousRegionCount)
-=======
-func buildBatchCopTasksForNonPartitionedTable(bo *backoff.Backoffer,
-	store *kvStore,
-	ranges *KeyRanges,
-	storeType kv.StoreType,
-	isMPP bool,
-	ttl time.Duration,
-	balanceWithContinuity bool,
-	balanceContinuousRegionCount int64) ([]*batchCopTask, error) {
-	if config.GetGlobalConfig().DisaggregatedTiFlash {
-		return buildBatchCopTasksConsistentHash(bo, store, []*KeyRanges{ranges}, storeType, isMPP, ttl, balanceWithContinuity, balanceContinuousRegionCount)
-	}
+func buildBatchCopTasksForNonPartitionedTable(bo *backoff.Backoffer, store *kvStore, ranges *KeyRanges, storeType kv.StoreType, isMPP bool, ttl time.Duration, balanceWithContinuity bool, balanceContinuousRegionCount int64) ([]*batchCopTask, error) {
 	return buildBatchCopTasksCore(bo, store, []*KeyRanges{ranges}, storeType, isMPP, ttl, balanceWithContinuity, balanceContinuousRegionCount)
 }
 
-func buildBatchCopTasksForPartitionedTable(bo *backoff.Backoffer,
-	store *kvStore,
-	rangesForEachPhysicalTable []*KeyRanges,
-	storeType kv.StoreType,
-	isMPP bool,
-	ttl time.Duration,
-	balanceWithContinuity bool,
-	balanceContinuousRegionCount int64,
-	partitionIDs []int64) (batchTasks []*batchCopTask, err error) {
-	if config.GetGlobalConfig().DisaggregatedTiFlash {
-		batchTasks, err = buildBatchCopTasksConsistentHash(bo, store, rangesForEachPhysicalTable, storeType, isMPP, ttl, balanceWithContinuity, balanceContinuousRegionCount)
-	} else {
-		batchTasks, err = buildBatchCopTasksCore(bo, store, rangesForEachPhysicalTable, storeType, isMPP, ttl, balanceWithContinuity, balanceContinuousRegionCount)
-	}
->>>>>>> aeccf77637 (*: optimize mpp probe (#39932))
+func buildBatchCopTasksForPartitionedTable(bo *backoff.Backoffer, store *kvStore, rangesForEachPhysicalTable []*KeyRanges, storeType kv.StoreType, isMPP bool, ttl time.Duration, balanceWithContinuity bool, balanceContinuousRegionCount int64, partitionIDs []int64) ([]*batchCopTask, error) {
+	batchTasks, err := buildBatchCopTasksCore(bo, store, rangesForEachPhysicalTable, storeType, isMPP, ttl, balanceWithContinuity, balanceContinuousRegionCount)
 	if err != nil {
 		return nil, err
 	}
@@ -547,54 +516,6 @@ func buildBatchCopTasksForPartitionedTable(bo *backoff.Backoffer,
 	return batchTasks, nil
 }
 
-<<<<<<< HEAD
-=======
-func buildBatchCopTasksConsistentHash(bo *backoff.Backoffer, store *kvStore, rangesForEachPhysicalTable []*KeyRanges, storeType kv.StoreType, isMPP bool, ttl time.Duration, balanceWithContinuity bool, balanceContinuousRegionCount int64) ([]*batchCopTask, error) {
-	batchTasks, err := buildBatchCopTasksCore(bo, store, rangesForEachPhysicalTable, storeType, isMPP, ttl, balanceWithContinuity, balanceContinuousRegionCount)
-	if err != nil {
-		return nil, err
-	}
-	cache := store.GetRegionCache()
-	stores, err := cache.GetTiFlashComputeStores(bo.TiKVBackoffer())
-	if err != nil {
-		return nil, err
-	}
-	if len(stores) == 0 {
-		return nil, errors.New("No available tiflash_compute node")
-	}
-
-	hasher := consistent.New()
-	for _, store := range stores {
-		hasher.Add(store.GetAddr())
-	}
-	for _, task := range batchTasks {
-		addr, err := hasher.Get(task.storeAddr)
-		if err != nil {
-			return nil, err
-		}
-		var store *tikv.Store
-		for _, s := range stores {
-			if s.GetAddr() == addr {
-				store = s
-				break
-			}
-		}
-		if store == nil {
-			return nil, errors.New("cannot find tiflash_compute store: " + addr)
-		}
-
-		task.storeAddr = addr
-		task.ctx.Store = store
-		task.ctx.Addr = addr
-	}
-	logutil.BgLogger().Info("build batchCop tasks for disaggregated tiflash using ConsistentHash done.", zap.Int("len(tasks)", len(batchTasks)))
-	for _, task := range batchTasks {
-		logutil.BgLogger().Debug("batchTasks detailed info", zap.String("addr", task.storeAddr), zap.Int("RegionInfo number", len(task.regionInfos)))
-	}
-	return batchTasks, nil
-}
-
->>>>>>> aeccf77637 (*: optimize mpp probe (#39932))
 // When `partitionIDs != nil`, it means that buildBatchCopTasksCore is constructing a batch cop tasks for PartitionTableScan.
 // At this time, `len(rangesForEachPhysicalTable) == len(partitionIDs)` and `rangesForEachPhysicalTable[i]` is for partition `partitionIDs[i]`.
 // Otherwise, `rangesForEachPhysicalTable[0]` indicates the range for the single physical table.
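
Note (not part of the patch): this conflict resolution keeps the buildBatchCopTasksCore path on this branch and drops buildBatchCopTasksConsistentHash together with its @com_github_stathat_consistent//:consistent dependency. For context, the deleted helper hashed each batch-cop task's original store address onto the set of available tiflash_compute nodes, so the same task key always lands on the same compute node. Below is a minimal, standalone sketch of that consistent-hashing technique using github.com/stathat/consistent; the node and task addresses are hypothetical placeholders, not values from this patch.

package main

import (
	"fmt"

	"github.com/stathat/consistent"
)

func main() {
	// Build the hash ring from the currently available compute nodes,
	// mirroring how the deleted helper added every tiflash_compute address.
	// These addresses are illustrative placeholders.
	hasher := consistent.New()
	for _, node := range []string{"tiflash-compute-0:3930", "tiflash-compute-1:3930", "tiflash-compute-2:3930"} {
		hasher.Add(node)
	}

	// Use each task's original store address as the hash key; the ring
	// returns the compute node the task would be dispatched to.
	for _, taskAddr := range []string{"tiflash-0:3930", "tiflash-1:3930", "tiflash-2:3930"} {
		target, err := hasher.Get(taskAddr)
		if err != nil {
			panic(err)
		}
		fmt.Printf("task keyed by %s -> %s\n", taskAddr, target)
	}
}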