From b19dcc410266634e0875fc9d57c38be606abe8f6 Mon Sep 17 00:00:00 2001
From: lhy1024
Date: Wed, 22 Jun 2022 01:29:30 +0800
Subject: [PATCH] *: replace `Id` with `ID`

Signed-off-by: lhy1024
---
 server/api/region_test.go                   | 36 ++++++++++-----------
 server/core/basic_cluster.go                |  2 +-
 server/core/region.go                       |  4 +--
 server/core/region_option.go                |  8 ++---
 server/core/region_tree_test.go             |  2 +-
 server/schedule/checker/merge_checker.go    |  2 +-
 server/schedule/checker/replica_strategy.go |  2 +-
 server/schedule/test_util.go                |  4 +--
 server/schedulers/balance_region.go         |  2 +-
 server/schedulers/grant_hot_region.go       |  2 +-
 server/schedulers/hot_region.go             |  2 +-
 server/schedulers/shuffle_hot_region.go     |  2 +-
 server/schedulers/shuffle_region.go         |  2 +-
 tools/pd-simulator/simulator/event.go       |  2 +-
 tools/pd-simulator/simulator/raft.go        |  6 ++--
 15 files changed, 39 insertions(+), 39 deletions(-)

diff --git a/server/api/region_test.go b/server/api/region_test.go
index 7fc80254132..816fefcc092 100644
--- a/server/api/region_test.go
+++ b/server/api/region_test.go
@@ -456,57 +456,57 @@ func (s *testGetRegionSuite) TestScanRegionByKeys(c *C) {
 	mustRegionHeartbeat(c, s.svr, r)
 
 	url := fmt.Sprintf("%s/regions/key?key=%s", s.urlPrefix, "b")
-	regionIds := []uint64{3, 4, 5, 99}
+	regionIDs := []uint64{3, 4, 5, 99}
 	regions := &RegionsInfo{}
 	err := tu.ReadGetJSON(c, testDialClient, url, regions)
 	c.Assert(err, IsNil)
-	c.Assert(regionIds, HasLen, regions.Count)
-	for i, v := range regionIds {
+	c.Assert(regionIDs, HasLen, regions.Count)
+	for i, v := range regionIDs {
 		c.Assert(v, Equals, regions.Regions[i].ID)
 	}
 	url = fmt.Sprintf("%s/regions/key?key=%s", s.urlPrefix, "d")
-	regionIds = []uint64{4, 5, 99}
+	regionIDs = []uint64{4, 5, 99}
 	regions = &RegionsInfo{}
 	err = tu.ReadGetJSON(c, testDialClient, url, regions)
 	c.Assert(err, IsNil)
-	c.Assert(regionIds, HasLen, regions.Count)
-	for i, v := range regionIds {
+	c.Assert(regionIDs, HasLen, regions.Count)
+	for i, v := range regionIDs {
 		c.Assert(v, Equals, regions.Regions[i].ID)
 	}
 	url = fmt.Sprintf("%s/regions/key?key=%s", s.urlPrefix, "g")
-	regionIds = []uint64{5, 99}
+	regionIDs = []uint64{5, 99}
 	regions = &RegionsInfo{}
 	err = tu.ReadGetJSON(c, testDialClient, url, regions)
 	c.Assert(err, IsNil)
-	c.Assert(regionIds, HasLen, regions.Count)
-	for i, v := range regionIds {
+	c.Assert(regionIDs, HasLen, regions.Count)
+	for i, v := range regionIDs {
 		c.Assert(v, Equals, regions.Regions[i].ID)
 	}
 	url = fmt.Sprintf("%s/regions/key?end_key=%s", s.urlPrefix, "e")
-	regionIds = []uint64{2, 3, 4}
+	regionIDs = []uint64{2, 3, 4}
 	regions = &RegionsInfo{}
 	err = tu.ReadGetJSON(c, testDialClient, url, regions)
 	c.Assert(err, IsNil)
-	c.Assert(len(regionIds), Equals, regions.Count)
-	for i, v := range regionIds {
+	c.Assert(len(regionIDs), Equals, regions.Count)
+	for i, v := range regionIDs {
 		c.Assert(v, Equals, regions.Regions[i].ID)
 	}
 	url = fmt.Sprintf("%s/regions/key?key=%s&end_key=%s", s.urlPrefix, "b", "g")
-	regionIds = []uint64{3, 4}
+	regionIDs = []uint64{3, 4}
 	regions = &RegionsInfo{}
 	err = tu.ReadGetJSON(c, testDialClient, url, regions)
 	c.Assert(err, IsNil)
-	c.Assert(len(regionIds), Equals, regions.Count)
-	for i, v := range regionIds {
+	c.Assert(len(regionIDs), Equals, regions.Count)
+	for i, v := range regionIDs {
 		c.Assert(v, Equals, regions.Regions[i].ID)
 	}
 	url = fmt.Sprintf("%s/regions/key?key=%s&end_key=%s", s.urlPrefix, "b", []byte{0xFF, 0xFF, 0xCC})
-	regionIds = []uint64{3, 4, 5, 99}
+	regionIDs = []uint64{3, 4, 5, 99}
 	regions = &RegionsInfo{}
 	err = tu.ReadGetJSON(c, testDialClient, url, regions)
 	c.Assert(err, IsNil)
-	c.Assert(len(regionIds), Equals, regions.Count)
-	for i, v := range regionIds {
+	c.Assert(len(regionIDs), Equals, regions.Count)
+	for i, v := range regionIDs {
 		c.Assert(v, Equals, regions.Regions[i].ID)
 	}
 }
diff --git a/server/core/basic_cluster.go b/server/core/basic_cluster.go
index 9100636d376..6509857398d 100644
--- a/server/core/basic_cluster.go
+++ b/server/core/basic_cluster.go
@@ -95,7 +95,7 @@ func (bc *BasicCluster) GetRegionStores(region *RegionInfo) []*StoreInfo {
 	bc.RLock()
 	defer bc.RUnlock()
 	var Stores []*StoreInfo
-	for id := range region.GetStoreIds() {
+	for id := range region.GetStoreIDs() {
 		if store := bc.Stores.GetStore(id); store != nil {
 			Stores = append(Stores, store)
 		}
diff --git a/server/core/region.go b/server/core/region.go
index cc688712ad8..8114a0e411d 100644
--- a/server/core/region.go
+++ b/server/core/region.go
@@ -349,8 +349,8 @@ func (r *RegionInfo) GetStoreLearner(storeID uint64) *metapb.Peer {
 	return nil
 }
 
-// GetStoreIds returns a map indicate the region distributed.
-func (r *RegionInfo) GetStoreIds() map[uint64]struct{} {
+// GetStoreIDs returns a map that indicates which stores the region is distributed on.
+func (r *RegionInfo) GetStoreIDs() map[uint64]struct{} {
 	peers := r.meta.GetPeers()
 	stores := make(map[uint64]struct{}, len(peers))
 	for _, peer := range peers {
diff --git a/server/core/region_option.go b/server/core/region_option.go
index b06c784c0f6..c0d2204de2b 100644
--- a/server/core/region_option.go
+++ b/server/core/region_option.go
@@ -96,14 +96,14 @@ func WithNewRegionID(id uint64) RegionCreateOption {
 	}
 }
 
-// WithNewPeerIds sets new ids for peers.
-func WithNewPeerIds(peerIds ...uint64) RegionCreateOption {
+// WithNewPeerIDs sets new IDs for peers.
+func WithNewPeerIDs(peerIDs ...uint64) RegionCreateOption {
 	return func(region *RegionInfo) {
-		if len(peerIds) != len(region.meta.GetPeers()) {
+		if len(peerIDs) != len(region.meta.GetPeers()) {
 			return
 		}
 		for i, p := range region.meta.GetPeers() {
-			p.Id = peerIds[i]
+			p.Id = peerIDs[i]
 		}
 	}
 }
diff --git a/server/core/region_tree_test.go b/server/core/region_tree_test.go
index 0f813717fcb..7538f04dd74 100644
--- a/server/core/region_tree_test.go
+++ b/server/core/region_tree_test.go
@@ -80,7 +80,7 @@ func TestRegionInfo(t *testing.T) {
 	r = r.Clone(WithEndKey([]byte{1}))
 	re.Regexp(".*EndKey Changed.*", DiffRegionKeyInfo(r, info))
 
-	stores := r.GetStoreIds()
+	stores := r.GetStoreIDs()
 	re.Len(stores, int(n))
 	for i := uint64(0); i < n; i++ {
 		_, ok := stores[i]
diff --git a/server/schedule/checker/merge_checker.go b/server/schedule/checker/merge_checker.go
index e5624d48d2d..fcfc087a3cb 100644
--- a/server/schedule/checker/merge_checker.go
+++ b/server/schedule/checker/merge_checker.go
@@ -282,7 +282,7 @@ func isTableIDSame(region, adjacent *core.RegionInfo) bool {
 // while the source region has no peer on it. This is to prevent from bringing
 // any other peer into an offline store to slow down the offline process.
 func checkPeerStore(cluster schedule.Cluster, region, adjacent *core.RegionInfo) bool {
-	regionStoreIDs := region.GetStoreIds()
+	regionStoreIDs := region.GetStoreIDs()
 	for _, peer := range adjacent.GetPeers() {
 		storeID := peer.GetStoreId()
 		store := cluster.GetStore(storeID)
diff --git a/server/schedule/checker/replica_strategy.go b/server/schedule/checker/replica_strategy.go
index 6ccad30a32d..4fb9d87410e 100644
--- a/server/schedule/checker/replica_strategy.go
+++ b/server/schedule/checker/replica_strategy.go
@@ -53,7 +53,7 @@ func (s *ReplicaStrategy) SelectStoreToAdd(coLocationStores []*core.StoreInfo, e
 	// The reason for it is to prevent the non-optimal replica placement due
 	// to the short-term state, resulting in redundant scheduling.
 	filters := []filter.Filter{
-		filter.NewExcludedFilter(s.checkerName, nil, s.region.GetStoreIds()),
+		filter.NewExcludedFilter(s.checkerName, nil, s.region.GetStoreIDs()),
 		filter.NewStorageThresholdFilter(s.checkerName),
 		filter.NewSpecialUseFilter(s.checkerName),
 		&filter.StoreStateFilter{ActionScope: s.checkerName, MoveRegion: true, AllowTemporaryStates: true},
diff --git a/server/schedule/test_util.go b/server/schedule/test_util.go
index b6484042d3b..67b6f891474 100644
--- a/server/schedule/test_util.go
+++ b/server/schedule/test_util.go
@@ -79,10 +79,10 @@ func ApplyOperator(mc *mockcluster.Cluster, op *operator.Operator) {
 		region = ApplyOperatorStep(region, op)
 	}
 	mc.PutRegion(region)
-	for id := range region.GetStoreIds() {
+	for id := range region.GetStoreIDs() {
 		mc.UpdateStoreStatus(id)
 	}
-	for id := range origin.GetStoreIds() {
+	for id := range origin.GetStoreIDs() {
 		mc.UpdateStoreStatus(id)
 	}
 }
diff --git a/server/schedulers/balance_region.go b/server/schedulers/balance_region.go
index f8d4679acbc..033446ae380 100644
--- a/server/schedulers/balance_region.go
+++ b/server/schedulers/balance_region.go
@@ -216,7 +216,7 @@ func (s *balanceRegionScheduler) Schedule(cluster schedule.Cluster) []*operator.
 // transferPeer selects the best store to create a new peer to replace the old peer.
 func (s *balanceRegionScheduler) transferPeer(plan *balancePlan) *operator.Operator {
 	filters := []filter.Filter{
-		filter.NewExcludedFilter(s.GetName(), nil, plan.region.GetStoreIds()),
+		filter.NewExcludedFilter(s.GetName(), nil, plan.region.GetStoreIDs()),
 		filter.NewPlacementSafeguard(s.GetName(), plan.GetOpts(), plan.GetBasicCluster(), plan.GetRuleManager(), plan.region, plan.source),
 		filter.NewRegionScoreFilter(s.GetName(), plan.source, plan.GetOpts()),
 		filter.NewSpecialUseFilter(s.GetName()),
diff --git a/server/schedulers/grant_hot_region.go b/server/schedulers/grant_hot_region.go
index 4decd1b1340..086bd7e26e2 100644
--- a/server/schedulers/grant_hot_region.go
+++ b/server/schedulers/grant_hot_region.go
@@ -361,7 +361,7 @@ func (s *grantHotRegionScheduler) transfer(cluster schedule.Cluster, regionID ui
 		candidate = []uint64{s.conf.GetStoreLeaderID()}
 	} else {
 		filters = append(filters, &filter.StoreStateFilter{ActionScope: s.GetName(), MoveRegion: true},
-			filter.NewExcludedFilter(s.GetName(), srcRegion.GetStoreIds(), srcRegion.GetStoreIds()))
+			filter.NewExcludedFilter(s.GetName(), srcRegion.GetStoreIDs(), srcRegion.GetStoreIDs()))
 		candidate = s.conf.StoreIDs
 	}
 	for _, storeID := range candidate {
diff --git a/server/schedulers/hot_region.go b/server/schedulers/hot_region.go
index 3ca40a7133f..f6a38e8ecb5 100644
--- a/server/schedulers/hot_region.go
+++ b/server/schedulers/hot_region.go
@@ -728,7 +728,7 @@ func (bs *balanceSolver) filterDstStores() map[uint64]*statistics.StoreLoadDetai
 	case movePeer:
 		filters = []filter.Filter{
 			&filter.StoreStateFilter{ActionScope: bs.sche.GetName(), MoveRegion: true},
-			filter.NewExcludedFilter(bs.sche.GetName(), bs.cur.region.GetStoreIds(), bs.cur.region.GetStoreIds()),
+			filter.NewExcludedFilter(bs.sche.GetName(), bs.cur.region.GetStoreIDs(), bs.cur.region.GetStoreIDs()),
 			filter.NewSpecialUseFilter(bs.sche.GetName(), filter.SpecialUseHotRegion),
 			filter.NewPlacementSafeguard(bs.sche.GetName(), bs.GetOpts(), bs.GetBasicCluster(), bs.GetRuleManager(), bs.cur.region, srcStore),
 		}
diff --git a/server/schedulers/shuffle_hot_region.go b/server/schedulers/shuffle_hot_region.go
index 55a894f3c70..d33f3a5159b 100644
--- a/server/schedulers/shuffle_hot_region.go
+++ b/server/schedulers/shuffle_hot_region.go
@@ -179,7 +179,7 @@ func (s *shuffleHotRegionScheduler) randomSchedule(cluster schedule.Cluster, loa
 
 	filters := []filter.Filter{
 		&filter.StoreStateFilter{ActionScope: s.GetName(), MoveRegion: true},
-		filter.NewExcludedFilter(s.GetName(), srcRegion.GetStoreIds(), srcRegion.GetStoreIds()),
+		filter.NewExcludedFilter(s.GetName(), srcRegion.GetStoreIDs(), srcRegion.GetStoreIDs()),
 		filter.NewPlacementSafeguard(s.GetName(), cluster.GetOpts(), cluster.GetBasicCluster(), cluster.GetRuleManager(), srcRegion, srcStore),
 	}
 	stores := cluster.GetStores()
diff --git a/server/schedulers/shuffle_region.go b/server/schedulers/shuffle_region.go
index b0a6286ceb0..f947ca79ad2 100644
--- a/server/schedulers/shuffle_region.go
+++ b/server/schedulers/shuffle_region.go
@@ -159,7 +159,7 @@ func (s *shuffleRegionScheduler) scheduleAddPeer(cluster schedule.Cluster, regio
 		return nil
 	}
 	scoreGuard := filter.NewPlacementSafeguard(s.GetName(), cluster.GetOpts(), cluster.GetBasicCluster(), cluster.GetRuleManager(), region, store)
-	excludedFilter := filter.NewExcludedFilter(s.GetName(), nil, region.GetStoreIds())
+	excludedFilter := filter.NewExcludedFilter(s.GetName(), nil, region.GetStoreIDs())
 
 	target := filter.NewCandidates(cluster.GetStores()).
 		FilterTarget(cluster.GetOpts(), s.filters...).
diff --git a/tools/pd-simulator/simulator/event.go b/tools/pd-simulator/simulator/event.go
index 13032e45d20..f2504de9021 100644
--- a/tools/pd-simulator/simulator/event.go
+++ b/tools/pd-simulator/simulator/event.go
@@ -187,7 +187,7 @@ func (e *DeleteNodes) Run(raft *RaftEngine, tickCount int64) bool {
 
 	regions := raft.GetRegions()
 	for _, region := range regions {
-		storeIDs := region.GetStoreIds()
+		storeIDs := region.GetStoreIDs()
 		if _, ok := storeIDs[id]; ok {
 			downPeer := &pdpb.PeerStats{
 				Peer: region.GetStorePeer(id),
diff --git a/tools/pd-simulator/simulator/raft.go b/tools/pd-simulator/simulator/raft.go
index c3194071884..644a86ef7d5 100644
--- a/tools/pd-simulator/simulator/raft.go
+++ b/tools/pd-simulator/simulator/raft.go
@@ -146,7 +146,7 @@ func (r *RaftEngine) stepSplit(region *core.RegionInfo) {
 	}
 	left := region.Clone(
 		core.WithNewRegionID(ids[len(ids)-1]),
-		core.WithNewPeerIds(ids[0:len(ids)-1]...),
+		core.WithNewPeerIDs(ids[0:len(ids)-1]...),
 		core.WithIncVersion(),
 		core.SetApproximateKeys(region.GetApproximateKeys()/2),
 		core.SetApproximateSize(region.GetApproximateSize()/2),
@@ -196,7 +196,7 @@ func (r *RaftEngine) updateRegionStore(region *core.RegionInfo, size int64) {
 		core.SetApproximateSize(region.GetApproximateSize()+size),
 		core.SetWrittenBytes(uint64(size)),
 	)
-	storeIDs := region.GetStoreIds()
+	storeIDs := region.GetStoreIDs()
 	for storeID := range storeIDs {
 		r.conn.Nodes[storeID].incUsedSize(uint64(size))
 	}
@@ -220,7 +220,7 @@ func (r *RaftEngine) electNewLeader(region *core.RegionInfo) *metapb.Peer {
 		unhealthy        int
 		newLeaderStoreID uint64
 	)
-	ids := region.GetStoreIds()
+	ids := region.GetStoreIDs()
 	for id := range ids {
 		if r.conn.nodeHealth(id) {
 			newLeaderStoreID = id