diff --git a/server/api/region_test.go b/server/api/region_test.go
index 7fc802541329..816fefcc092e 100644
--- a/server/api/region_test.go
+++ b/server/api/region_test.go
@@ -456,57 +456,57 @@ func (s *testGetRegionSuite) TestScanRegionByKeys(c *C) {
 	mustRegionHeartbeat(c, s.svr, r)
 
 	url := fmt.Sprintf("%s/regions/key?key=%s", s.urlPrefix, "b")
-	regionIds := []uint64{3, 4, 5, 99}
+	regionIDs := []uint64{3, 4, 5, 99}
 	regions := &RegionsInfo{}
 	err := tu.ReadGetJSON(c, testDialClient, url, regions)
 	c.Assert(err, IsNil)
-	c.Assert(regionIds, HasLen, regions.Count)
-	for i, v := range regionIds {
+	c.Assert(regionIDs, HasLen, regions.Count)
+	for i, v := range regionIDs {
 		c.Assert(v, Equals, regions.Regions[i].ID)
 	}
 	url = fmt.Sprintf("%s/regions/key?key=%s", s.urlPrefix, "d")
-	regionIds = []uint64{4, 5, 99}
+	regionIDs = []uint64{4, 5, 99}
 	regions = &RegionsInfo{}
 	err = tu.ReadGetJSON(c, testDialClient, url, regions)
 	c.Assert(err, IsNil)
-	c.Assert(regionIds, HasLen, regions.Count)
-	for i, v := range regionIds {
+	c.Assert(regionIDs, HasLen, regions.Count)
+	for i, v := range regionIDs {
 		c.Assert(v, Equals, regions.Regions[i].ID)
 	}
 	url = fmt.Sprintf("%s/regions/key?key=%s", s.urlPrefix, "g")
-	regionIds = []uint64{5, 99}
+	regionIDs = []uint64{5, 99}
 	regions = &RegionsInfo{}
 	err = tu.ReadGetJSON(c, testDialClient, url, regions)
 	c.Assert(err, IsNil)
-	c.Assert(regionIds, HasLen, regions.Count)
-	for i, v := range regionIds {
+	c.Assert(regionIDs, HasLen, regions.Count)
+	for i, v := range regionIDs {
 		c.Assert(v, Equals, regions.Regions[i].ID)
 	}
 	url = fmt.Sprintf("%s/regions/key?end_key=%s", s.urlPrefix, "e")
-	regionIds = []uint64{2, 3, 4}
+	regionIDs = []uint64{2, 3, 4}
 	regions = &RegionsInfo{}
 	err = tu.ReadGetJSON(c, testDialClient, url, regions)
 	c.Assert(err, IsNil)
-	c.Assert(len(regionIds), Equals, regions.Count)
-	for i, v := range regionIds {
+	c.Assert(len(regionIDs), Equals, regions.Count)
+	for i, v := range regionIDs {
 		c.Assert(v, Equals, regions.Regions[i].ID)
 	}
 	url = fmt.Sprintf("%s/regions/key?key=%s&end_key=%s", s.urlPrefix, "b", "g")
-	regionIds = []uint64{3, 4}
+	regionIDs = []uint64{3, 4}
 	regions = &RegionsInfo{}
 	err = tu.ReadGetJSON(c, testDialClient, url, regions)
 	c.Assert(err, IsNil)
-	c.Assert(len(regionIds), Equals, regions.Count)
-	for i, v := range regionIds {
+	c.Assert(len(regionIDs), Equals, regions.Count)
+	for i, v := range regionIDs {
 		c.Assert(v, Equals, regions.Regions[i].ID)
 	}
 	url = fmt.Sprintf("%s/regions/key?key=%s&end_key=%s", s.urlPrefix, "b", []byte{0xFF, 0xFF, 0xCC})
-	regionIds = []uint64{3, 4, 5, 99}
+	regionIDs = []uint64{3, 4, 5, 99}
 	regions = &RegionsInfo{}
 	err = tu.ReadGetJSON(c, testDialClient, url, regions)
 	c.Assert(err, IsNil)
-	c.Assert(len(regionIds), Equals, regions.Count)
-	for i, v := range regionIds {
+	c.Assert(len(regionIDs), Equals, regions.Count)
+	for i, v := range regionIDs {
 		c.Assert(v, Equals, regions.Regions[i].ID)
 	}
 }
diff --git a/server/core/basic_cluster.go b/server/core/basic_cluster.go
index 9100636d3766..6509857398db 100644
--- a/server/core/basic_cluster.go
+++ b/server/core/basic_cluster.go
@@ -95,7 +95,7 @@ func (bc *BasicCluster) GetRegionStores(region *RegionInfo) []*StoreInfo {
 	bc.RLock()
 	defer bc.RUnlock()
 	var Stores []*StoreInfo
-	for id := range region.GetStoreIds() {
+	for id := range region.GetStoreIDs() {
 		if store := bc.Stores.GetStore(id); store != nil {
 			Stores = append(Stores, store)
 		}
diff --git a/server/core/region.go b/server/core/region.go
index cc688712ad89..8114a0e411db 100644
--- a/server/core/region.go
+++ b/server/core/region.go
@@ -349,8 +349,8 @@ func (r *RegionInfo) GetStoreLearner(storeID uint64) *metapb.Peer {
 	return nil
 }
 
-// GetStoreIds returns a map indicate the region distributed.
-func (r *RegionInfo) GetStoreIds() map[uint64]struct{} {
+// GetStoreIDs returns a map indicating the stores the region is distributed on.
+func (r *RegionInfo) GetStoreIDs() map[uint64]struct{} {
 	peers := r.meta.GetPeers()
 	stores := make(map[uint64]struct{}, len(peers))
 	for _, peer := range peers {
diff --git a/server/core/region_option.go b/server/core/region_option.go
index b06c784c0f65..c0d2204de2bb 100644
--- a/server/core/region_option.go
+++ b/server/core/region_option.go
@@ -96,14 +96,14 @@ func WithNewRegionID(id uint64) RegionCreateOption {
 	}
 }
 
-// WithNewPeerIds sets new ids for peers.
-func WithNewPeerIds(peerIds ...uint64) RegionCreateOption {
+// WithNewPeerIDs sets new IDs for peers.
+func WithNewPeerIDs(peerIDs ...uint64) RegionCreateOption {
 	return func(region *RegionInfo) {
-		if len(peerIds) != len(region.meta.GetPeers()) {
+		if len(peerIDs) != len(region.meta.GetPeers()) {
 			return
 		}
 		for i, p := range region.meta.GetPeers() {
-			p.Id = peerIds[i]
+			p.Id = peerIDs[i]
 		}
 	}
 }
diff --git a/server/core/region_tree_test.go b/server/core/region_tree_test.go
index 0f813717fcb0..7538f04dd744 100644
--- a/server/core/region_tree_test.go
+++ b/server/core/region_tree_test.go
@@ -80,7 +80,7 @@ func TestRegionInfo(t *testing.T) {
 	r = r.Clone(WithEndKey([]byte{1}))
 	re.Regexp(".*EndKey Changed.*", DiffRegionKeyInfo(r, info))
 
-	stores := r.GetStoreIds()
+	stores := r.GetStoreIDs()
 	re.Len(stores, int(n))
 	for i := uint64(0); i < n; i++ {
 		_, ok := stores[i]
diff --git a/server/schedule/checker/merge_checker.go b/server/schedule/checker/merge_checker.go
index e5624d48d2de..fcfc087a3cb5 100644
--- a/server/schedule/checker/merge_checker.go
+++ b/server/schedule/checker/merge_checker.go
@@ -282,7 +282,7 @@ func isTableIDSame(region, adjacent *core.RegionInfo) bool {
 // while the source region has no peer on it. This is to prevent from bringing
 // any other peer into an offline store to slow down the offline process.
 func checkPeerStore(cluster schedule.Cluster, region, adjacent *core.RegionInfo) bool {
-	regionStoreIDs := region.GetStoreIds()
+	regionStoreIDs := region.GetStoreIDs()
 	for _, peer := range adjacent.GetPeers() {
 		storeID := peer.GetStoreId()
 		store := cluster.GetStore(storeID)
diff --git a/server/schedule/checker/replica_strategy.go b/server/schedule/checker/replica_strategy.go
index 5856e3e91cc2..3a8214e420ee 100644
--- a/server/schedule/checker/replica_strategy.go
+++ b/server/schedule/checker/replica_strategy.go
@@ -53,7 +53,7 @@ func (s *ReplicaStrategy) SelectStoreToAdd(coLocationStores []*core.StoreInfo, e
 	// The reason for it is to prevent the non-optimal replica placement due
 	// to the short-term state, resulting in redundant scheduling.
 	filters := []filter.Filter{
-		filter.NewExcludedFilter(s.checkerName, nil, s.region.GetStoreIds()),
+		filter.NewExcludedFilter(s.checkerName, nil, s.region.GetStoreIDs()),
 		filter.NewStorageThresholdFilter(s.checkerName),
 		filter.NewSpecialUseFilter(s.checkerName),
 		&filter.LongTermStateFilter{ActionScope: s.checkerName, MoveRegion: true},
diff --git a/server/schedule/test_util.go b/server/schedule/test_util.go
index b6484042d3bd..67b6f8914742 100644
--- a/server/schedule/test_util.go
+++ b/server/schedule/test_util.go
@@ -79,10 +79,10 @@ func ApplyOperator(mc *mockcluster.Cluster, op *operator.Operator) {
 		region = ApplyOperatorStep(region, op)
 	}
 	mc.PutRegion(region)
-	for id := range region.GetStoreIds() {
+	for id := range region.GetStoreIDs() {
 		mc.UpdateStoreStatus(id)
 	}
-	for id := range origin.GetStoreIds() {
+	for id := range origin.GetStoreIDs() {
 		mc.UpdateStoreStatus(id)
 	}
 }
diff --git a/server/schedulers/balance_region.go b/server/schedulers/balance_region.go
index 97ee764ded49..964fc88da2c4 100644
--- a/server/schedulers/balance_region.go
+++ b/server/schedulers/balance_region.go
@@ -217,7 +217,7 @@ func (s *balanceRegionScheduler) Schedule(cluster schedule.Cluster) []*operator.
 // transferPeer selects the best store to create a new peer to replace the old peer.
 func (s *balanceRegionScheduler) transferPeer(plan *balancePlan) *operator.Operator {
 	filters := []filter.Filter{
-		filter.NewExcludedFilter(s.GetName(), nil, plan.region.GetStoreIds()),
+		filter.NewExcludedFilter(s.GetName(), nil, plan.region.GetStoreIDs()),
 		filter.NewPlacementSafeguard(s.GetName(), plan.GetOpts(), plan.GetBasicCluster(), plan.GetRuleManager(), plan.region, plan.source),
 		filter.NewRegionScoreFilter(s.GetName(), plan.source, plan.GetOpts()),
 		filter.NewSpecialUseFilter(s.GetName()),
diff --git a/server/schedulers/grant_hot_region.go b/server/schedulers/grant_hot_region.go
index ac67f78a5372..9d52a5b0f1fc 100644
--- a/server/schedulers/grant_hot_region.go
+++ b/server/schedulers/grant_hot_region.go
@@ -363,7 +363,7 @@ func (s *grantHotRegionScheduler) transfer(cluster schedule.Cluster, regionID ui
 	} else {
 		filters = append(filters, &filter.LongTermStateFilter{ActionScope: s.GetName(), MoveRegion: true},
 			&filter.TemporaryStateFilter{ActionScope: s.GetName(), MoveRegion: true},
-			filter.NewExcludedFilter(s.GetName(), srcRegion.GetStoreIds(), srcRegion.GetStoreIds()))
+			filter.NewExcludedFilter(s.GetName(), srcRegion.GetStoreIDs(), srcRegion.GetStoreIDs()))
 		candidate = s.conf.StoreIDs
 	}
 	for _, storeID := range candidate {
diff --git a/server/schedulers/hot_region.go b/server/schedulers/hot_region.go
index 31c3b84c28d1..4f561723cff7 100644
--- a/server/schedulers/hot_region.go
+++ b/server/schedulers/hot_region.go
@@ -729,7 +729,7 @@ func (bs *balanceSolver) filterDstStores() map[uint64]*statistics.StoreLoadDetai
 		filters = []filter.Filter{
 			&filter.LongTermStateFilter{ActionScope: bs.sche.GetName(), MoveRegion: true},
 			&filter.TemporaryStateFilter{ActionScope: bs.sche.GetName(), MoveRegion: true},
-			filter.NewExcludedFilter(bs.sche.GetName(), bs.cur.region.GetStoreIds(), bs.cur.region.GetStoreIds()),
+			filter.NewExcludedFilter(bs.sche.GetName(), bs.cur.region.GetStoreIDs(), bs.cur.region.GetStoreIDs()),
 			filter.NewSpecialUseFilter(bs.sche.GetName(), filter.SpecialUseHotRegion),
 			filter.NewPlacementSafeguard(bs.sche.GetName(), bs.GetOpts(), bs.GetBasicCluster(), bs.GetRuleManager(), bs.cur.region, srcStore),
 		}
diff --git a/server/schedulers/shuffle_hot_region.go b/server/schedulers/shuffle_hot_region.go
index 832091fe7552..2301df4754f0 100644
--- a/server/schedulers/shuffle_hot_region.go
+++ b/server/schedulers/shuffle_hot_region.go
@@ -180,7 +180,7 @@ func (s *shuffleHotRegionScheduler) randomSchedule(cluster schedule.Cluster, loa
 	filters := []filter.Filter{
 		&filter.LongTermStateFilter{ActionScope: s.GetName(), MoveRegion: true},
 		&filter.TemporaryStateFilter{ActionScope: s.GetName(), MoveRegion: true},
-		filter.NewExcludedFilter(s.GetName(), srcRegion.GetStoreIds(), srcRegion.GetStoreIds()),
+		filter.NewExcludedFilter(s.GetName(), srcRegion.GetStoreIDs(), srcRegion.GetStoreIDs()),
 		filter.NewPlacementSafeguard(s.GetName(), cluster.GetOpts(), cluster.GetBasicCluster(), cluster.GetRuleManager(), srcRegion, srcStore),
 	}
 	stores := cluster.GetStores()
diff --git a/server/schedulers/shuffle_region.go b/server/schedulers/shuffle_region.go
index e6dbfd2898f9..17a96f77f95c 100644
--- a/server/schedulers/shuffle_region.go
+++ b/server/schedulers/shuffle_region.go
@@ -160,7 +160,7 @@ func (s *shuffleRegionScheduler) scheduleAddPeer(cluster schedule.Cluster, regio
 		return nil
 	}
 	scoreGuard := filter.NewPlacementSafeguard(s.GetName(), cluster.GetOpts(), cluster.GetBasicCluster(), cluster.GetRuleManager(), region, store)
-	excludedFilter := filter.NewExcludedFilter(s.GetName(), nil, region.GetStoreIds())
+	excludedFilter := filter.NewExcludedFilter(s.GetName(), nil, region.GetStoreIDs())
 
 	target := filter.NewCandidates(cluster.GetStores()).
 		FilterTarget(cluster.GetOpts(), s.filters...).
diff --git a/tools/pd-simulator/simulator/event.go b/tools/pd-simulator/simulator/event.go
index 13032e45d20c..f2504de9021f 100644
--- a/tools/pd-simulator/simulator/event.go
+++ b/tools/pd-simulator/simulator/event.go
@@ -187,7 +187,7 @@ func (e *DeleteNodes) Run(raft *RaftEngine, tickCount int64) bool {
 
 	regions := raft.GetRegions()
 	for _, region := range regions {
-		storeIDs := region.GetStoreIds()
+		storeIDs := region.GetStoreIDs()
 		if _, ok := storeIDs[id]; ok {
 			downPeer := &pdpb.PeerStats{
 				Peer: region.GetStorePeer(id),
diff --git a/tools/pd-simulator/simulator/raft.go b/tools/pd-simulator/simulator/raft.go
index c31940718842..644a86ef7d52 100644
--- a/tools/pd-simulator/simulator/raft.go
+++ b/tools/pd-simulator/simulator/raft.go
@@ -146,7 +146,7 @@ func (r *RaftEngine) stepSplit(region *core.RegionInfo) {
 	}
 	left := region.Clone(
 		core.WithNewRegionID(ids[len(ids)-1]),
-		core.WithNewPeerIds(ids[0:len(ids)-1]...),
+		core.WithNewPeerIDs(ids[0:len(ids)-1]...),
 		core.WithIncVersion(),
 		core.SetApproximateKeys(region.GetApproximateKeys()/2),
 		core.SetApproximateSize(region.GetApproximateSize()/2),
@@ -196,7 +196,7 @@ func (r *RaftEngine) updateRegionStore(region *core.RegionInfo, size int64) {
 		core.SetApproximateSize(region.GetApproximateSize()+size),
 		core.SetWrittenBytes(uint64(size)),
 	)
-	storeIDs := region.GetStoreIds()
+	storeIDs := region.GetStoreIDs()
 	for storeID := range storeIDs {
 		r.conn.Nodes[storeID].incUsedSize(uint64(size))
 	}
@@ -220,7 +220,7 @@ func (r *RaftEngine) electNewLeader(region *core.RegionInfo) *metapb.Peer {
 		unhealthy        int
 		newLeaderStoreID uint64
 	)
-	ids := region.GetStoreIds()
+	ids := region.GetStoreIDs()
 	for id := range ids {
 		if r.conn.nodeHealth(id) {
 			newLeaderStoreID = id