
Commit

*: replace Id with Ids
Signed-off-by: lhy1024 <[email protected]>
lhy1024 committed Jun 21, 2022
1 parent 2efa259 commit a3a4c35
Showing 16 changed files with 40 additions and 40 deletions.
36 changes: 18 additions & 18 deletions server/api/region_test.go
@@ -456,57 +456,57 @@ func (s *testGetRegionSuite) TestScanRegionByKeys(c *C) {
mustRegionHeartbeat(c, s.svr, r)

url := fmt.Sprintf("%s/regions/key?key=%s", s.urlPrefix, "b")
- regionIds := []uint64{3, 4, 5, 99}
+ regionIDs := []uint64{3, 4, 5, 99}
regions := &RegionsInfo{}
err := tu.ReadGetJSON(c, testDialClient, url, regions)
c.Assert(err, IsNil)
- c.Assert(regionIds, HasLen, regions.Count)
- for i, v := range regionIds {
+ c.Assert(regionIDs, HasLen, regions.Count)
+ for i, v := range regionIDs {
c.Assert(v, Equals, regions.Regions[i].ID)
}
url = fmt.Sprintf("%s/regions/key?key=%s", s.urlPrefix, "d")
- regionIds = []uint64{4, 5, 99}
+ regionIDs = []uint64{4, 5, 99}
regions = &RegionsInfo{}
err = tu.ReadGetJSON(c, testDialClient, url, regions)
c.Assert(err, IsNil)
- c.Assert(regionIds, HasLen, regions.Count)
- for i, v := range regionIds {
+ c.Assert(regionIDs, HasLen, regions.Count)
+ for i, v := range regionIDs {
c.Assert(v, Equals, regions.Regions[i].ID)
}
url = fmt.Sprintf("%s/regions/key?key=%s", s.urlPrefix, "g")
- regionIds = []uint64{5, 99}
+ regionIDs = []uint64{5, 99}
regions = &RegionsInfo{}
err = tu.ReadGetJSON(c, testDialClient, url, regions)
c.Assert(err, IsNil)
- c.Assert(regionIds, HasLen, regions.Count)
- for i, v := range regionIds {
+ c.Assert(regionIDs, HasLen, regions.Count)
+ for i, v := range regionIDs {
c.Assert(v, Equals, regions.Regions[i].ID)
}
url = fmt.Sprintf("%s/regions/key?end_key=%s", s.urlPrefix, "e")
- regionIds = []uint64{2, 3, 4}
+ regionIDs = []uint64{2, 3, 4}
regions = &RegionsInfo{}
err = tu.ReadGetJSON(c, testDialClient, url, regions)
c.Assert(err, IsNil)
- c.Assert(len(regionIds), Equals, regions.Count)
- for i, v := range regionIds {
+ c.Assert(len(regionIDs), Equals, regions.Count)
+ for i, v := range regionIDs {
c.Assert(v, Equals, regions.Regions[i].ID)
}
url = fmt.Sprintf("%s/regions/key?key=%s&end_key=%s", s.urlPrefix, "b", "g")
- regionIds = []uint64{3, 4}
+ regionIDs = []uint64{3, 4}
regions = &RegionsInfo{}
err = tu.ReadGetJSON(c, testDialClient, url, regions)
c.Assert(err, IsNil)
- c.Assert(len(regionIds), Equals, regions.Count)
- for i, v := range regionIds {
+ c.Assert(len(regionIDs), Equals, regions.Count)
+ for i, v := range regionIDs {
c.Assert(v, Equals, regions.Regions[i].ID)
}
url = fmt.Sprintf("%s/regions/key?key=%s&end_key=%s", s.urlPrefix, "b", []byte{0xFF, 0xFF, 0xCC})
- regionIds = []uint64{3, 4, 5, 99}
+ regionIDs = []uint64{3, 4, 5, 99}
regions = &RegionsInfo{}
err = tu.ReadGetJSON(c, testDialClient, url, regions)
c.Assert(err, IsNil)
- c.Assert(len(regionIds), Equals, regions.Count)
- for i, v := range regionIds {
+ c.Assert(len(regionIDs), Equals, regions.Count)
+ for i, v := range regionIDs {
c.Assert(v, Equals, regions.Regions[i].ID)
}
}
2 changes: 1 addition & 1 deletion server/core/basic_cluster.go
@@ -95,7 +95,7 @@ func (bc *BasicCluster) GetRegionStores(region *RegionInfo) []*StoreInfo {
bc.RLock()
defer bc.RUnlock()
var Stores []*StoreInfo
- for id := range region.GetStoreIds() {
+ for id := range region.GetStoreIDs() {
if store := bc.Stores.GetStore(id); store != nil {
Stores = append(Stores, store)
}
4 changes: 2 additions & 2 deletions server/core/region.go
@@ -349,8 +349,8 @@ func (r *RegionInfo) GetStoreLearner(storeID uint64) *metapb.Peer {
return nil
}

- // GetStoreIds returns a map indicate the region distributed.
- func (r *RegionInfo) GetStoreIds() map[uint64]struct{} {
+ // GetStoreIDs returns a map indicate the region distributed.
+ func (r *RegionInfo) GetStoreIDs() map[uint64]struct{} {
peers := r.meta.GetPeers()
stores := make(map[uint64]struct{}, len(peers))
for _, peer := range peers {
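Note: the rename above only changes the accessor's name, not its behavior. A minimal, hypothetical sketch of a caller, assuming the usual core.NewRegionInfo constructor and kvproto metapb types (the region and peer values below are made up for illustration):

```go
package main

import (
	"fmt"

	"github.com/pingcap/kvproto/pkg/metapb"
	"github.com/tikv/pd/server/core"
)

func main() {
	// Assumed setup: a region with peers on stores 1 and 2.
	region := core.NewRegionInfo(
		&metapb.Region{
			Id:    1,
			Peers: []*metapb.Peer{{Id: 10, StoreId: 1}, {Id: 11, StoreId: 2}},
		},
		&metapb.Peer{Id: 10, StoreId: 1},
	)

	// GetStoreIDs (formerly GetStoreIds) returns map[uint64]struct{} keyed by
	// the store ID of every peer, i.e. where the region is distributed.
	for storeID := range region.GetStoreIDs() {
		fmt.Println("region has a peer on store", storeID)
	}
}
```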
8 changes: 4 additions & 4 deletions server/core/region_option.go
@@ -96,14 +96,14 @@ func WithNewRegionID(id uint64) RegionCreateOption {
}
}

- // WithNewPeerIds sets new ids for peers.
- func WithNewPeerIds(peerIds ...uint64) RegionCreateOption {
+ // WithNewPeerIDs sets new ids for peers.
+ func WithNewPeerIDs(peerIDs ...uint64) RegionCreateOption {
return func(region *RegionInfo) {
- if len(peerIds) != len(region.meta.GetPeers()) {
+ if len(peerIDs) != len(region.meta.GetPeers()) {
return
}
for i, p := range region.meta.GetPeers() {
- p.Id = peerIds[i]
+ p.Id = peerIDs[i]
}
}
}
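As with GetStoreIDs, only the option's name changes here. A hedged sketch of how WithNewPeerIDs is applied through Clone, mirroring the simulator's stepSplit usage further down; the concrete IDs are placeholders:

```go
package main

import (
	"fmt"

	"github.com/pingcap/kvproto/pkg/metapb"
	"github.com/tikv/pd/server/core"
)

func main() {
	// Assumed setup: a region with two peers, so exactly two new peer IDs are expected.
	region := core.NewRegionInfo(
		&metapb.Region{
			Id:    2,
			Peers: []*metapb.Peer{{Id: 20, StoreId: 1}, {Id: 21, StoreId: 2}},
		},
		&metapb.Peer{Id: 20, StoreId: 1},
	)

	// WithNewPeerIDs (formerly WithNewPeerIds) rewrites the peer IDs on the clone.
	// Per the option's body above, it is a no-op when the number of IDs does not
	// match the number of peers.
	cloned := region.Clone(core.WithNewPeerIDs(30, 31))
	for _, p := range cloned.GetPeers() {
		fmt.Println("peer", p.GetId(), "on store", p.GetStoreId())
	}
}
```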
2 changes: 1 addition & 1 deletion server/core/region_tree_test.go
@@ -80,7 +80,7 @@ func TestRegionInfo(t *testing.T) {
r = r.Clone(WithEndKey([]byte{1}))
re.Regexp(".*EndKey Changed.*", DiffRegionKeyInfo(r, info))

- stores := r.GetStoreIds()
+ stores := r.GetStoreIDs()
re.Len(stores, int(n))
for i := uint64(0); i < n; i++ {
_, ok := stores[i]
2 changes: 1 addition & 1 deletion server/schedule/checker/merge_checker.go
@@ -282,7 +282,7 @@ func isTableIDSame(region, adjacent *core.RegionInfo) bool {
// while the source region has no peer on it. This is to prevent from bringing
// any other peer into an offline store to slow down the offline process.
func checkPeerStore(cluster schedule.Cluster, region, adjacent *core.RegionInfo) bool {
- regionStoreIDs := region.GetStoreIds()
+ regionStoreIDs := region.GetStoreIDs()
for _, peer := range adjacent.GetPeers() {
storeID := peer.GetStoreId()
store := cluster.GetStore(storeID)
2 changes: 1 addition & 1 deletion server/schedule/checker/replica_strategy.go
@@ -53,7 +53,7 @@ func (s *ReplicaStrategy) SelectStoreToAdd(coLocationStores []*core.StoreInfo, e
// The reason for it is to prevent the non-optimal replica placement due
// to the short-term state, resulting in redundant scheduling.
filters := []filter.Filter{
- filter.NewExcludedFilter(s.checkerName, nil, s.region.GetStoreIds()),
+ filter.NewExcludedFilter(s.checkerName, nil, s.region.GetStoreIDs()),
filter.NewStorageThresholdFilter(s.checkerName),
filter.NewSpecialUseFilter(s.checkerName),
&filter.StoreStateFilter{ActionScope: s.checkerName, MoveRegion: true, AllowTemporaryStates: true},
4 changes: 2 additions & 2 deletions server/schedule/test_util.go
@@ -79,10 +79,10 @@ func ApplyOperator(mc *mockcluster.Cluster, op *operator.Operator) {
region = ApplyOperatorStep(region, op)
}
mc.PutRegion(region)
- for id := range region.GetStoreIds() {
+ for id := range region.GetStoreIDs() {
mc.UpdateStoreStatus(id)
}
- for id := range origin.GetStoreIds() {
+ for id := range origin.GetStoreIDs() {
mc.UpdateStoreStatus(id)
}
}
2 changes: 1 addition & 1 deletion server/schedulers/balance_region.go
@@ -216,7 +216,7 @@ func (s *balanceRegionScheduler) Schedule(cluster schedule.Cluster) []*operator.
// transferPeer selects the best store to create a new peer to replace the old peer.
func (s *balanceRegionScheduler) transferPeer(plan *balancePlan) *operator.Operator {
filters := []filter.Filter{
- filter.NewExcludedFilter(s.GetName(), nil, plan.region.GetStoreIds()),
+ filter.NewExcludedFilter(s.GetName(), nil, plan.region.GetStoreIDs()),
filter.NewPlacementSafeguard(s.GetName(), plan.GetOpts(), plan.GetBasicCluster(), plan.GetRuleManager(), plan.region, plan.source),
filter.NewRegionScoreFilter(s.GetName(), plan.source, plan.GetOpts()),
filter.NewSpecialUseFilter(s.GetName()),
2 changes: 1 addition & 1 deletion server/schedulers/grant_hot_region.go
@@ -359,7 +359,7 @@ func (s *grantHotRegionScheduler) transfer(cluster schedule.Cluster, regionID ui
candidate = []uint64{s.conf.GetStoreLeaderID()}
} else {
filters = append(filters, &filter.StoreStateFilter{ActionScope: s.GetName(), MoveRegion: true},
- filter.NewExcludedFilter(s.GetName(), srcRegion.GetStoreIds(), srcRegion.GetStoreIds()))
+ filter.NewExcludedFilter(s.GetName(), srcRegion.GetStoreIDs(), srcRegion.GetStoreIDs()))
candidate = s.conf.StoreIDs
}
for _, storeID := range candidate {
2 changes: 1 addition & 1 deletion server/schedulers/hot_region.go
@@ -728,7 +728,7 @@ func (bs *balanceSolver) filterDstStores() map[uint64]*statistics.StoreLoadDetai
case movePeer:
filters = []filter.Filter{
&filter.StoreStateFilter{ActionScope: bs.sche.GetName(), MoveRegion: true},
- filter.NewExcludedFilter(bs.sche.GetName(), bs.cur.region.GetStoreIds(), bs.cur.region.GetStoreIds()),
+ filter.NewExcludedFilter(bs.sche.GetName(), bs.cur.region.GetStoreIDs(), bs.cur.region.GetStoreIDs()),
filter.NewSpecialUseFilter(bs.sche.GetName(), filter.SpecialUseHotRegion),
filter.NewPlacementSafeguard(bs.sche.GetName(), bs.GetOpts(), bs.GetBasicCluster(), bs.GetRuleManager(), bs.cur.region, srcStore),
}
2 changes: 1 addition & 1 deletion server/schedulers/shuffle_hot_region.go
@@ -179,7 +179,7 @@ func (s *shuffleHotRegionScheduler) randomSchedule(cluster schedule.Cluster, loa

filters := []filter.Filter{
&filter.StoreStateFilter{ActionScope: s.GetName(), MoveRegion: true},
- filter.NewExcludedFilter(s.GetName(), srcRegion.GetStoreIds(), srcRegion.GetStoreIds()),
+ filter.NewExcludedFilter(s.GetName(), srcRegion.GetStoreIDs(), srcRegion.GetStoreIDs()),
filter.NewPlacementSafeguard(s.GetName(), cluster.GetOpts(), cluster.GetBasicCluster(), cluster.GetRuleManager(), srcRegion, srcStore),
}
stores := cluster.GetStores()
2 changes: 1 addition & 1 deletion server/schedulers/shuffle_region.go
@@ -159,7 +159,7 @@ func (s *shuffleRegionScheduler) scheduleAddPeer(cluster schedule.Cluster, regio
return nil
}
scoreGuard := filter.NewPlacementSafeguard(s.GetName(), cluster.GetOpts(), cluster.GetBasicCluster(), cluster.GetRuleManager(), region, store)
- excludedFilter := filter.NewExcludedFilter(s.GetName(), nil, region.GetStoreIds())
+ excludedFilter := filter.NewExcludedFilter(s.GetName(), nil, region.GetStoreIDs())

target := filter.NewCandidates(cluster.GetStores()).
FilterTarget(cluster.GetOpts(), s.filters...).
2 changes: 1 addition & 1 deletion tests/server/cluster/cluster_work_test.go
@@ -155,7 +155,7 @@ func (s *clusterWorkerTestSuite) TestSuspectRegions(c *C) {
}
res, err := rc.HandleAskBatchSplit(req)
c.Assert(err, IsNil)
- ids := []uint64{regions[0].GetMeta().GetId(), res.Ids[0].NewRegionId, res.Ids[1].NewRegionId}
+ ids := []uint64{regions[0].GetMeta().GetId(), res.IDs[0].NewRegionID, res.IDs[1].NewRegionID}
sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
suspects := rc.GetSuspectRegions()
sort.Slice(suspects, func(i, j int) bool { return suspects[i] < suspects[j] })
2 changes: 1 addition & 1 deletion tools/pd-simulator/simulator/event.go
@@ -187,7 +187,7 @@ func (e *DeleteNodes) Run(raft *RaftEngine, tickCount int64) bool {

regions := raft.GetRegions()
for _, region := range regions {
- storeIDs := region.GetStoreIds()
+ storeIDs := region.GetStoreIDs()
if _, ok := storeIDs[id]; ok {
downPeer := &pdpb.PeerStats{
Peer: region.GetStorePeer(id),
6 changes: 3 additions & 3 deletions tools/pd-simulator/simulator/raft.go
@@ -146,7 +146,7 @@ func (r *RaftEngine) stepSplit(region *core.RegionInfo) {
}
left := region.Clone(
core.WithNewRegionID(ids[len(ids)-1]),
- core.WithNewPeerIds(ids[0:len(ids)-1]...),
+ core.WithNewPeerIDs(ids[0:len(ids)-1]...),
core.WithIncVersion(),
core.SetApproximateKeys(region.GetApproximateKeys()/2),
core.SetApproximateSize(region.GetApproximateSize()/2),
@@ -196,7 +196,7 @@ func (r *RaftEngine) updateRegionStore(region *core.RegionInfo, size int64) {
core.SetApproximateSize(region.GetApproximateSize()+size),
core.SetWrittenBytes(uint64(size)),
)
- storeIDs := region.GetStoreIds()
+ storeIDs := region.GetStoreIDs()
for storeID := range storeIDs {
r.conn.Nodes[storeID].incUsedSize(uint64(size))
}
@@ -220,7 +220,7 @@ func (r *RaftEngine) electNewLeader(region *core.RegionInfo) *metapb.Peer {
unhealthy int
newLeaderStoreID uint64
)
- ids := region.GetStoreIds()
+ ids := region.GetStoreIDs()
for id := range ids {
if r.conn.nodeHealth(id) {
newLeaderStoreID = id
