-
Notifications
You must be signed in to change notification settings - Fork 726
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
*: add hot region schedule #611
Changes from 29 commits
e2982b7
2a2ef7f
ca4c248
7d8e7bf
0f6c35c
382986e
465e6e7
20e78b0
0d21c20
5050c94
f5c9de4
e4f281d
034e975
9228598
a787547
1097525
ef4fe14
3eae118
f18d1c4
d9ad9f7
95e3530
5e8f5a8
174782e
1c3605a
30bcb69
ce794ce
bad2c8a
18e60e9
c2469d7
7d80df0
2760bc9
79a39d8
79b9de3
91e69c6
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,41 @@ | ||
// Copyright 2017 PingCAP, Inc. | ||
// | ||
// Licensed under the Apache License, Version 2.0 (the "License"); | ||
// you may not use this file except in compliance with the License. | ||
// You may obtain a copy of the License at | ||
// | ||
// http://www.apache.org/licenses/LICENSE-2.0 | ||
// | ||
// Unless required by applicable law or agreed to in writing, software | ||
// distributed under the License is distributed on an "AS IS" BASIS, | ||
// See the License for the specific language governing permissions and | ||
// limitations under the License. | ||
|
||
package api | ||
|
||
import ( | ||
"net/http" | ||
|
||
"github.com/pingcap/pd/server" | ||
"github.com/unrolled/render" | ||
) | ||
|
||
type hotStatusHandler struct { | ||
*server.Handler | ||
rd *render.Render | ||
} | ||
|
||
func newHotStatusHandler(handler *server.Handler, rd *render.Render) *hotStatusHandler { | ||
return &hotStatusHandler{ | ||
Handler: handler, | ||
rd: rd, | ||
} | ||
} | ||
|
||
func (h *hotStatusHandler) GetHotRegions(w http.ResponseWriter, r *http.Request) { | ||
h.rd.JSON(w, http.StatusOK, h.GetHotWriteRegions()) | ||
} | ||
|
||
func (h *hotStatusHandler) GetHotStores(w http.ResponseWriter, r *http.Request) { | ||
h.rd.JSON(w, http.StatusOK, h.GetHotWriteStores()) | ||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -15,6 +15,9 @@ package server | |
|
||
import ( | ||
"math" | ||
"math/rand" | ||
"sort" | ||
"sync" | ||
"time" | ||
|
||
log "github.com/Sirupsen/logrus" | ||
|
@@ -370,3 +373,278 @@ func (r *replicaChecker) checkBestReplacement(region *RegionInfo) Operator { | |
} | ||
return newTransferPeer(region, oldPeer, newPeer) | ||
} | ||
|
||
// RegionStat records the write statistics of a single hot region.
type RegionStat struct {
	RegionID uint64 `json:"region_id"`
	// WrittenBytes is the number of bytes written to the region during
	// the last statistics interval.
	WrittenBytes uint64 `json:"written_bytes"`
	// HotDegree records how many times this region's hot statistics have
	// been updated; a higher value means the region has stayed hot longer.
	HotDegree int `json:"hot_degree"`
	// LastUpdateTime is used to calculate the average write rate.
	LastUpdateTime time.Time `json:"last_update_time"`
	// StoreID is the store of the region leader; it is excluded from the
	// JSON output.
	StoreID uint64 `json:"-"`
	// antiCount is used to eliminate some noise when removing a region
	// from the cache: a region is only evicted after repeated misses.
	antiCount int
	// version is used to check the region split times, so stale
	// statistics of a split region can be detected.
	version uint64
}

// RegionsStat is a sortable list of RegionStat ordered by WrittenBytes
// (ascending via Less; callers use sort.Reverse for hottest-first order).
type RegionsStat []RegionStat

func (m RegionsStat) Len() int           { return len(m) }
func (m RegionsStat) Swap(i, j int)      { m[i], m[j] = m[j], m[i] }
func (m RegionsStat) Less(i, j int) bool { return m[i].WrittenBytes < m[j].WrittenBytes }
|
||
// StoreHotRegions records all hot regions in one store with sequence | ||
type StoreHotRegions struct { | ||
TotalWrittenBytes uint64 `json:"total_written"` | ||
RegionCount int `json:"region_count"` | ||
RegionsStat RegionsStat `json:"stats"` | ||
} | ||
|
||
type balanceHotRegionScheduler struct { | ||
sync.RWMutex | ||
opt *scheduleOption | ||
limit uint64 | ||
scoreStatus map[uint64]*StoreHotRegions // store id -> regions status in this store | ||
r *rand.Rand | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. why not using global Rand directly? Using Source directly is not thread safe. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. It just used in one thread. |
||
} | ||
|
||
func newBalanceHotRegionScheduler(opt *scheduleOption) *balanceHotRegionScheduler { | ||
return &balanceHotRegionScheduler{ | ||
opt: opt, | ||
limit: 1, | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Can we modify the limit? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. use a const var |
||
scoreStatus: make(map[uint64]*StoreHotRegions), | ||
r: rand.New(rand.NewSource(time.Now().UnixNano())), | ||
} | ||
} | ||
|
||
func (l *balanceHotRegionScheduler) GetName() string { | ||
return "balance-hot-region-scheduler" | ||
} | ||
|
||
func (l *balanceHotRegionScheduler) GetResourceKind() ResourceKind { | ||
return priorityKind | ||
} | ||
|
||
func (l *balanceHotRegionScheduler) GetResourceLimit() uint64 { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. s/l/s/ |
||
return l.limit | ||
} | ||
|
||
func (l *balanceHotRegionScheduler) Prepare(cluster *clusterInfo) error { return nil } | ||
|
||
func (l *balanceHotRegionScheduler) Cleanup(cluster *clusterInfo) {} | ||
|
||
func (l *balanceHotRegionScheduler) Schedule(cluster *clusterInfo) Operator { | ||
l.calculateScore(cluster) | ||
region := l.SelectSourceRegion(cluster) | ||
if region == nil { | ||
return nil | ||
} | ||
newLeader := l.selectTransferLeader(region) | ||
if newLeader != nil { | ||
return newPriorityTransferLeader(region, newLeader) | ||
} | ||
peer := l.selectTransferPeer(region, cluster) | ||
if peer != nil { | ||
return newPriorityTransferPeer(region, region.Leader, peer) | ||
} | ||
return nil | ||
} | ||
|
||
func (l *balanceHotRegionScheduler) calculateScore(cluster *clusterInfo) { | ||
l.Lock() | ||
defer l.Unlock() | ||
l.scoreStatus = make(map[uint64]*StoreHotRegions) | ||
items := cluster.writeStatistics.elems() | ||
for _, item := range items { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Is it safe to access items after unlocked? |
||
r, ok := item.value.(*RegionStat) | ||
if !ok { | ||
continue | ||
} | ||
if r.HotDegree < hotRegionLowThreshold { | ||
continue | ||
} | ||
|
||
regionInfo := cluster.getRegion(r.RegionID) | ||
storeID := regionInfo.Leader.GetStoreId() | ||
status, ok := l.scoreStatus[storeID] | ||
if !ok { | ||
status = &StoreHotRegions{ | ||
RegionsStat: make(RegionsStat, 0, 100), | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. use const for 100 |
||
} | ||
l.scoreStatus[storeID] = status | ||
} | ||
status.TotalWrittenBytes += r.WrittenBytes | ||
status.RegionsStat = append(status.RegionsStat, RegionStat{ | ||
RegionID: r.RegionID, | ||
WrittenBytes: r.WrittenBytes, | ||
HotDegree: r.HotDegree, | ||
LastUpdateTime: r.LastUpdateTime, | ||
StoreID: storeID, | ||
antiCount: r.antiCount, | ||
version: r.version, | ||
}) | ||
status.RegionCount++ | ||
} | ||
|
||
for _, rs := range l.scoreStatus { | ||
sort.Sort(sort.Reverse(rs.RegionsStat)) | ||
} | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
should be ok. |
||
} | ||
|
||
func (l *balanceHotRegionScheduler) SelectSourceRegion(cluster *clusterInfo) *RegionInfo { | ||
var ( | ||
maxWritten uint64 | ||
sourceStore uint64 | ||
maxHotStoreRegionCount int | ||
) | ||
// choose a hot store as transfer source | ||
// the numbers of the hot regions in that store has higher priority than TotalWrittenBytes | ||
for sid, s := range l.scoreStatus { | ||
if s.RegionsStat.Len() < 2 { | ||
continue | ||
} | ||
|
||
if maxHotStoreRegionCount < s.RegionsStat.Len() { | ||
maxHotStoreRegionCount = s.RegionsStat.Len() | ||
maxWritten = s.TotalWrittenBytes | ||
sourceStore = sid | ||
continue | ||
} | ||
|
||
if maxHotStoreRegionCount == s.RegionsStat.Len() && maxWritten < s.TotalWrittenBytes { | ||
maxWritten = s.TotalWrittenBytes | ||
sourceStore = sid | ||
} | ||
} | ||
|
||
if sourceStore == 0 { | ||
return nil | ||
} | ||
|
||
length := l.scoreStatus[sourceStore].RegionsStat.Len() | ||
// the hottest region in the store not move | ||
// radmonly pick a region from 1 .. length-1 | ||
// TODO: consider hot degree when pick | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Please add comment |
||
rr := l.r.Int31n(int32(length-1)) + 1 | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Please add comment for this algorithm. |
||
pickedRegionStat := l.scoreStatus[sourceStore].RegionsStat[rr] | ||
if pickedRegionStat.antiCount < hotRegionAntiCount { | ||
return nil | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Can we try to peek another hot region in that condition? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. It will retry in scheduleController |
||
} | ||
sourceRegion := cluster.getRegion(pickedRegionStat.RegionID) | ||
if len(sourceRegion.DownPeers) != 0 || len(sourceRegion.PendingPeers) != 0 { | ||
return nil | ||
} | ||
// use written bytes per second | ||
sourceRegion.WrittenBytes = pickedRegionStat.WrittenBytes | ||
l.adjustBalanceLimit(sourceStore) | ||
return sourceRegion | ||
} | ||
|
||
func (l *balanceHotRegionScheduler) adjustBalanceLimit(storeID uint64) { | ||
s := l.scoreStatus[storeID] | ||
var hotRegionTotalCount float64 | ||
for _, m := range l.scoreStatus { | ||
hotRegionTotalCount += float64(m.RegionsStat.Len()) | ||
} | ||
|
||
avgRegionCount := hotRegionTotalCount / float64(len(l.scoreStatus)) | ||
// Multiplied by 0.75 to avoid transfer back and forth | ||
limit := uint64((float64(s.RegionsStat.Len()) - avgRegionCount) * 0.75) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. define a const var for 0.75 |
||
l.limit = maxUint64(1, limit) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 1 means ? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. at least 1. |
||
} | ||
|
||
func (l *balanceHotRegionScheduler) GetStatus() map[uint64]*StoreHotRegions { | ||
l.RLock() | ||
defer l.RUnlock() | ||
status := make(map[uint64]*StoreHotRegions) | ||
for id, stat := range l.scoreStatus { | ||
clone := *stat | ||
status[id] = &clone | ||
} | ||
return status | ||
} | ||
|
||
func (l *balanceHotRegionScheduler) selectTransferLeader(sourceRegion *RegionInfo) *metapb.Peer { | ||
followPeers := sourceRegion.GetFollowers() | ||
storeIDs := make([]uint64, 0, len(followPeers)) | ||
for _, peer := range followPeers { | ||
storeIDs = append(storeIDs, peer.GetStoreId()) | ||
} | ||
|
||
targetStoreID := l.selectBestStore(storeIDs, sourceRegion) | ||
var targetPeer *metapb.Peer | ||
for _, peer := range followPeers { | ||
if peer.GetStoreId() == targetStoreID { | ||
targetPeer = peer | ||
} | ||
} | ||
return targetPeer | ||
} | ||
|
||
func (l *balanceHotRegionScheduler) selectTransferPeer(sourceRegion *RegionInfo, cluster *clusterInfo) *metapb.Peer { | ||
var filters []Filter | ||
stores := cluster.getStores() | ||
|
||
filters = append(filters, newExcludedFilter(sourceRegion.GetStoreIds(), sourceRegion.GetStoreIds())) | ||
filters = append(filters, newDistinctScoreFilter(l.opt.GetReplication(), stores, cluster.getLeaderStore(sourceRegion))) | ||
filters = append(filters, newStateFilter(l.opt)) | ||
filters = append(filters, newStorageThresholdFilter(l.opt)) | ||
|
||
storeIDs := make([]uint64, 0, len(stores)) | ||
for _, store := range stores { | ||
if filterTarget(store, filters) { | ||
continue | ||
} | ||
storeIDs = append(storeIDs, store.GetId()) | ||
} | ||
targetStoreID := l.selectBestStore(storeIDs, sourceRegion) | ||
targetStore := cluster.getStore(targetStoreID) | ||
if targetStore == nil { | ||
return nil | ||
} | ||
newPeer, err := cluster.allocPeer(targetStore.GetId()) | ||
if err != nil { | ||
log.Errorf("failed to allocate peer: %v", err) | ||
return nil | ||
} | ||
|
||
return newPeer | ||
} | ||
|
||
// select a store to transfer peer | ||
// preferred to the store that with the least number of regions | ||
// and then choose the least total written bytes store | ||
func (l *balanceHotRegionScheduler) selectBestStore(stores []uint64, sourceRegion *RegionInfo) uint64 { | ||
sr := l.scoreStatus[sourceRegion.Leader.GetStoreId()] | ||
sourceStoreWrittenBytes := sr.TotalWrittenBytes | ||
sourceStoreHotRegionCount := sr.RegionsStat.Len() | ||
|
||
var ( | ||
targetStore uint64 | ||
minWrittenBytes uint64 = math.MaxUint64 | ||
) | ||
minRegionCount := int(math.MaxInt32) | ||
for _, store := range stores { | ||
if s, ok := l.scoreStatus[store]; ok { | ||
if sourceStoreHotRegionCount-s.RegionsStat.Len() > 1 && minRegionCount > s.RegionsStat.Len() { | ||
targetStore = store | ||
minWrittenBytes = s.TotalWrittenBytes | ||
minRegionCount = s.RegionsStat.Len() | ||
continue | ||
} | ||
if minRegionCount == s.RegionsStat.Len() && minWrittenBytes > s.TotalWrittenBytes && | ||
uint64(float64(sourceStoreWrittenBytes)*hotRegionScheduleFactor) > s.TotalWrittenBytes+2*sourceRegion.WrittenBytes { | ||
minWrittenBytes = s.TotalWrittenBytes | ||
targetStore = store | ||
} | ||
|
||
} else { | ||
targetStore = store | ||
break | ||
} | ||
} | ||
return targetStore | ||
} |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Please let pd-ctl support this later.