scheduler: update hot region scheduler default config for v2 #5590

Merged · 2 commits · Oct 22, 2022 (diff below shows changes from 1 commit)

server/schedulers/hot_region_config.go (6 changes: 3 additions & 3 deletions)
@@ -44,7 +44,7 @@ const (

 var defaultPrioritiesConfig = prioritiesConfig{
     read:        []string{statistics.QueryPriority, statistics.BytePriority},
-    writeLeader: []string{statistics.KeyPriority, statistics.BytePriority},
+    writeLeader: []string{statistics.QueryPriority, statistics.BytePriority},
     writePeer:   []string{statistics.BytePriority, statistics.KeyPriority},
 }
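
The change above is the first half of the new default: write-leader scheduling now ranks hot leaders by query rate (QPS) first and byte rate second, where it previously led with key rate. An illustrative sketch of what that ordering means (the type and helper below are assumptions for illustration, not PD code):

    // Illustrative only: under the new default, two hot write leaders are
    // compared by query rate first and byte rate second; key rate no longer
    // participates in the default ordering.
    type regionLoad struct {
        QueryRate, ByteRate, KeyRate float64
    }

    func hotterWriteLeader(a, b regionLoad) bool {
        if a.QueryRate != b.QueryRate {
            return a.QueryRate > b.QueryRate // first priority: QPS
        }
        return a.ByteRate > b.ByteRate // second priority: byte rate
    }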

@@ -73,7 +73,7 @@ func initHotRegionScheduleConfig() *hotRegionSchedulerConfig {
         DstToleranceRatio:  1.05, // Tolerate 5% difference
         StrictPickingStore: true,
         EnableForTiFlash:   true,
-        RankFormulaVersion: "", // Use default value when it is "". Depends on getRankFormulaVersionLocked.
+        RankFormulaVersion: "v2",
         ForbidRWType:       "none",
     }
     cfg.applyPrioritiesConfig(defaultPrioritiesConfig)
@@ -305,7 +305,7 @@ func (conf *hotRegionSchedulerConfig) getRankFormulaVersionLocked() string {
     switch conf.RankFormulaVersion {
     case "v2":
         return "v2"
-    default:
+    default: // Use "v1" when it is ""
         return "v1"
     }
 }
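
Together, these two hunks keep upgrades compatible: a freshly initialized config now carries "v2" explicitly, while a config persisted by an older PD (where the field was stored as "") still resolves to "v1". A minimal sketch of the two paths, assuming the caller holds the lock as the getter's name implies:

    cfg := initHotRegionScheduleConfig()
    cfg.getRankFormulaVersionLocked() // "v2": fresh clusters get the v2 rank formula

    cfg.RankFormulaVersion = "" // value persisted by an older deployment
    cfg.getRankFormulaVersionLocked() // "v1": upgraded clusters keep the old formula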

server/schedulers/hot_region_test.go (51 changes: 32 additions & 19 deletions)
@@ -595,6 +595,8 @@ func TestHotWriteRegionScheduleWithKeyRate(t *testing.T) {
     re.NoError(err)
     hb.(*hotScheduler).conf.SetDstToleranceRatio(1)
     hb.(*hotScheduler).conf.SetSrcToleranceRatio(1)
+    hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{statistics.KeyPriority, statistics.BytePriority}
+    hb.(*hotScheduler).conf.RankFormulaVersion = "v1"

     tc := mockcluster.NewCluster(ctx, opt)
     tc.SetHotRegionCacheHitsThreshold(0)
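
This pattern repeats throughout the test file: suites written against the old defaults pin the v1 formula (and, for the write tests, the key-first write-leader priorities) so their expectations are unaffected by the new defaults; read tests only pin the formula, since the default read priorities are unchanged. The repeated pinning could be collected into a helper along these lines (hypothetical, not part of the PR):

    // pinLegacyHotConfig is a hypothetical helper that restores the pre-v2
    // defaults on a freshly created hot-region scheduler.
    func pinLegacyHotConfig(hb schedule.Scheduler) {
        conf := hb.(*hotScheduler).conf
        conf.WriteLeaderPriorities = []string{statistics.KeyPriority, statistics.BytePriority}
        conf.RankFormulaVersion = "v1"
    }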
@@ -732,6 +734,7 @@ func TestHotWriteRegionScheduleWithLeader(t *testing.T) {
     statistics.Denoising = false
     opt := config.NewTestOptions()
     hb, err := schedule.CreateScheduler(statistics.Write.String(), schedule.NewOperatorController(ctx, nil, nil), storage.NewStorageWithMemoryBackend(), nil)
+    hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{statistics.KeyPriority, statistics.BytePriority}
     re.NoError(err)

     tc := mockcluster.NewCluster(ctx, opt)
@@ -794,6 +797,8 @@ func TestHotWriteRegionScheduleWithPendingInfluence(t *testing.T) {
     opt := config.NewTestOptions()
     hb, err := schedule.CreateScheduler(statistics.Write.String(), schedule.NewOperatorController(ctx, nil, nil), storage.NewStorageWithMemoryBackend(), nil)
     re.NoError(err)
+    hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{statistics.KeyPriority, statistics.BytePriority}
+    hb.(*hotScheduler).conf.RankFormulaVersion = "v1"
     old := pendingAmpFactor
     pendingAmpFactor = 0.0
     defer func() {
@@ -885,6 +890,8 @@ func TestHotWriteRegionScheduleWithRuleEnabled(t *testing.T) {
     tc.SetEnablePlacementRules(true)
     hb, err := schedule.CreateScheduler(statistics.Write.String(), schedule.NewOperatorController(ctx, nil, nil), storage.NewStorageWithMemoryBackend(), nil)
     re.NoError(err)
+    hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{statistics.KeyPriority, statistics.BytePriority}
+
     tc.SetHotRegionCacheHitsThreshold(0)
     key, err := hex.DecodeString("")
     re.NoError(err)
@@ -1088,6 +1095,7 @@ func TestHotReadRegionScheduleWithQuery(t *testing.T) {
     re.NoError(err)
     hb.(*hotScheduler).conf.SetSrcToleranceRatio(1)
     hb.(*hotScheduler).conf.SetDstToleranceRatio(1)
+    hb.(*hotScheduler).conf.RankFormulaVersion = "v1"

     tc := mockcluster.NewCluster(ctx, opt)
     tc.SetHotRegionCacheHitsThreshold(0)
@@ -1120,6 +1128,7 @@ func TestHotReadRegionScheduleWithKeyRate(t *testing.T) {
     opt := config.NewTestOptions()
     hb, err := schedule.CreateScheduler(statistics.Read.String(), schedule.NewOperatorController(ctx, nil, nil), storage.NewStorageWithMemoryBackend(), nil)
     re.NoError(err)
+    hb.(*hotScheduler).conf.RankFormulaVersion = "v1"
     hb.(*hotScheduler).conf.SetSrcToleranceRatio(1)
     hb.(*hotScheduler).conf.SetDstToleranceRatio(1)
     hb.(*hotScheduler).conf.ReadPriorities = []string{statistics.BytePriority, statistics.KeyPriority}
@@ -1177,6 +1186,7 @@ func TestHotReadRegionScheduleWithPendingInfluence(t *testing.T) {
     hb, err := schedule.CreateScheduler(statistics.Read.String(), schedule.NewOperatorController(ctx, nil, nil), storage.NewStorageWithMemoryBackend(), nil)
     re.NoError(err)
     // For test
+    hb.(*hotScheduler).conf.RankFormulaVersion = "v1"
     hb.(*hotScheduler).conf.GreatDecRatio = 0.99
     hb.(*hotScheduler).conf.MinorDecRatio = 1
     hb.(*hotScheduler).conf.DstToleranceRatio = 1
@@ -1936,6 +1946,8 @@ func TestHotScheduleWithPriority(t *testing.T) {
     testutil.CheckTransferLeader(re, ops[0], operator.OpHotRegion, 1, 3)

     hb, err = schedule.CreateScheduler(statistics.Write.String(), schedule.NewOperatorController(ctx, nil, nil), storage.NewStorageWithMemoryBackend(), nil)
+    hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{statistics.KeyPriority, statistics.BytePriority}
+    hb.(*hotScheduler).conf.RankFormulaVersion = "v1"
     re.NoError(err)

     // assert loose store picking
@@ -1980,6 +1992,7 @@ func TestHotScheduleWithStddev(t *testing.T) {
     re.NoError(err)
     hb.(*hotScheduler).conf.SetDstToleranceRatio(1.0)
     hb.(*hotScheduler).conf.SetSrcToleranceRatio(1.0)
+    hb.(*hotScheduler).conf.RankFormulaVersion = "v1"
     tc := mockcluster.NewCluster(ctx, opt)
     tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0))
     tc.SetHotRegionCacheHitsThreshold(0)
@@ -2084,7 +2097,7 @@ func TestCompatibility(t *testing.T) {
     // default
     checkPriority(re, hb.(*hotScheduler), tc, [3][2]int{
         {statistics.QueryDim, statistics.ByteDim},
-        {statistics.KeyDim, statistics.ByteDim},
+        {statistics.QueryDim, statistics.ByteDim},
         {statistics.ByteDim, statistics.KeyDim},
     })
     // config error value
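
In each of these [3][2]int matrices the rows are the read, write-leader, and write-peer priority dimensions, in that order (see checkPriority, which this commit moves up in the file), so it is always the middle row, write-leader, whose expected first dimension moves from KeyDim to QueryDim.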
@@ -2093,7 +2106,7 @@
     hb.(*hotScheduler).conf.WritePeerPriorities = []string{statistics.QueryPriority, statistics.BytePriority, statistics.KeyPriority}
     checkPriority(re, hb.(*hotScheduler), tc, [3][2]int{
         {statistics.QueryDim, statistics.ByteDim},
-        {statistics.KeyDim, statistics.ByteDim},
+        {statistics.QueryDim, statistics.ByteDim},
         {statistics.ByteDim, statistics.KeyDim},
     })
     // low version
@@ -2135,7 +2148,7 @@ func TestCompatibility(t *testing.T) {
     re.False(hb.(*hotScheduler).conf.lastQuerySupported) // it will updated after scheduling
     checkPriority(re, hb.(*hotScheduler), tc, [3][2]int{
         {statistics.QueryDim, statistics.ByteDim},
-        {statistics.KeyDim, statistics.ByteDim},
+        {statistics.QueryDim, statistics.ByteDim},
         {statistics.ByteDim, statistics.KeyDim},
     })
     re.True(hb.(*hotScheduler).conf.lastQuerySupported)
@@ -2148,12 +2161,12 @@ func TestCompatibilityConfig(t *testing.T) {
     opt := config.NewTestOptions()
     tc := mockcluster.NewCluster(ctx, opt)

-    // From new or 3.x cluster
+    // From new or 3.x cluster, it will use new config
     hb, err := schedule.CreateScheduler(HotRegionType, schedule.NewOperatorController(ctx, tc, nil), storage.NewStorageWithMemoryBackend(), schedule.ConfigSliceDecoder("hot-region", nil))
     re.NoError(err)
     checkPriority(re, hb.(*hotScheduler), tc, [3][2]int{
         {statistics.QueryDim, statistics.ByteDim},
-        {statistics.KeyDim, statistics.ByteDim},
+        {statistics.QueryDim, statistics.ByteDim},
         {statistics.ByteDim, statistics.KeyDim},
     })

@@ -2163,7 +2176,7 @@ func TestCompatibilityConfig(t *testing.T) {
     re.NoError(err)
     checkPriority(re, hb.(*hotScheduler), tc, [3][2]int{
         {statistics.QueryDim, statistics.ByteDim},
-        {statistics.KeyDim, statistics.ByteDim},
+        {statistics.QueryDim, statistics.ByteDim},
         {statistics.ByteDim, statistics.KeyDim},
     })
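
Both upgrade-path checks in TestCompatibilityConfig now expect the query-first write-leader priorities, matching the updated comment above: a cluster created fresh, or upgraded from 3.x, persisted no scheduler config, so it picks up the new defaults.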

@@ -2211,6 +2224,18 @@ func TestCompatibilityConfig(t *testing.T) {
     })
 }

+func checkPriority(re *require.Assertions, hb *hotScheduler, tc *mockcluster.Cluster, dims [3][2]int) {
+    readSolver := newBalanceSolver(hb, tc, statistics.Read, transferLeader)
+    writeLeaderSolver := newBalanceSolver(hb, tc, statistics.Write, transferLeader)
+    writePeerSolver := newBalanceSolver(hb, tc, statistics.Write, movePeer)
+    re.Equal(dims[0][0], readSolver.firstPriority)
+    re.Equal(dims[0][1], readSolver.secondPriority)
+    re.Equal(dims[1][0], writeLeaderSolver.firstPriority)
+    re.Equal(dims[1][1], writeLeaderSolver.secondPriority)
+    re.Equal(dims[2][0], writePeerSolver.firstPriority)
+    re.Equal(dims[2][1], writePeerSolver.secondPriority)
+}
+
 func TestConfigValidation(t *testing.T) {
     re := require.New(t)

@@ -2246,7 +2271,7 @@ func TestConfigValidation(t *testing.T) {
     // rank-formula-version
     // default
     hc = initHotRegionScheduleConfig()
-    re.Equal("v1", hc.GetRankFormulaVersion())
+    re.Equal("v2", hc.GetRankFormulaVersion())
     // v1
     hc.RankFormulaVersion = "v1"
     err = hc.valid()
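
A default-constructed config is now expected to report "v2". Judging from this test, valid() accepts "v1" and "v2" and rejects other strings (whether it still accepts the empty string is not visible in this excerpt); a sketch of that guard, stated as an assumption rather than a copy of PD's code:

    // Sketch only: the shape of the rank-formula-version check that valid()
    // appears to perform, inferred from the surrounding test.
    func validRankFormulaVersion(v string) bool {
        return v == "v1" || v == "v2"
    }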
@@ -2285,18 +2310,6 @@ func TestConfigValidation(t *testing.T) {
     re.Error(err)
 }

-func checkPriority(re *require.Assertions, hb *hotScheduler, tc *mockcluster.Cluster, dims [3][2]int) {
-    readSolver := newBalanceSolver(hb, tc, statistics.Read, transferLeader)
-    writeLeaderSolver := newBalanceSolver(hb, tc, statistics.Write, transferLeader)
-    writePeerSolver := newBalanceSolver(hb, tc, statistics.Write, movePeer)
-    re.Equal(dims[0][0], readSolver.firstPriority)
-    re.Equal(dims[0][1], readSolver.secondPriority)
-    re.Equal(dims[1][0], writeLeaderSolver.firstPriority)
-    re.Equal(dims[1][1], writeLeaderSolver.secondPriority)
-    re.Equal(dims[2][0], writePeerSolver.firstPriority)
-    re.Equal(dims[2][1], writePeerSolver.secondPriority)
-}
-
 type maxZombieDurTestCase struct {
     typ       resourceType
     isTiFlash bool

tests/pdctl/scheduler/scheduler_test.go (2 changes: 1 addition & 1 deletion)
@@ -318,7 +318,7 @@ func TestScheduler(t *testing.T) {
         "write-peer-priorities": []interface{}{"byte", "key"},
         "strict-picking-store":  "true",
         "enable-for-tiflash":    "true",
-        "rank-formula-version":  "v1",
+        "rank-formula-version":  "v2",
     }
     var conf map[string]interface{}
     mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "list"}, &conf)
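
The pd-ctl integration test now expects the new default in the listed scheduler config. In the test's own idiom the round trip looks roughly like this ("expected" is an illustrative name standing in for the full map shown above):

    var conf map[string]interface{}
    mustExec([]string{"-u", pdAddr, "scheduler", "config", "balance-hot-region-scheduler", "list"}, &conf)
    re.Equal(expected, conf) // "rank-formula-version" is now "v2" by default

Operators who prefer the old behavior can presumably still set rank-formula-version back to "v1" through pd-ctl's scheduler config set subcommand; the exact invocation is outside this diff.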