diff --git a/server/schedulers/balance_region.go b/server/schedulers/balance_region.go
index 9b703ee622a..79dfd904354 100644
--- a/server/schedulers/balance_region.go
+++ b/server/schedulers/balance_region.go
@@ -150,23 +150,34 @@ func (s *balanceRegionScheduler) Schedule(cluster opt.Cluster) []*operator.Opera
 		return stores[i].RegionScore(opts.GetRegionScoreFormulaVersion(), opts.GetHighSpaceRatio(), opts.GetLowSpaceRatio(), iOp, -1) >
 			stores[j].RegionScore(opts.GetRegionScoreFormulaVersion(), opts.GetHighSpaceRatio(), opts.GetLowSpaceRatio(), jOp, -1)
 	})
+
+	var allowBalanceEmptyRegion func(*core.RegionInfo) bool
+
+	switch cluster.(type) {
+	case *schedule.RangeCluster:
+		// Allow empty regions to be scheduled in a range cluster.
+		allowBalanceEmptyRegion = func(region *core.RegionInfo) bool { return true }
+	default:
+		allowBalanceEmptyRegion = opt.AllowBalanceEmptyRegion(cluster)
+	}
+
 	for _, plan.source = range stores {
 		retryLimit := s.retryQuota.GetLimit(plan.source)
 		for i := 0; i < retryLimit; i++ {
 			// Priority pick the region that has a pending peer.
 			// Pending region may means the disk is overload, remove the pending region firstly.
-			plan.region = cluster.RandPendingRegion(plan.SourceStoreID(), s.conf.Ranges, opt.HealthAllowPending(cluster), opt.ReplicatedRegion(cluster), opt.AllowBalanceEmptyRegion(cluster))
+			plan.region = cluster.RandPendingRegion(plan.SourceStoreID(), s.conf.Ranges, opt.HealthAllowPending(cluster), opt.ReplicatedRegion(cluster), allowBalanceEmptyRegion)
 			if plan.region == nil {
 				// Then pick the region that has a follower in the source store.
-				plan.region = cluster.RandFollowerRegion(plan.SourceStoreID(), s.conf.Ranges, opt.HealthRegion(cluster), opt.ReplicatedRegion(cluster), opt.AllowBalanceEmptyRegion(cluster))
+				plan.region = cluster.RandFollowerRegion(plan.SourceStoreID(), s.conf.Ranges, opt.HealthRegion(cluster), opt.ReplicatedRegion(cluster), allowBalanceEmptyRegion)
 			}
 			if plan.region == nil {
 				// Then pick the region has the leader in the source store.
-				plan.region = cluster.RandLeaderRegion(plan.SourceStoreID(), s.conf.Ranges, opt.HealthRegion(cluster), opt.ReplicatedRegion(cluster), opt.AllowBalanceEmptyRegion(cluster))
+				plan.region = cluster.RandLeaderRegion(plan.SourceStoreID(), s.conf.Ranges, opt.HealthRegion(cluster), opt.ReplicatedRegion(cluster), allowBalanceEmptyRegion)
 			}
 			if plan.region == nil {
 				// Finally pick learner.
-				plan.region = cluster.RandLearnerRegion(plan.SourceStoreID(), s.conf.Ranges, opt.HealthRegion(cluster), opt.ReplicatedRegion(cluster), opt.AllowBalanceEmptyRegion(cluster))
+				plan.region = cluster.RandLearnerRegion(plan.SourceStoreID(), s.conf.Ranges, opt.HealthRegion(cluster), opt.ReplicatedRegion(cluster), allowBalanceEmptyRegion)
 			}
 			if plan.region == nil {
 				schedulerCounter.WithLabelValues(s.GetName(), "no-region").Inc()
diff --git a/server/schedulers/balance_test.go b/server/schedulers/balance_test.go
index 56b597cc2d4..9d9dfd2720d 100644
--- a/server/schedulers/balance_test.go
+++ b/server/schedulers/balance_test.go
@@ -1052,28 +1052,29 @@ func (s *testRandomMergeSchedulerSuite) TestMerge(c *C) {
 	c.Assert(mb.IsScheduleAllowed(tc), IsFalse)
 }
 
-var _ = Suite(&testScatterRangeLeaderSuite{})
+var _ = Suite(&testScatterRangeSuite{})
 
-type testScatterRangeLeaderSuite struct {
+type testScatterRangeSuite struct {
 	ctx    context.Context
 	cancel context.CancelFunc
 }
 
-func (s *testScatterRangeLeaderSuite) SetUpSuite(c *C) {
+func (s *testScatterRangeSuite) SetUpSuite(c *C) {
 	s.ctx, s.cancel = context.WithCancel(context.Background())
 }
 
-func (s *testScatterRangeLeaderSuite) TearDownSuite(c *C) {
+func (s *testScatterRangeSuite) TearDownSuite(c *C) {
 	s.cancel()
 }
 
-func (s *testScatterRangeLeaderSuite) TestBalance(c *C) {
+func (s *testScatterRangeSuite) TestBalance(c *C) {
 	opt := config.NewTestOptions()
 	// TODO: enable palcementrules
 	opt.SetPlacementRuleEnabled(false)
 	tc := mockcluster.NewCluster(s.ctx, opt)
 	tc.DisableFeature(versioninfo.JointConsensus)
-	tc.SetTolerantSizeRatio(2.5)
+	// The range cluster uses its own tolerant ratio, so the cluster option has no effect here.
+	tc.SetTolerantSizeRatio(10000)
 	// Add stores 1,2,3,4,5.
 	tc.AddRegionStore(1, 0)
 	tc.AddRegionStore(2, 0)
@@ -1098,17 +1099,16 @@ func (s *testScatterRangeLeaderSuite) TestBalance(c *C) {
 		})
 		id += 4
 	}
-	// empty case
+	// empty region case
 	regions[49].EndKey = []byte("")
 	for _, meta := range regions {
 		leader := rand.Intn(4) % 3
 		regionInfo := core.NewRegionInfo(
 			meta,
 			meta.Peers[leader],
-			core.SetApproximateKeys(96),
-			core.SetApproximateSize(96),
+			core.SetApproximateKeys(1),
+			core.SetApproximateSize(1),
 		)
-		tc.Regions.SetRegion(regionInfo)
 	}
 
 	for i := 0; i < 100; i++ {
@@ -1132,7 +1132,7 @@ func (s *testScatterRangeLeaderSuite) TestBalance(c *C) {
 	}
 }
 
-func (s *testScatterRangeLeaderSuite) TestBalanceLeaderLimit(c *C) {
+func (s *testScatterRangeSuite) TestBalanceLeaderLimit(c *C) {
 	opt := config.NewTestOptions()
 	opt.SetPlacementRuleEnabled(false)
 	tc := mockcluster.NewCluster(s.ctx, opt)
@@ -1163,7 +1163,6 @@ func (s *testScatterRangeLeaderSuite) TestBalanceLeaderLimit(c *C) {
 		id += 4
 	}
 
-	// empty case
 	regions[49].EndKey = []byte("")
 	for _, meta := range regions {
 		leader := rand.Intn(4) % 3
@@ -1208,7 +1207,7 @@ func (s *testScatterRangeLeaderSuite) TestBalanceLeaderLimit(c *C) {
 	c.Check(maxLeaderCount-minLeaderCount, Greater, 10)
 }
 
-func (s *testScatterRangeLeaderSuite) TestConcurrencyUpdateConfig(c *C) {
+func (s *testScatterRangeSuite) TestConcurrencyUpdateConfig(c *C) {
 	opt := config.NewTestOptions()
 	tc := mockcluster.NewCluster(s.ctx, opt)
 	oc := schedule.NewOperatorController(s.ctx, nil, nil)
@@ -1234,7 +1233,7 @@ func (s *testScatterRangeLeaderSuite) TestConcurrencyUpdateConfig(c *C) {
 	ch <- struct{}{}
 }
 
-func (s *testScatterRangeLeaderSuite) TestBalanceWhenRegionNotHeartbeat(c *C) {
+func (s *testScatterRangeSuite) TestBalanceWhenRegionNotHeartbeat(c *C) {
 	opt := config.NewTestOptions()
 	tc := mockcluster.NewCluster(s.ctx, opt)
 	// Add stores 1,2,3.
diff --git a/server/schedulers/utils.go b/server/schedulers/utils.go
index 21f8d3fa378..ab3b88ae786 100644
--- a/server/schedulers/utils.go
+++ b/server/schedulers/utils.go
@@ -23,6 +23,7 @@ import (
 	"github.com/tikv/pd/pkg/errs"
 	"github.com/tikv/pd/pkg/typeutil"
 	"github.com/tikv/pd/server/core"
+	"github.com/tikv/pd/server/schedule"
 	"github.com/tikv/pd/server/schedule/operator"
 	"github.com/tikv/pd/server/schedule/opt"
 	"github.com/tikv/pd/server/statistics"
@@ -133,7 +134,14 @@ func (p *balancePlan) getTolerantResource() int64 {
 }
 
 func adjustTolerantRatio(cluster opt.Cluster, kind core.ScheduleKind) float64 {
-	tolerantSizeRatio := cluster.GetOpts().GetTolerantSizeRatio()
+	var tolerantSizeRatio float64
+	switch c := cluster.(type) {
+	case *schedule.RangeCluster:
+		// The range cluster uses a separate tolerant ratio configuration.
+		tolerantSizeRatio = c.GetTolerantSizeRatio()
+	default:
+		tolerantSizeRatio = cluster.GetOpts().GetTolerantSizeRatio()
+	}
	if kind.Resource == core.LeaderKind && kind.Policy == core.ByCount {
 		if tolerantSizeRatio == 0 {
 			return leaderTolerantSizeRatio
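Both hunks apply the same pattern: detect via a type switch whether the scheduler was handed the range-scoped cluster wrapper (*schedule.RangeCluster) and, if so, swap in range-specific behaviour, namely allowing empty regions to be balanced and reading the tolerant ratio from the wrapper instead of the cluster-wide option. Below is a minimal, self-contained Go sketch of that pattern; the cluster, baseCluster, rangeCluster, and region types and the two pick* helpers are simplified stand-ins invented for illustration, not the real PD interfaces.

package main

import "fmt"

// Simplified stand-ins for illustration only; these are not the real
// opt.Cluster / schedule.RangeCluster types from PD.
type region struct{ approximateSize int64 }

type cluster interface {
	tolerantSizeRatio() float64
}

type baseCluster struct{ ratio float64 }

func (c *baseCluster) tolerantSizeRatio() float64 { return c.ratio }

// rangeCluster wraps another cluster for a key range and carries its own
// tolerant ratio, loosely mirroring schedule.RangeCluster.
type rangeCluster struct {
	cluster
	ratio float64
}

func (c *rangeCluster) tolerantSizeRatio() float64 { return c.ratio }

// pickEmptyRegionFilter mirrors the type switch added in balance_region.go:
// a range cluster is allowed to balance empty regions, while every other
// cluster keeps the usual "skip empty regions" filter.
func pickEmptyRegionFilter(c cluster) func(*region) bool {
	switch c.(type) {
	case *rangeCluster:
		return func(*region) bool { return true }
	default:
		return func(r *region) bool { return r.approximateSize > 0 }
	}
}

// pickTolerantRatio mirrors the type switch added in utils.go: a range
// cluster supplies its own ratio instead of the cluster-wide option.
func pickTolerantRatio(c cluster) float64 {
	switch rc := c.(type) {
	case *rangeCluster:
		return rc.ratio
	default:
		return rc.tolerantSizeRatio()
	}
}

func main() {
	base := &baseCluster{ratio: 5}
	ranged := &rangeCluster{cluster: base, ratio: 2}
	empty := &region{approximateSize: 0}

	fmt.Println(pickEmptyRegionFilter(base)(empty))   // false: empty region skipped
	fmt.Println(pickEmptyRegionFilter(ranged)(empty)) // true: allowed for the range cluster
	fmt.Println(pickTolerantRatio(base), pickTolerantRatio(ranged)) // 5 2
}

Because the detection keys on the wrapper type, to my understanding only the scatter-range path (which hands a *schedule.RangeCluster to the embedded balance logic) picks up the new behaviour, while the plain balance-region scheduler keeps its default empty-region filter and tolerant ratio.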