diff --git a/server/cluster/cluster_stat_test.go b/server/cluster/cluster_stat_test.go index 0b2a094b5b3..e5352b7ac0a 100644 --- a/server/cluster/cluster_stat_test.go +++ b/server/cluster/cluster_stat_test.go @@ -16,16 +16,12 @@ package cluster import ( "fmt" + "testing" - . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" ) -var _ = Suite(&testClusterStatSuite{}) - -type testClusterStatSuite struct { -} - func cpu(usage int64) []*pdpb.RecordPair { n := 10 name := "cpu" @@ -39,19 +35,20 @@ func cpu(usage int64) []*pdpb.RecordPair { return pairs } -func (s *testClusterStatSuite) TestCPUEntriesAppend(c *C) { +func TestCPUEntriesAppend(t *testing.T) { + re := require.New(t) N := 10 checkAppend := func(appended bool, usage int64, threads ...string) { entries := NewCPUEntries(N) - c.Assert(entries, NotNil) + re.NotNil(entries) for i := 0; i < N; i++ { entry := &StatEntry{ CpuUsages: cpu(usage), } - c.Assert(entries.Append(entry, threads...), Equals, appended) + re.Equal(appended, entries.Append(entry, threads...)) } - c.Assert(entries.cpu.Get(), Equals, float64(usage)) + re.Equal(float64(usage), entries.cpu.Get()) } checkAppend(true, 20) @@ -59,10 +56,11 @@ func (s *testClusterStatSuite) TestCPUEntriesAppend(c *C) { checkAppend(false, 0, "cup") } -func (s *testClusterStatSuite) TestCPUEntriesCPU(c *C) { +func TestCPUEntriesCPU(t *testing.T) { + re := require.New(t) N := 10 entries := NewCPUEntries(N) - c.Assert(entries, NotNil) + re.NotNil(entries) usages := cpu(20) for i := 0; i < N; i++ { @@ -71,13 +69,14 @@ func (s *testClusterStatSuite) TestCPUEntriesCPU(c *C) { } entries.Append(entry) } - c.Assert(entries.CPU(), Equals, float64(20)) + re.Equal(float64(20), entries.CPU()) } -func (s *testClusterStatSuite) TestStatEntriesAppend(c *C) { +func TestStatEntriesAppend(t *testing.T) { + re := require.New(t) N := 10 cst := NewStatEntries(N) - c.Assert(cst, NotNil) + re.NotNil(cst) ThreadsCollected = []string{"cpu:"} // fill 2*N entries, 2 entries for each store @@ -86,19 +85,20 @@ func (s *testClusterStatSuite) TestStatEntriesAppend(c *C) { StoreId: uint64(i % N), CpuUsages: cpu(20), } - c.Assert(cst.Append(entry), IsTrue) + re.True(cst.Append(entry)) } // use i as the store ID for i := 0; i < N; i++ { - c.Assert(cst.stats[uint64(i)].CPU(), Equals, float64(20)) + re.Equal(float64(20), cst.stats[uint64(i)].CPU()) } } -func (s *testClusterStatSuite) TestStatEntriesCPU(c *C) { +func TestStatEntriesCPU(t *testing.T) { + re := require.New(t) N := 10 cst := NewStatEntries(N) - c.Assert(cst, NotNil) + re.NotNil(cst) // the average cpu usage is 20% usages := cpu(20) @@ -110,14 +110,15 @@ func (s *testClusterStatSuite) TestStatEntriesCPU(c *C) { StoreId: uint64(i % N), CpuUsages: usages, } - c.Assert(cst.Append(entry), IsTrue) + re.True(cst.Append(entry)) } - c.Assert(cst.total, Equals, int64(2*N)) + re.Equal(int64(2*N), cst.total) // the cpu usage of the whole cluster is 20% - c.Assert(cst.CPU(), Equals, float64(20)) + re.Equal(float64(20), cst.CPU()) } -func (s *testClusterStatSuite) TestStatEntriesCPUStale(c *C) { +func TestStatEntriesCPUStale(t *testing.T) { + re := require.New(t) N := 10 cst := NewStatEntries(N) // make all entries stale immediately @@ -132,13 +133,14 @@ func (s *testClusterStatSuite) TestStatEntriesCPUStale(c *C) { } cst.Append(entry) } - c.Assert(cst.CPU(), Equals, float64(0)) + re.Equal(float64(0), cst.CPU()) } -func (s *testClusterStatSuite) TestStatEntriesState(c *C) { +func TestStatEntriesState(t *testing.T) { + re := 
require.New(t) Load := func(usage int64) *State { cst := NewStatEntries(10) - c.Assert(cst, NotNil) + re.NotNil(cst) usages := cpu(usage) ThreadsCollected = []string{"cpu:"} @@ -152,8 +154,8 @@ func (s *testClusterStatSuite) TestStatEntriesState(c *C) { } return &State{cst} } - c.Assert(Load(0).State(), Equals, LoadStateIdle) - c.Assert(Load(5).State(), Equals, LoadStateLow) - c.Assert(Load(10).State(), Equals, LoadStateNormal) - c.Assert(Load(30).State(), Equals, LoadStateHigh) + re.Equal(LoadStateIdle, Load(0).State()) + re.Equal(LoadStateLow, Load(5).State()) + re.Equal(LoadStateNormal, Load(10).State()) + re.Equal(LoadStateHigh, Load(30).State()) } diff --git a/server/cluster/cluster_test.go b/server/cluster/cluster_test.go index 530abff2b87..1d2120744e1 100644 --- a/server/cluster/cluster_test.go +++ b/server/cluster/cluster_test.go @@ -19,16 +19,15 @@ import ( "fmt" "math" "math/rand" - "strings" "sync" "testing" "time" - . "github.com/pingcap/check" "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/errs" "github.com/tikv/pd/pkg/mock/mockid" "github.com/tikv/pd/pkg/progress" @@ -44,29 +43,14 @@ import ( "github.com/tikv/pd/server/versioninfo" ) -func Test(t *testing.T) { - TestingT(t) -} - -var _ = Suite(&testClusterInfoSuite{}) - -type testClusterInfoSuite struct { - ctx context.Context - cancel context.CancelFunc -} - -func (s *testClusterInfoSuite) TearDownTest(c *C) { - s.cancel() -} +func TestStoreHeartbeat(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() -func (s *testClusterInfoSuite) SetUpTest(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) -} - -func (s *testClusterInfoSuite) TestStoreHeartbeat(c *C) { _, opt, err := newTestScheduleConfig() - c.Assert(err, IsNil) - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + re.NoError(err) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) n, np := uint64(3), uint64(3) stores := newTestStores(n, "2.0.0") @@ -74,9 +58,9 @@ func (s *testClusterInfoSuite) TestStoreHeartbeat(c *C) { regions := newTestRegions(n, n, np) for _, region := range regions { - c.Assert(cluster.putRegion(region), IsNil) + re.NoError(cluster.putRegion(region)) } - c.Assert(cluster.core.Regions.GetRegionCount(), Equals, int(n)) + re.Equal(int(n), cluster.core.Regions.GetRegionCount()) for i, store := range stores { storeStats := &pdpb.StoreStats{ @@ -85,30 +69,30 @@ func (s *testClusterInfoSuite) TestStoreHeartbeat(c *C) { Available: 50, RegionCount: 1, } - c.Assert(cluster.HandleStoreHeartbeat(storeStats), NotNil) + re.Error(cluster.HandleStoreHeartbeat(storeStats)) - c.Assert(cluster.putStoreLocked(store), IsNil) - c.Assert(cluster.GetStoreCount(), Equals, i+1) + re.NoError(cluster.putStoreLocked(store)) + re.Equal(i+1, cluster.GetStoreCount()) - c.Assert(store.GetLastHeartbeatTS().UnixNano(), Equals, int64(0)) + re.Equal(int64(0), store.GetLastHeartbeatTS().UnixNano()) - c.Assert(cluster.HandleStoreHeartbeat(storeStats), IsNil) + re.NoError(cluster.HandleStoreHeartbeat(storeStats)) s := cluster.GetStore(store.GetID()) - c.Assert(s.GetLastHeartbeatTS().UnixNano(), Not(Equals), int64(0)) - c.Assert(s.GetStoreStats(), DeepEquals, storeStats) + re.NotEqual(int64(0), 
s.GetLastHeartbeatTS().UnixNano()) + re.Equal(storeStats, s.GetStoreStats()) storeMetasAfterHeartbeat = append(storeMetasAfterHeartbeat, s.GetMeta()) } - c.Assert(cluster.GetStoreCount(), Equals, int(n)) + re.Equal(int(n), cluster.GetStoreCount()) for i, store := range stores { tmp := &metapb.Store{} ok, err := cluster.storage.LoadStore(store.GetID(), tmp) - c.Assert(ok, IsTrue) - c.Assert(err, IsNil) - c.Assert(tmp, DeepEquals, storeMetasAfterHeartbeat[i]) + re.True(ok) + re.NoError(err) + re.Equal(storeMetasAfterHeartbeat[i], tmp) } hotHeartBeat := &pdpb.StoreStats{ StoreId: 1, @@ -137,56 +121,60 @@ func (s *testClusterInfoSuite) TestStoreHeartbeat(c *C) { }, PeerStats: []*pdpb.PeerStat{}, } - c.Assert(cluster.HandleStoreHeartbeat(hotHeartBeat), IsNil) - c.Assert(cluster.HandleStoreHeartbeat(hotHeartBeat), IsNil) - c.Assert(cluster.HandleStoreHeartbeat(hotHeartBeat), IsNil) + re.NoError(cluster.HandleStoreHeartbeat(hotHeartBeat)) + re.NoError(cluster.HandleStoreHeartbeat(hotHeartBeat)) + re.NoError(cluster.HandleStoreHeartbeat(hotHeartBeat)) time.Sleep(20 * time.Millisecond) storeStats := cluster.hotStat.RegionStats(statistics.Read, 3) - c.Assert(storeStats[1], HasLen, 1) - c.Assert(storeStats[1][0].RegionID, Equals, uint64(1)) + re.Len(storeStats[1], 1) + re.Equal(uint64(1), storeStats[1][0].RegionID) interval := float64(hotHeartBeat.Interval.EndTimestamp - hotHeartBeat.Interval.StartTimestamp) - c.Assert(storeStats[1][0].Loads, HasLen, int(statistics.RegionStatCount)) - c.Assert(storeStats[1][0].Loads[statistics.RegionReadBytes], Equals, float64(hotHeartBeat.PeerStats[0].ReadBytes)/interval) - c.Assert(storeStats[1][0].Loads[statistics.RegionReadKeys], Equals, float64(hotHeartBeat.PeerStats[0].ReadKeys)/interval) - c.Assert(storeStats[1][0].Loads[statistics.RegionReadQuery], Equals, float64(hotHeartBeat.PeerStats[0].QueryStats.Get)/interval) + re.Len(storeStats[1][0].Loads, int(statistics.RegionStatCount)) + re.Equal(float64(hotHeartBeat.PeerStats[0].ReadBytes)/interval, storeStats[1][0].Loads[statistics.RegionReadBytes]) + re.Equal(float64(hotHeartBeat.PeerStats[0].ReadKeys)/interval, storeStats[1][0].Loads[statistics.RegionReadKeys]) + re.Equal(float64(hotHeartBeat.PeerStats[0].QueryStats.Get)/interval, storeStats[1][0].Loads[statistics.RegionReadQuery]) // After cold heartbeat, we won't find region 1 peer in regionStats - c.Assert(cluster.HandleStoreHeartbeat(coldHeartBeat), IsNil) + re.NoError(cluster.HandleStoreHeartbeat(coldHeartBeat)) time.Sleep(20 * time.Millisecond) storeStats = cluster.hotStat.RegionStats(statistics.Read, 1) - c.Assert(storeStats[1], HasLen, 0) + re.Len(storeStats[1], 0) // After hot heartbeat, we can find region 1 peer again - c.Assert(cluster.HandleStoreHeartbeat(hotHeartBeat), IsNil) + re.NoError(cluster.HandleStoreHeartbeat(hotHeartBeat)) time.Sleep(20 * time.Millisecond) storeStats = cluster.hotStat.RegionStats(statistics.Read, 3) - c.Assert(storeStats[1], HasLen, 1) - c.Assert(storeStats[1][0].RegionID, Equals, uint64(1)) + re.Len(storeStats[1], 1) + re.Equal(uint64(1), storeStats[1][0].RegionID) // after several cold heartbeats, and one hot heartbeat, we also can't find region 1 peer - c.Assert(cluster.HandleStoreHeartbeat(coldHeartBeat), IsNil) - c.Assert(cluster.HandleStoreHeartbeat(coldHeartBeat), IsNil) - c.Assert(cluster.HandleStoreHeartbeat(coldHeartBeat), IsNil) + re.NoError(cluster.HandleStoreHeartbeat(coldHeartBeat)) + re.NoError(cluster.HandleStoreHeartbeat(coldHeartBeat)) + re.NoError(cluster.HandleStoreHeartbeat(coldHeartBeat)) 
time.Sleep(20 * time.Millisecond) storeStats = cluster.hotStat.RegionStats(statistics.Read, 0) - c.Assert(storeStats[1], HasLen, 0) - c.Assert(cluster.HandleStoreHeartbeat(hotHeartBeat), IsNil) + re.Len(storeStats[1], 0) + re.NoError(cluster.HandleStoreHeartbeat(hotHeartBeat)) time.Sleep(20 * time.Millisecond) storeStats = cluster.hotStat.RegionStats(statistics.Read, 1) - c.Assert(storeStats[1], HasLen, 1) - c.Assert(storeStats[1][0].RegionID, Equals, uint64(1)) + re.Len(storeStats[1], 1) + re.Equal(uint64(1), storeStats[1][0].RegionID) storeStats = cluster.hotStat.RegionStats(statistics.Read, 3) - c.Assert(storeStats[1], HasLen, 0) + re.Len(storeStats[1], 0) // after 2 hot heartbeats, wo can find region 1 peer again - c.Assert(cluster.HandleStoreHeartbeat(hotHeartBeat), IsNil) - c.Assert(cluster.HandleStoreHeartbeat(hotHeartBeat), IsNil) + re.NoError(cluster.HandleStoreHeartbeat(hotHeartBeat)) + re.NoError(cluster.HandleStoreHeartbeat(hotHeartBeat)) time.Sleep(20 * time.Millisecond) storeStats = cluster.hotStat.RegionStats(statistics.Read, 3) - c.Assert(storeStats[1], HasLen, 1) - c.Assert(storeStats[1][0].RegionID, Equals, uint64(1)) + re.Len(storeStats[1], 1) + re.Equal(uint64(1), storeStats[1][0].RegionID) } -func (s *testClusterInfoSuite) TestFilterUnhealthyStore(c *C) { +func TestFilterUnhealthyStore(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, err := newTestScheduleConfig() - c.Assert(err, IsNil) - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + re.NoError(err) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) stores := newTestStores(3, "2.0.0") for _, store := range stores { @@ -196,9 +184,9 @@ func (s *testClusterInfoSuite) TestFilterUnhealthyStore(c *C) { Available: 50, RegionCount: 1, } - c.Assert(cluster.putStoreLocked(store), IsNil) - c.Assert(cluster.HandleStoreHeartbeat(storeStats), IsNil) - c.Assert(cluster.hotStat.GetRollingStoreStats(store.GetID()), NotNil) + re.NoError(cluster.putStoreLocked(store)) + re.NoError(cluster.HandleStoreHeartbeat(storeStats)) + re.NotNil(cluster.hotStat.GetRollingStoreStats(store.GetID())) } for _, store := range stores { @@ -209,17 +197,21 @@ func (s *testClusterInfoSuite) TestFilterUnhealthyStore(c *C) { RegionCount: 1, } newStore := store.Clone(core.TombstoneStore()) - c.Assert(cluster.putStoreLocked(newStore), IsNil) - c.Assert(cluster.HandleStoreHeartbeat(storeStats), IsNil) - c.Assert(cluster.hotStat.GetRollingStoreStats(store.GetID()), IsNil) + re.NoError(cluster.putStoreLocked(newStore)) + re.NoError(cluster.HandleStoreHeartbeat(storeStats)) + re.Nil(cluster.hotStat.GetRollingStoreStats(store.GetID())) } } -func (s *testClusterInfoSuite) TestSetOfflineStore(c *C) { +func TestSetOfflineStore(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, err := newTestScheduleConfig() - c.Assert(err, IsNil) - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) - cluster.coordinator = newCoordinator(s.ctx, cluster, nil) + re.NoError(err) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + cluster.coordinator = newCoordinator(ctx, cluster, nil) cluster.ruleManager =
placement.NewRuleManager(storage.NewStorageWithMemoryBackend(), cluster, cluster.GetOpts()) if opt.IsPlacementRulesEnabled() { err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels()) @@ -230,65 +222,69 @@ func (s *testClusterInfoSuite) TestSetOfflineStore(c *C) { // Put 6 stores. for _, store := range newTestStores(6, "2.0.0") { - c.Assert(cluster.PutStore(store.GetMeta()), IsNil) + re.NoError(cluster.PutStore(store.GetMeta())) } // store 1: up -> offline - c.Assert(cluster.RemoveStore(1, false), IsNil) + re.NoError(cluster.RemoveStore(1, false)) store := cluster.GetStore(1) - c.Assert(store.IsRemoving(), IsTrue) - c.Assert(store.IsPhysicallyDestroyed(), IsFalse) + re.True(store.IsRemoving()) + re.False(store.IsPhysicallyDestroyed()) // store 1: set physically to true success - c.Assert(cluster.RemoveStore(1, true), IsNil) + re.NoError(cluster.RemoveStore(1, true)) store = cluster.GetStore(1) - c.Assert(store.IsRemoving(), IsTrue) - c.Assert(store.IsPhysicallyDestroyed(), IsTrue) + re.True(store.IsRemoving()) + re.True(store.IsPhysicallyDestroyed()) // store 2:up -> offline & physically destroyed - c.Assert(cluster.RemoveStore(2, true), IsNil) + re.NoError(cluster.RemoveStore(2, true)) // store 2: set physically destroyed to false failed - c.Assert(cluster.RemoveStore(2, false), NotNil) - c.Assert(cluster.RemoveStore(2, true), IsNil) + re.Error(cluster.RemoveStore(2, false)) + re.NoError(cluster.RemoveStore(2, true)) // store 3: up to offline - c.Assert(cluster.RemoveStore(3, false), IsNil) - c.Assert(cluster.RemoveStore(3, false), IsNil) + re.NoError(cluster.RemoveStore(3, false)) + re.NoError(cluster.RemoveStore(3, false)) cluster.checkStores() // store 1,2,3 should be to tombstone for storeID := uint64(1); storeID <= 3; storeID++ { - c.Assert(cluster.GetStore(storeID).IsRemoved(), IsTrue) + re.True(cluster.GetStore(storeID).IsRemoved()) } // test bury store for storeID := uint64(0); storeID <= 4; storeID++ { store := cluster.GetStore(storeID) if store == nil || store.IsUp() { - c.Assert(cluster.BuryStore(storeID, false), NotNil) + re.Error(cluster.BuryStore(storeID, false)) } else { - c.Assert(cluster.BuryStore(storeID, false), IsNil) + re.NoError(cluster.BuryStore(storeID, false)) } } } -func (s *testClusterInfoSuite) TestSetOfflineWithReplica(c *C) { +func TestSetOfflineWithReplica(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, err := newTestScheduleConfig() - c.Assert(err, IsNil) - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) - cluster.coordinator = newCoordinator(s.ctx, cluster, nil) + re.NoError(err) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + cluster.coordinator = newCoordinator(ctx, cluster, nil) // Put 4 stores. for _, store := range newTestStores(4, "2.0.0") { - c.Assert(cluster.PutStore(store.GetMeta()), IsNil) + re.NoError(cluster.PutStore(store.GetMeta())) } - c.Assert(cluster.RemoveStore(2, false), IsNil) + re.NoError(cluster.RemoveStore(2, false)) // should be failed since no enough store to accommodate the extra replica. 
err = cluster.RemoveStore(3, false) - c.Assert(strings.Contains(err.Error(), string(errs.ErrStoresNotEnough.RFCCode())), IsTrue) - c.Assert(cluster.RemoveStore(3, false), NotNil) + re.Contains(err.Error(), string(errs.ErrStoresNotEnough.RFCCode())) + re.Error(cluster.RemoveStore(3, false)) // should be success since physically-destroyed is true. - c.Assert(cluster.RemoveStore(3, true), IsNil) + re.NoError(cluster.RemoveStore(3, true)) } func addEvictLeaderScheduler(cluster *RaftCluster, storeID uint64) (evictScheduler schedule.Scheduler, err error) { @@ -305,62 +301,74 @@ func addEvictLeaderScheduler(cluster *RaftCluster, storeID uint64) (evictSchedul return } -func (s *testClusterInfoSuite) TestSetOfflineStoreWithEvictLeader(c *C) { +func TestSetOfflineStoreWithEvictLeader(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, err := newTestScheduleConfig() - c.Assert(err, IsNil) + re.NoError(err) opt.SetMaxReplicas(1) - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) - cluster.coordinator = newCoordinator(s.ctx, cluster, nil) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + cluster.coordinator = newCoordinator(ctx, cluster, nil) // Put 3 stores. for _, store := range newTestStores(3, "2.0.0") { - c.Assert(cluster.PutStore(store.GetMeta()), IsNil) + re.NoError(cluster.PutStore(store.GetMeta())) } _, err = addEvictLeaderScheduler(cluster, 1) - c.Assert(err, IsNil) - c.Assert(cluster.RemoveStore(2, false), IsNil) + re.NoError(err) + re.NoError(cluster.RemoveStore(2, false)) // should be failed since there is only 1 store left and it is the evict-leader store. err = cluster.RemoveStore(3, false) - c.Assert(err, NotNil) - c.Assert(strings.Contains(err.Error(), string(errs.ErrNoStoreForRegionLeader.RFCCode())), IsTrue) - c.Assert(cluster.RemoveScheduler(schedulers.EvictLeaderName), IsNil) - c.Assert(cluster.RemoveStore(3, false), IsNil) + re.Error(err) + re.Contains(err.Error(), string(errs.ErrNoStoreForRegionLeader.RFCCode())) + re.NoError(cluster.RemoveScheduler(schedulers.EvictLeaderName)) + re.NoError(cluster.RemoveStore(3, false)) } -func (s *testClusterInfoSuite) TestForceBuryStore(c *C) { +func TestForceBuryStore(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, err := newTestScheduleConfig() - c.Assert(err, IsNil) - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + re.NoError(err) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) // Put 2 stores. 
stores := newTestStores(2, "5.3.0") stores[1] = stores[1].Clone(core.SetLastHeartbeatTS(time.Now())) for _, store := range stores { - c.Assert(cluster.PutStore(store.GetMeta()), IsNil) + re.NoError(cluster.PutStore(store.GetMeta())) } - c.Assert(cluster.BuryStore(uint64(1), true), IsNil) - c.Assert(cluster.BuryStore(uint64(2), true), NotNil) - c.Assert(errors.ErrorEqual(cluster.BuryStore(uint64(3), true), errs.ErrStoreNotFound.FastGenByArgs(uint64(3))), IsTrue) + re.NoError(cluster.BuryStore(uint64(1), true)) + re.Error(cluster.BuryStore(uint64(2), true)) + re.True(errors.ErrorEqual(cluster.BuryStore(uint64(3), true), errs.ErrStoreNotFound.FastGenByArgs(uint64(3)))) } -func (s *testClusterInfoSuite) TestReuseAddress(c *C) { +func TestReuseAddress(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, err := newTestScheduleConfig() - c.Assert(err, IsNil) - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) - cluster.coordinator = newCoordinator(s.ctx, cluster, nil) + re.NoError(err) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + cluster.coordinator = newCoordinator(ctx, cluster, nil) // Put 4 stores. for _, store := range newTestStores(4, "2.0.0") { - c.Assert(cluster.PutStore(store.GetMeta()), IsNil) + re.NoError(cluster.PutStore(store.GetMeta())) } // store 1: up // store 2: offline - c.Assert(cluster.RemoveStore(2, false), IsNil) + re.NoError(cluster.RemoveStore(2, false)) // store 3: offline and physically destroyed - c.Assert(cluster.RemoveStore(3, true), IsNil) + re.NoError(cluster.RemoveStore(3, true)) // store 4: tombstone - c.Assert(cluster.RemoveStore(4, true), IsNil) - c.Assert(cluster.BuryStore(4, false), IsNil) + re.NoError(cluster.RemoveStore(4, true)) + re.NoError(cluster.BuryStore(4, false)) for id := uint64(1); id <= 4; id++ { storeInfo := cluster.GetStore(id) @@ -375,9 +383,9 @@ func (s *testClusterInfoSuite) TestReuseAddress(c *C) { if storeInfo.IsPhysicallyDestroyed() || storeInfo.IsRemoved() { // try to start a new store with the same address with store which is physically destryed or tombstone should be success - c.Assert(cluster.PutStore(newStore), IsNil) + re.NoError(cluster.PutStore(newStore)) } else { - c.Assert(cluster.PutStore(newStore), NotNil) + re.Error(cluster.PutStore(newStore)) } } } @@ -386,11 +394,15 @@ func getTestDeployPath(storeID uint64) string { return fmt.Sprintf("test/store%d", storeID) } -func (s *testClusterInfoSuite) TestUpStore(c *C) { +func TestUpStore(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, err := newTestScheduleConfig() - c.Assert(err, IsNil) - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) - cluster.coordinator = newCoordinator(s.ctx, cluster, nil) + re.NoError(err) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + cluster.coordinator = newCoordinator(ctx, cluster, nil) cluster.ruleManager = placement.NewRuleManager(storage.NewStorageWithMemoryBackend(), cluster, cluster.GetOpts()) if opt.IsPlacementRulesEnabled() { err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels()) @@ -401,43 +413,47 @@ func (s *testClusterInfoSuite) TestUpStore(c 
*C) { // Put 5 stores. for _, store := range newTestStores(5, "5.0.0") { - c.Assert(cluster.PutStore(store.GetMeta()), IsNil) + re.NoError(cluster.PutStore(store.GetMeta())) } // set store 1 offline - c.Assert(cluster.RemoveStore(1, false), IsNil) + re.NoError(cluster.RemoveStore(1, false)) // up a offline store should be success. - c.Assert(cluster.UpStore(1), IsNil) + re.NoError(cluster.UpStore(1)) // set store 2 offline and physically destroyed - c.Assert(cluster.RemoveStore(2, true), IsNil) - c.Assert(cluster.UpStore(2), NotNil) + re.NoError(cluster.RemoveStore(2, true)) + re.Error(cluster.UpStore(2)) // bury store 2 cluster.checkStores() // store is tombstone err = cluster.UpStore(2) - c.Assert(errors.ErrorEqual(err, errs.ErrStoreRemoved.FastGenByArgs(2)), IsTrue) + re.True(errors.ErrorEqual(err, errs.ErrStoreRemoved.FastGenByArgs(2))) // store 3 is up - c.Assert(cluster.UpStore(3), IsNil) + re.NoError(cluster.UpStore(3)) // store 4 not exist err = cluster.UpStore(10) - c.Assert(errors.ErrorEqual(err, errs.ErrStoreNotFound.FastGenByArgs(4)), IsTrue) + re.True(errors.ErrorEqual(err, errs.ErrStoreNotFound.FastGenByArgs(4))) } -func (s *testClusterInfoSuite) TestRemovingProcess(c *C) { +func TestRemovingProcess(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, err := newTestScheduleConfig() - c.Assert(err, IsNil) - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) - cluster.coordinator = newCoordinator(s.ctx, cluster, nil) + re.NoError(err) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + cluster.coordinator = newCoordinator(ctx, cluster, nil) cluster.SetPrepared() // Put 5 stores. stores := newTestStores(5, "5.0.0") for _, store := range stores { - c.Assert(cluster.PutStore(store.GetMeta()), IsNil) + re.NoError(cluster.PutStore(store.GetMeta())) } regions := newTestRegions(100, 5, 1) var regionInStore1 []*core.RegionInfo @@ -446,19 +462,19 @@ func (s *testClusterInfoSuite) TestRemovingProcess(c *C) { region = region.Clone(core.SetApproximateSize(100)) regionInStore1 = append(regionInStore1, region) } - c.Assert(cluster.putRegion(region), IsNil) + re.NoError(cluster.putRegion(region)) } - c.Assert(len(regionInStore1), Equals, 20) + re.Len(regionInStore1, 20) cluster.progressManager = progress.NewManager() cluster.RemoveStore(1, false) cluster.checkStores() process := "removing-1" // no region moving p, l, cs, err := cluster.progressManager.Status(process) - c.Assert(err, IsNil) - c.Assert(p, Equals, 0.0) - c.Assert(l, Equals, math.MaxFloat64) - c.Assert(cs, Equals, 0.0) + re.NoError(err) + re.Equal(0.0, p) + re.Equal(math.MaxFloat64, l) + re.Equal(0.0, cs) i := 0 // simulate region moving by deleting region from store 1 for _, region := range regionInStore1 { @@ -470,22 +486,26 @@ func (s *testClusterInfoSuite) TestRemovingProcess(c *C) { } cluster.checkStores() p, l, cs, err = cluster.progressManager.Status(process) - c.Assert(err, IsNil) + re.NoError(err) // In above we delete 5 region from store 1, the total count of region in store 1 is 20. // process = 5 / 20 = 0.25 - c.Assert(p, Equals, 0.25) + re.Equal(0.25, p) // Each region is 100MB, we use more than 1s to move 5 region. 
// speed = 5 * 100MB / 20s = 25MB/s - c.Assert(cs, Equals, 25.0) + re.Equal(25.0, cs) // left second = 15 * 100MB / 25s = 60s - c.Assert(l, Equals, 60.0) + re.Equal(60.0, l) } -func (s *testClusterInfoSuite) TestDeleteStoreUpdatesClusterVersion(c *C) { +func TestDeleteStoreUpdatesClusterVersion(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, err := newTestScheduleConfig() - c.Assert(err, IsNil) - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) - cluster.coordinator = newCoordinator(s.ctx, cluster, nil) + re.NoError(err) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + cluster.coordinator = newCoordinator(ctx, cluster, nil) cluster.ruleManager = placement.NewRuleManager(storage.NewStorageWithMemoryBackend(), cluster, cluster.GetOpts()) if opt.IsPlacementRulesEnabled() { err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels()) @@ -496,48 +516,56 @@ func (s *testClusterInfoSuite) TestDeleteStoreUpdatesClusterVersion(c *C) { // Put 3 new 4.0.9 stores. for _, store := range newTestStores(3, "4.0.9") { - c.Assert(cluster.PutStore(store.GetMeta()), IsNil) + re.NoError(cluster.PutStore(store.GetMeta())) } - c.Assert(cluster.GetClusterVersion(), Equals, "4.0.9") + re.Equal("4.0.9", cluster.GetClusterVersion()) // Upgrade 2 stores to 5.0.0. for _, store := range newTestStores(2, "5.0.0") { - c.Assert(cluster.PutStore(store.GetMeta()), IsNil) + re.NoError(cluster.PutStore(store.GetMeta())) } - c.Assert(cluster.GetClusterVersion(), Equals, "4.0.9") + re.Equal("4.0.9", cluster.GetClusterVersion()) // Bury the other store. 
- c.Assert(cluster.RemoveStore(3, true), IsNil) + re.NoError(cluster.RemoveStore(3, true)) cluster.checkStores() - c.Assert(cluster.GetClusterVersion(), Equals, "5.0.0") + re.Equal("5.0.0", cluster.GetClusterVersion()) } -func (s *testClusterInfoSuite) TestStoreClusterVersion(c *C) { +func TestStoreClusterVersion(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, err := newTestScheduleConfig() - c.Assert(err, IsNil) - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + re.NoError(err) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) stores := newTestStores(3, "5.0.0") s1, s2, s3 := stores[0].GetMeta(), stores[1].GetMeta(), stores[2].GetMeta() s1.Version = "5.0.1" s2.Version = "5.0.3" s3.Version = "5.0.5" - c.Assert(cluster.PutStore(s2), IsNil) - c.Assert(cluster.GetClusterVersion(), Equals, s2.Version) + re.NoError(cluster.PutStore(s2)) + re.Equal(s2.Version, cluster.GetClusterVersion()) - c.Assert(cluster.PutStore(s1), IsNil) + re.NoError(cluster.PutStore(s1)) // the cluster version should be 5.0.1(the min one) - c.Assert(cluster.GetClusterVersion(), Equals, s1.Version) + re.Equal(s1.Version, cluster.GetClusterVersion()) - c.Assert(cluster.PutStore(s3), IsNil) + re.NoError(cluster.PutStore(s3)) // the cluster version should be 5.0.1(the min one) - c.Assert(cluster.GetClusterVersion(), Equals, s1.Version) + re.Equal(s1.Version, cluster.GetClusterVersion()) } -func (s *testClusterInfoSuite) TestRegionHeartbeatHotStat(c *C) { +func TestRegionHeartbeatHotStat(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, err := newTestScheduleConfig() - c.Assert(err, IsNil) - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) - cluster.coordinator = newCoordinator(s.ctx, cluster, nil) + re.NoError(err) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + cluster.coordinator = newCoordinator(ctx, cluster, nil) newTestStores(4, "2.0.0") peers := []*metapb.Peer{ { @@ -568,34 +596,38 @@ func (s *testClusterInfoSuite) TestRegionHeartbeatHotStat(c *C) { core.SetWrittenBytes(30000*10), core.SetWrittenKeys(300000*10)) err = cluster.processRegionHeartbeat(region) - c.Assert(err, IsNil) + re.NoError(err) // wait HotStat to update items time.Sleep(1 * time.Second) stats := cluster.hotStat.RegionStats(statistics.Write, 0) - c.Assert(stats[1], HasLen, 1) - c.Assert(stats[2], HasLen, 1) - c.Assert(stats[3], HasLen, 1) + re.Len(stats[1], 1) + re.Len(stats[2], 1) + re.Len(stats[3], 1) newPeer := &metapb.Peer{ Id: 4, StoreId: 4, } region = region.Clone(core.WithRemoveStorePeer(2), core.WithAddPeer(newPeer)) err = cluster.processRegionHeartbeat(region) - c.Assert(err, IsNil) + re.NoError(err) // wait HotStat to update items time.Sleep(1 * time.Second) stats = cluster.hotStat.RegionStats(statistics.Write, 0) - c.Assert(stats[1], HasLen, 1) - c.Assert(stats[2], HasLen, 0) - c.Assert(stats[3], HasLen, 1) - c.Assert(stats[4], HasLen, 1) + re.Len(stats[1], 1) + re.Len(stats[2], 0) + re.Len(stats[3], 1) + re.Len(stats[4], 1) } -func (s *testClusterInfoSuite) TestBucketHeartbeat(c *C) { +func TestBucketHeartbeat(t *testing.T) { + re := require.New(t) + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + _, opt, err := newTestScheduleConfig() - c.Assert(err, IsNil) - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) - cluster.coordinator = newCoordinator(s.ctx, cluster, nil) + re.NoError(err) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + cluster.coordinator = newCoordinator(ctx, cluster, nil) // case1: region is not exist buckets := &metapb.Buckets{ @@ -603,54 +635,58 @@ func (s *testClusterInfoSuite) TestBucketHeartbeat(c *C) { Version: 1, Keys: [][]byte{{'1'}, {'2'}}, } - c.Assert(cluster.processReportBuckets(buckets), NotNil) + re.Error(cluster.processReportBuckets(buckets)) // case2: bucket can be processed after the region update. stores := newTestStores(3, "2.0.0") n, np := uint64(2), uint64(2) regions := newTestRegions(n, n, np) for _, store := range stores { - c.Assert(cluster.putStoreLocked(store), IsNil) + re.NoError(cluster.putStoreLocked(store)) } - c.Assert(cluster.processRegionHeartbeat(regions[0]), IsNil) - c.Assert(cluster.processRegionHeartbeat(regions[1]), IsNil) - c.Assert(cluster.GetRegion(uint64(1)).GetBuckets(), IsNil) - c.Assert(cluster.processReportBuckets(buckets), IsNil) - c.Assert(cluster.GetRegion(uint64(1)).GetBuckets(), DeepEquals, buckets) + re.NoError(cluster.processRegionHeartbeat(regions[0])) + re.NoError(cluster.processRegionHeartbeat(regions[1])) + re.Nil(cluster.GetRegion(uint64(1)).GetBuckets()) + re.NoError(cluster.processReportBuckets(buckets)) + re.Equal(buckets, cluster.GetRegion(uint64(1)).GetBuckets()) // case3: the bucket version is same. - c.Assert(cluster.processReportBuckets(buckets), IsNil) + re.NoError(cluster.processReportBuckets(buckets)) // case4: the bucket version is changed. newBuckets := &metapb.Buckets{ RegionId: 1, Version: 3, Keys: [][]byte{{'1'}, {'2'}}, } - c.Assert(cluster.processReportBuckets(newBuckets), IsNil) - c.Assert(cluster.GetRegion(uint64(1)).GetBuckets(), DeepEquals, newBuckets) + re.NoError(cluster.processReportBuckets(newBuckets)) + re.Equal(newBuckets, cluster.GetRegion(uint64(1)).GetBuckets()) // case5: region update should inherit buckets. 
newRegion := regions[1].Clone(core.WithIncConfVer(), core.SetBuckets(nil)) cluster.storeConfigManager = config.NewTestStoreConfigManager(nil) config := cluster.storeConfigManager.GetStoreConfig() config.Coprocessor.EnableRegionBucket = true - c.Assert(cluster.processRegionHeartbeat(newRegion), IsNil) - c.Assert(cluster.GetRegion(uint64(1)).GetBuckets().GetKeys(), HasLen, 2) + re.NoError(cluster.processRegionHeartbeat(newRegion)) + re.Len(cluster.GetRegion(uint64(1)).GetBuckets().GetKeys(), 2) // case6: disable region bucket in config.Coprocessor.EnableRegionBucket = false newRegion2 := regions[1].Clone(core.WithIncConfVer(), core.SetBuckets(nil)) - c.Assert(cluster.processRegionHeartbeat(newRegion2), IsNil) - c.Assert(cluster.GetRegion(uint64(1)).GetBuckets(), IsNil) - c.Assert(cluster.GetRegion(uint64(1)).GetBuckets().GetKeys(), HasLen, 0) + re.NoError(cluster.processRegionHeartbeat(newRegion2)) + re.Nil(cluster.GetRegion(uint64(1)).GetBuckets()) + re.Len(cluster.GetRegion(uint64(1)).GetBuckets().GetKeys(), 0) } -func (s *testClusterInfoSuite) TestRegionHeartbeat(c *C) { +func TestRegionHeartbeat(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, err := newTestScheduleConfig() - c.Assert(err, IsNil) - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) - cluster.coordinator = newCoordinator(s.ctx, cluster, nil) + re.NoError(err) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + cluster.coordinator = newCoordinator(ctx, cluster, nil) n, np := uint64(3), uint64(3) @@ -658,32 +694,32 @@ func (s *testClusterInfoSuite) TestRegionHeartbeat(c *C) { regions := newTestRegions(n, n, np) for _, store := range stores { - c.Assert(cluster.putStoreLocked(store), IsNil) + re.NoError(cluster.putStoreLocked(store)) } for i, region := range regions { // region does not exist. - c.Assert(cluster.processRegionHeartbeat(region), IsNil) - checkRegions(c, cluster.core.Regions, regions[:i+1]) - checkRegionsKV(c, cluster.storage, regions[:i+1]) + re.NoError(cluster.processRegionHeartbeat(region)) + checkRegions(re, cluster.core.Regions, regions[:i+1]) + checkRegionsKV(re, cluster.storage, regions[:i+1]) // region is the same, not updated. - c.Assert(cluster.processRegionHeartbeat(region), IsNil) - checkRegions(c, cluster.core.Regions, regions[:i+1]) - checkRegionsKV(c, cluster.storage, regions[:i+1]) + re.NoError(cluster.processRegionHeartbeat(region)) + checkRegions(re, cluster.core.Regions, regions[:i+1]) + checkRegionsKV(re, cluster.storage, regions[:i+1]) origin := region // region is updated. region = origin.Clone(core.WithIncVersion()) regions[i] = region - c.Assert(cluster.processRegionHeartbeat(region), IsNil) - checkRegions(c, cluster.core.Regions, regions[:i+1]) - checkRegionsKV(c, cluster.storage, regions[:i+1]) + re.NoError(cluster.processRegionHeartbeat(region)) + checkRegions(re, cluster.core.Regions, regions[:i+1]) + checkRegionsKV(re, cluster.storage, regions[:i+1]) // region is stale (Version). 
stale := origin.Clone(core.WithIncConfVer()) - c.Assert(cluster.processRegionHeartbeat(stale), NotNil) - checkRegions(c, cluster.core.Regions, regions[:i+1]) - checkRegionsKV(c, cluster.storage, regions[:i+1]) + re.Error(cluster.processRegionHeartbeat(stale)) + checkRegions(re, cluster.core.Regions, regions[:i+1]) + checkRegionsKV(re, cluster.storage, regions[:i+1]) // region is updated. region = origin.Clone( @@ -691,15 +727,15 @@ func (s *testClusterInfoSuite) TestRegionHeartbeat(c *C) { core.WithIncConfVer(), ) regions[i] = region - c.Assert(cluster.processRegionHeartbeat(region), IsNil) - checkRegions(c, cluster.core.Regions, regions[:i+1]) - checkRegionsKV(c, cluster.storage, regions[:i+1]) + re.NoError(cluster.processRegionHeartbeat(region)) + checkRegions(re, cluster.core.Regions, regions[:i+1]) + checkRegionsKV(re, cluster.storage, regions[:i+1]) // region is stale (ConfVer). stale = origin.Clone(core.WithIncConfVer()) - c.Assert(cluster.processRegionHeartbeat(stale), NotNil) - checkRegions(c, cluster.core.Regions, regions[:i+1]) - checkRegionsKV(c, cluster.storage, regions[:i+1]) + re.Error(cluster.processRegionHeartbeat(stale)) + checkRegions(re, cluster.core.Regions, regions[:i+1]) + checkRegionsKV(re, cluster.storage, regions[:i+1]) // Add a down peer. region = region.Clone(core.WithDownPeers([]*pdpb.PeerStats{ @@ -709,70 +745,70 @@ func (s *testClusterInfoSuite) TestRegionHeartbeat(c *C) { }, })) regions[i] = region - c.Assert(cluster.processRegionHeartbeat(region), IsNil) - checkRegions(c, cluster.core.Regions, regions[:i+1]) + re.NoError(cluster.processRegionHeartbeat(region)) + checkRegions(re, cluster.core.Regions, regions[:i+1]) // Add a pending peer. region = region.Clone(core.WithPendingPeers([]*metapb.Peer{region.GetPeers()[rand.Intn(len(region.GetPeers()))]})) regions[i] = region - c.Assert(cluster.processRegionHeartbeat(region), IsNil) - checkRegions(c, cluster.core.Regions, regions[:i+1]) + re.NoError(cluster.processRegionHeartbeat(region)) + checkRegions(re, cluster.core.Regions, regions[:i+1]) // Clear down peers. region = region.Clone(core.WithDownPeers(nil)) regions[i] = region - c.Assert(cluster.processRegionHeartbeat(region), IsNil) - checkRegions(c, cluster.core.Regions, regions[:i+1]) + re.NoError(cluster.processRegionHeartbeat(region)) + checkRegions(re, cluster.core.Regions, regions[:i+1]) // Clear pending peers. region = region.Clone(core.WithPendingPeers(nil)) regions[i] = region - c.Assert(cluster.processRegionHeartbeat(region), IsNil) - checkRegions(c, cluster.core.Regions, regions[:i+1]) + re.NoError(cluster.processRegionHeartbeat(region)) + checkRegions(re, cluster.core.Regions, regions[:i+1]) // Remove peers. origin = region region = origin.Clone(core.SetPeers(region.GetPeers()[:1])) regions[i] = region - c.Assert(cluster.processRegionHeartbeat(region), IsNil) - checkRegions(c, cluster.core.Regions, regions[:i+1]) - checkRegionsKV(c, cluster.storage, regions[:i+1]) + re.NoError(cluster.processRegionHeartbeat(region)) + checkRegions(re, cluster.core.Regions, regions[:i+1]) + checkRegionsKV(re, cluster.storage, regions[:i+1]) // Add peers. region = origin regions[i] = region - c.Assert(cluster.processRegionHeartbeat(region), IsNil) - checkRegions(c, cluster.core.Regions, regions[:i+1]) - checkRegionsKV(c, cluster.storage, regions[:i+1]) + re.NoError(cluster.processRegionHeartbeat(region)) + checkRegions(re, cluster.core.Regions, regions[:i+1]) + checkRegionsKV(re, cluster.storage, regions[:i+1]) // Change leader. 
region = region.Clone(core.WithLeader(region.GetPeers()[1])) regions[i] = region - c.Assert(cluster.processRegionHeartbeat(region), IsNil) - checkRegions(c, cluster.core.Regions, regions[:i+1]) + re.NoError(cluster.processRegionHeartbeat(region)) + checkRegions(re, cluster.core.Regions, regions[:i+1]) // Change ApproximateSize. region = region.Clone(core.SetApproximateSize(144)) regions[i] = region - c.Assert(cluster.processRegionHeartbeat(region), IsNil) - checkRegions(c, cluster.core.Regions, regions[:i+1]) + re.NoError(cluster.processRegionHeartbeat(region)) + checkRegions(re, cluster.core.Regions, regions[:i+1]) // Change ApproximateKeys. region = region.Clone(core.SetApproximateKeys(144000)) regions[i] = region - c.Assert(cluster.processRegionHeartbeat(region), IsNil) - checkRegions(c, cluster.core.Regions, regions[:i+1]) + re.NoError(cluster.processRegionHeartbeat(region)) + checkRegions(re, cluster.core.Regions, regions[:i+1]) // Change bytes written. region = region.Clone(core.SetWrittenBytes(24000)) regions[i] = region - c.Assert(cluster.processRegionHeartbeat(region), IsNil) - checkRegions(c, cluster.core.Regions, regions[:i+1]) + re.NoError(cluster.processRegionHeartbeat(region)) + checkRegions(re, cluster.core.Regions, regions[:i+1]) // Change bytes read. region = region.Clone(core.SetReadBytes(1080000)) regions[i] = region - c.Assert(cluster.processRegionHeartbeat(region), IsNil) - checkRegions(c, cluster.core.Regions, regions[:i+1]) + re.NoError(cluster.processRegionHeartbeat(region)) + checkRegions(re, cluster.core.Regions, regions[:i+1]) } regionCounts := make(map[uint64]int) @@ -782,31 +818,31 @@ func (s *testClusterInfoSuite) TestRegionHeartbeat(c *C) { } } for id, count := range regionCounts { - c.Assert(cluster.GetStoreRegionCount(id), Equals, count) + re.Equal(count, cluster.GetStoreRegionCount(id)) } for _, region := range cluster.GetRegions() { - checkRegion(c, region, regions[region.GetID()]) + checkRegion(re, region, regions[region.GetID()]) } for _, region := range cluster.GetMetaRegions() { - c.Assert(region, DeepEquals, regions[region.GetId()].GetMeta()) + re.Equal(regions[region.GetId()].GetMeta(), region) } for _, region := range regions { for _, store := range cluster.GetRegionStores(region) { - c.Assert(region.GetStorePeer(store.GetID()), NotNil) + re.NotNil(region.GetStorePeer(store.GetID())) } for _, store := range cluster.GetFollowerStores(region) { peer := region.GetStorePeer(store.GetID()) - c.Assert(peer.GetId(), Not(Equals), region.GetLeader().GetId()) + re.NotEqual(region.GetLeader().GetId(), peer.GetId()) } } for _, store := range cluster.core.Stores.GetStores() { - c.Assert(store.GetLeaderCount(), Equals, cluster.core.Regions.GetStoreLeaderCount(store.GetID())) - c.Assert(store.GetRegionCount(), Equals, cluster.core.Regions.GetStoreRegionCount(store.GetID())) - c.Assert(store.GetLeaderSize(), Equals, cluster.core.Regions.GetStoreLeaderRegionSize(store.GetID())) - c.Assert(store.GetRegionSize(), Equals, cluster.core.Regions.GetStoreRegionSize(store.GetID())) + re.Equal(cluster.core.Regions.GetStoreLeaderCount(store.GetID()), store.GetLeaderCount()) + re.Equal(cluster.core.Regions.GetStoreRegionCount(store.GetID()), store.GetRegionCount()) + re.Equal(cluster.core.Regions.GetStoreLeaderRegionSize(store.GetID()), store.GetLeaderSize()) + re.Equal(cluster.core.Regions.GetStoreRegionSize(store.GetID()), store.GetRegionSize()) } // Test with storage. 
@@ -814,9 +850,9 @@ func (s *testClusterInfoSuite) TestRegionHeartbeat(c *C) { for _, region := range regions { tmp := &metapb.Region{} ok, err := storage.LoadRegion(region.GetID(), tmp) - c.Assert(ok, IsTrue) - c.Assert(err, IsNil) - c.Assert(tmp, DeepEquals, region.GetMeta()) + re.True(ok) + re.NoError(err) + re.Equal(region.GetMeta(), tmp) } // Check overlap with stale version @@ -826,45 +862,49 @@ func (s *testClusterInfoSuite) TestRegionHeartbeat(c *C) { core.WithNewRegionID(10000), core.WithDecVersion(), ) - c.Assert(cluster.processRegionHeartbeat(overlapRegion), NotNil) + re.Error(cluster.processRegionHeartbeat(overlapRegion)) region := &metapb.Region{} ok, err := storage.LoadRegion(regions[n-1].GetID(), region) - c.Assert(ok, IsTrue) - c.Assert(err, IsNil) - c.Assert(region, DeepEquals, regions[n-1].GetMeta()) + re.True(ok) + re.NoError(err) + re.Equal(regions[n-1].GetMeta(), region) ok, err = storage.LoadRegion(regions[n-2].GetID(), region) - c.Assert(ok, IsTrue) - c.Assert(err, IsNil) - c.Assert(region, DeepEquals, regions[n-2].GetMeta()) + re.True(ok) + re.NoError(err) + re.Equal(regions[n-2].GetMeta(), region) ok, err = storage.LoadRegion(overlapRegion.GetID(), region) - c.Assert(ok, IsFalse) - c.Assert(err, IsNil) + re.False(ok) + re.NoError(err) // Check overlap overlapRegion = regions[n-1].Clone( core.WithStartKey(regions[n-2].GetStartKey()), core.WithNewRegionID(regions[n-1].GetID()+1), ) - c.Assert(cluster.processRegionHeartbeat(overlapRegion), IsNil) + re.NoError(cluster.processRegionHeartbeat(overlapRegion)) region = &metapb.Region{} ok, err = storage.LoadRegion(regions[n-1].GetID(), region) - c.Assert(ok, IsFalse) - c.Assert(err, IsNil) + re.False(ok) + re.NoError(err) ok, err = storage.LoadRegion(regions[n-2].GetID(), region) - c.Assert(ok, IsFalse) - c.Assert(err, IsNil) + re.False(ok) + re.NoError(err) ok, err = storage.LoadRegion(overlapRegion.GetID(), region) - c.Assert(ok, IsTrue) - c.Assert(err, IsNil) - c.Assert(region, DeepEquals, overlapRegion.GetMeta()) + re.True(ok) + re.NoError(err) + re.Equal(overlapRegion.GetMeta(), region) } } -func (s *testClusterInfoSuite) TestRegionFlowChanged(c *C) { +func TestRegionFlowChanged(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, err := newTestScheduleConfig() - c.Assert(err, IsNil) - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) - cluster.coordinator = newCoordinator(s.ctx, cluster, nil) + re.NoError(err) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + cluster.coordinator = newCoordinator(ctx, cluster, nil) regions := []*core.RegionInfo{core.NewTestRegionInfo([]byte{}, []byte{})} processRegions := func(regions []*core.RegionInfo) { for _, r := range regions { @@ -878,14 +918,18 @@ func (s *testClusterInfoSuite) TestRegionFlowChanged(c *C) { regions[0] = region.Clone(core.SetReadBytes(1000)) processRegions(regions) newRegion := cluster.GetRegion(region.GetID()) - c.Assert(newRegion.GetBytesRead(), Equals, uint64(1000)) + re.Equal(uint64(1000), newRegion.GetBytesRead()) } -func (s *testClusterInfoSuite) TestRegionSizeChanged(c *C) { +func TestRegionSizeChanged(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, err := newTestScheduleConfig() - c.Assert(err, IsNil) - cluster := newTestRaftCluster(s.ctx, 
mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) - cluster.coordinator = newCoordinator(s.ctx, cluster, nil) + re.NoError(err) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + cluster.coordinator = newCoordinator(ctx, cluster, nil) cluster.regionStats = statistics.NewRegionStatistics(cluster.GetOpts(), cluster.ruleManager, cluster.storeConfigManager) region := newTestRegions(1, 3, 3)[0] cluster.opt.GetMaxMergeRegionKeys() @@ -899,7 +943,7 @@ func (s *testClusterInfoSuite) TestRegionSizeChanged(c *C) { ) cluster.processRegionHeartbeat(region) regionID := region.GetID() - c.Assert(cluster.regionStats.IsRegionStatsType(regionID, statistics.UndersizedRegion), IsTrue) + re.True(cluster.regionStats.IsRegionStatsType(regionID, statistics.UndersizedRegion)) // Test ApproximateSize and ApproximateKeys change. region = region.Clone( core.WithLeader(region.GetPeers()[2]), @@ -908,53 +952,61 @@ func (s *testClusterInfoSuite) TestRegionSizeChanged(c *C) { core.SetFromHeartbeat(true), ) cluster.processRegionHeartbeat(region) - c.Assert(cluster.regionStats.IsRegionStatsType(regionID, statistics.UndersizedRegion), IsFalse) + re.False(cluster.regionStats.IsRegionStatsType(regionID, statistics.UndersizedRegion)) // Test MaxMergeRegionSize and MaxMergeRegionKeys change. cluster.opt.SetMaxMergeRegionSize((uint64(curMaxMergeSize + 2))) cluster.opt.SetMaxMergeRegionKeys((uint64(curMaxMergeKeys + 2))) cluster.processRegionHeartbeat(region) - c.Assert(cluster.regionStats.IsRegionStatsType(regionID, statistics.UndersizedRegion), IsTrue) + re.True(cluster.regionStats.IsRegionStatsType(regionID, statistics.UndersizedRegion)) cluster.opt.SetMaxMergeRegionSize((uint64(curMaxMergeSize))) cluster.opt.SetMaxMergeRegionKeys((uint64(curMaxMergeKeys))) cluster.processRegionHeartbeat(region) - c.Assert(cluster.regionStats.IsRegionStatsType(regionID, statistics.UndersizedRegion), IsFalse) + re.False(cluster.regionStats.IsRegionStatsType(regionID, statistics.UndersizedRegion)) } -func (s *testClusterInfoSuite) TestConcurrentReportBucket(c *C) { +func TestConcurrentReportBucket(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, err := newTestScheduleConfig() - c.Assert(err, IsNil) - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) - cluster.coordinator = newCoordinator(s.ctx, cluster, nil) + re.NoError(err) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + cluster.coordinator = newCoordinator(ctx, cluster, nil) regions := []*core.RegionInfo{core.NewTestRegionInfo([]byte{}, []byte{})} - heartbeatRegions(c, cluster, regions) - c.Assert(cluster.GetRegion(0), NotNil) + heartbeatRegions(re, cluster, regions) + re.NotNil(cluster.GetRegion(0)) bucket1 := &metapb.Buckets{RegionId: 0, Version: 3} bucket2 := &metapb.Buckets{RegionId: 0, Version: 2} var wg sync.WaitGroup wg.Add(1) - c.Assert(failpoint.Enable("github.com/tikv/pd/server/cluster/concurrentBucketHeartbeat", "return(true)"), IsNil) + re.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/concurrentBucketHeartbeat", "return(true)")) go func() { defer wg.Done() cluster.processReportBuckets(bucket1) }() time.Sleep(100 * time.Millisecond) - 
c.Assert(failpoint.Disable("github.com/tikv/pd/server/cluster/concurrentBucketHeartbeat"), IsNil) - c.Assert(cluster.processReportBuckets(bucket2), IsNil) + re.NoError(failpoint.Disable("github.com/tikv/pd/server/cluster/concurrentBucketHeartbeat")) + re.NoError(cluster.processReportBuckets(bucket2)) wg.Wait() - c.Assert(cluster.GetRegion(0).GetBuckets(), DeepEquals, bucket1) + re.Equal(bucket1, cluster.GetRegion(0).GetBuckets()) } -func (s *testClusterInfoSuite) TestConcurrentRegionHeartbeat(c *C) { +func TestConcurrentRegionHeartbeat(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, err := newTestScheduleConfig() - c.Assert(err, IsNil) - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) - cluster.coordinator = newCoordinator(s.ctx, cluster, nil) + re.NoError(err) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + cluster.coordinator = newCoordinator(ctx, cluster, nil) regions := []*core.RegionInfo{core.NewTestRegionInfo([]byte{}, []byte{})} regions = core.SplitRegions(regions) - heartbeatRegions(c, cluster, regions) + heartbeatRegions(re, cluster, regions) // Merge regions manually source, target := regions[0], regions[1] @@ -968,25 +1020,29 @@ func (s *testClusterInfoSuite) TestConcurrentRegionHeartbeat(c *C) { var wg sync.WaitGroup wg.Add(1) - c.Assert(failpoint.Enable("github.com/tikv/pd/server/cluster/concurrentRegionHeartbeat", "return(true)"), IsNil) + re.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/concurrentRegionHeartbeat", "return(true)")) go func() { defer wg.Done() cluster.processRegionHeartbeat(source) }() time.Sleep(100 * time.Millisecond) - c.Assert(failpoint.Disable("github.com/tikv/pd/server/cluster/concurrentRegionHeartbeat"), IsNil) - c.Assert(cluster.processRegionHeartbeat(target), IsNil) + re.NoError(failpoint.Disable("github.com/tikv/pd/server/cluster/concurrentRegionHeartbeat")) + re.NoError(cluster.processRegionHeartbeat(target)) wg.Wait() - checkRegion(c, cluster.GetRegionByKey([]byte{}), target) + checkRegion(re, cluster.GetRegionByKey([]byte{}), target) } -func (s *testClusterInfoSuite) TestRegionLabelIsolationLevel(c *C) { +func TestRegionLabelIsolationLevel(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, err := newTestScheduleConfig() cfg := opt.GetReplicationConfig() cfg.LocationLabels = []string{"zone"} opt.SetReplicationConfig(cfg) - c.Assert(err, IsNil) - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + re.NoError(err) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) for i := uint64(1); i <= 4; i++ { var labels []*metapb.StoreLabel @@ -1001,7 +1057,7 @@ func (s *testClusterInfoSuite) TestRegionLabelIsolationLevel(c *C) { State: metapb.StoreState_Up, Labels: labels, } - c.Assert(cluster.putStoreLocked(core.NewStoreInfo(store)), IsNil) + re.NoError(cluster.putStoreLocked(core.NewStoreInfo(store))) } peers := make([]*metapb.Peer, 0, 4) @@ -1022,52 +1078,56 @@ func (s *testClusterInfoSuite) TestRegionLabelIsolationLevel(c *C) { EndKey: []byte{byte(2)}, } r := core.NewRegionInfo(region, peers[0]) - c.Assert(cluster.putRegion(r), IsNil) + re.NoError(cluster.putRegion(r)) 
cluster.updateRegionsLabelLevelStats([]*core.RegionInfo{r}) counter := cluster.labelLevelStats.GetLabelCounter() - c.Assert(counter["none"], Equals, 0) - c.Assert(counter["zone"], Equals, 1) + re.Equal(0, counter["none"]) + re.Equal(1, counter["zone"]) } -func heartbeatRegions(c *C, cluster *RaftCluster, regions []*core.RegionInfo) { +func heartbeatRegions(re *require.Assertions, cluster *RaftCluster, regions []*core.RegionInfo) { // Heartbeat and check region one by one. for _, r := range regions { - c.Assert(cluster.processRegionHeartbeat(r), IsNil) + re.NoError(cluster.processRegionHeartbeat(r)) - checkRegion(c, cluster.GetRegion(r.GetID()), r) - checkRegion(c, cluster.GetRegionByKey(r.GetStartKey()), r) + checkRegion(re, cluster.GetRegion(r.GetID()), r) + checkRegion(re, cluster.GetRegionByKey(r.GetStartKey()), r) if len(r.GetEndKey()) > 0 { end := r.GetEndKey()[0] - checkRegion(c, cluster.GetRegionByKey([]byte{end - 1}), r) + checkRegion(re, cluster.GetRegionByKey([]byte{end - 1}), r) } } // Check all regions after handling all heartbeats. for _, r := range regions { - checkRegion(c, cluster.GetRegion(r.GetID()), r) - checkRegion(c, cluster.GetRegionByKey(r.GetStartKey()), r) + checkRegion(re, cluster.GetRegion(r.GetID()), r) + checkRegion(re, cluster.GetRegionByKey(r.GetStartKey()), r) if len(r.GetEndKey()) > 0 { end := r.GetEndKey()[0] - checkRegion(c, cluster.GetRegionByKey([]byte{end - 1}), r) + checkRegion(re, cluster.GetRegionByKey([]byte{end - 1}), r) result := cluster.GetRegionByKey([]byte{end + 1}) - c.Assert(result.GetID(), Not(Equals), r.GetID()) + re.NotEqual(r.GetID(), result.GetID()) } } } -func (s *testClusterInfoSuite) TestHeartbeatSplit(c *C) { +func TestHeartbeatSplit(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, err := newTestScheduleConfig() - c.Assert(err, IsNil) - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) - cluster.coordinator = newCoordinator(s.ctx, cluster, nil) + re.NoError(err) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + cluster.coordinator = newCoordinator(ctx, cluster, nil) // 1: [nil, nil) region1 := core.NewRegionInfo(&metapb.Region{Id: 1, RegionEpoch: &metapb.RegionEpoch{Version: 1, ConfVer: 1}}, nil) - c.Assert(cluster.processRegionHeartbeat(region1), IsNil) - checkRegion(c, cluster.GetRegionByKey([]byte("foo")), region1) + re.NoError(cluster.processRegionHeartbeat(region1)) + checkRegion(re, cluster.GetRegionByKey([]byte("foo")), region1) // split 1 to 2: [nil, m) 1: [m, nil), sync 2 first. region1 = region1.Clone( @@ -1075,13 +1135,13 @@ func (s *testClusterInfoSuite) TestHeartbeatSplit(c *C) { core.WithIncVersion(), ) region2 := core.NewRegionInfo(&metapb.Region{Id: 2, EndKey: []byte("m"), RegionEpoch: &metapb.RegionEpoch{Version: 1, ConfVer: 1}}, nil) - c.Assert(cluster.processRegionHeartbeat(region2), IsNil) - checkRegion(c, cluster.GetRegionByKey([]byte("a")), region2) + re.NoError(cluster.processRegionHeartbeat(region2)) + checkRegion(re, cluster.GetRegionByKey([]byte("a")), region2) // [m, nil) is missing before r1's heartbeat. 
- c.Assert(cluster.GetRegionByKey([]byte("z")), IsNil) + re.Nil(cluster.GetRegionByKey([]byte("z"))) - c.Assert(cluster.processRegionHeartbeat(region1), IsNil) - checkRegion(c, cluster.GetRegionByKey([]byte("z")), region1) + re.NoError(cluster.processRegionHeartbeat(region1)) + checkRegion(re, cluster.GetRegionByKey([]byte("z")), region1) // split 1 to 3: [m, q) 1: [q, nil), sync 1 first. region1 = region1.Clone( @@ -1089,20 +1149,24 @@ func (s *testClusterInfoSuite) TestHeartbeatSplit(c *C) { core.WithIncVersion(), ) region3 := core.NewRegionInfo(&metapb.Region{Id: 3, StartKey: []byte("m"), EndKey: []byte("q"), RegionEpoch: &metapb.RegionEpoch{Version: 1, ConfVer: 1}}, nil) - c.Assert(cluster.processRegionHeartbeat(region1), IsNil) - checkRegion(c, cluster.GetRegionByKey([]byte("z")), region1) - checkRegion(c, cluster.GetRegionByKey([]byte("a")), region2) + re.NoError(cluster.processRegionHeartbeat(region1)) + checkRegion(re, cluster.GetRegionByKey([]byte("z")), region1) + checkRegion(re, cluster.GetRegionByKey([]byte("a")), region2) // [m, q) is missing before r3's heartbeat. - c.Assert(cluster.GetRegionByKey([]byte("n")), IsNil) - c.Assert(cluster.processRegionHeartbeat(region3), IsNil) - checkRegion(c, cluster.GetRegionByKey([]byte("n")), region3) + re.Nil(cluster.GetRegionByKey([]byte("n"))) + re.NoError(cluster.processRegionHeartbeat(region3)) + checkRegion(re, cluster.GetRegionByKey([]byte("n")), region3) } -func (s *testClusterInfoSuite) TestRegionSplitAndMerge(c *C) { +func TestRegionSplitAndMerge(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, err := newTestScheduleConfig() - c.Assert(err, IsNil) - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) - cluster.coordinator = newCoordinator(s.ctx, cluster, nil) + re.NoError(err) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + cluster.coordinator = newCoordinator(ctx, cluster, nil) regions := []*core.RegionInfo{core.NewTestRegionInfo([]byte{}, []byte{})} @@ -1112,13 +1176,13 @@ func (s *testClusterInfoSuite) TestRegionSplitAndMerge(c *C) { // Split. for i := 0; i < n; i++ { regions = core.SplitRegions(regions) - heartbeatRegions(c, cluster, regions) + heartbeatRegions(re, cluster, regions) } // Merge. for i := 0; i < n; i++ { regions = core.MergeRegions(regions) - heartbeatRegions(c, cluster, regions) + heartbeatRegions(re, cluster, regions) } // Split twice and merge once. 
@@ -1128,15 +1192,19 @@ func (s *testClusterInfoSuite) TestRegionSplitAndMerge(c *C) { } else { regions = core.SplitRegions(regions) } - heartbeatRegions(c, cluster, regions) + heartbeatRegions(re, cluster, regions) } } -func (s *testClusterInfoSuite) TestOfflineAndMerge(c *C) { +func TestOfflineAndMerge(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, err := newTestScheduleConfig() - c.Assert(err, IsNil) - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) - cluster.coordinator = newCoordinator(s.ctx, cluster, nil) + re.NoError(err) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + cluster.coordinator = newCoordinator(ctx, cluster, nil) cluster.ruleManager = placement.NewRuleManager(storage.NewStorageWithMemoryBackend(), cluster, cluster.GetOpts()) if opt.IsPlacementRulesEnabled() { err := cluster.ruleManager.Initialize(opt.GetMaxReplicas(), opt.GetLocationLabels()) @@ -1145,11 +1213,11 @@ func (s *testClusterInfoSuite) TestOfflineAndMerge(c *C) { } } cluster.regionStats = statistics.NewRegionStatistics(cluster.GetOpts(), cluster.ruleManager, cluster.storeConfigManager) - cluster.coordinator = newCoordinator(s.ctx, cluster, nil) + cluster.coordinator = newCoordinator(ctx, cluster, nil) // Put 4 stores. for _, store := range newTestStores(4, "5.0.0") { - c.Assert(cluster.PutStore(store.GetMeta()), IsNil) + re.NoError(cluster.PutStore(store.GetMeta())) } peers := []*metapb.Peer{ @@ -1174,35 +1242,39 @@ func (s *testClusterInfoSuite) TestOfflineAndMerge(c *C) { regions := []*core.RegionInfo{origin} // store 1: up -> offline - c.Assert(cluster.RemoveStore(1, false), IsNil) + re.NoError(cluster.RemoveStore(1, false)) store := cluster.GetStore(1) - c.Assert(store.IsRemoving(), IsTrue) + re.True(store.IsRemoving()) // Split. n := 7 for i := 0; i < n; i++ { regions = core.SplitRegions(regions) } - heartbeatRegions(c, cluster, regions) - c.Assert(cluster.GetOfflineRegionStatsByType(statistics.OfflinePeer), HasLen, len(regions)) + heartbeatRegions(re, cluster, regions) + re.Len(cluster.GetOfflineRegionStatsByType(statistics.OfflinePeer), len(regions)) // Merge. 
for i := 0; i < n; i++ { regions = core.MergeRegions(regions) - heartbeatRegions(c, cluster, regions) - c.Assert(cluster.GetOfflineRegionStatsByType(statistics.OfflinePeer), HasLen, len(regions)) + heartbeatRegions(re, cluster, regions) + re.Len(cluster.GetOfflineRegionStatsByType(statistics.OfflinePeer), len(regions)) } } -func (s *testClusterInfoSuite) TestSyncConfig(c *C) { +func TestSyncConfig(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, err := newTestScheduleConfig() - c.Assert(err, IsNil) - tc := newTestCluster(s.ctx, opt) + re.NoError(err) + tc := newTestCluster(ctx, opt) stores := newTestStores(5, "2.0.0") for _, s := range stores { - c.Assert(tc.putStoreLocked(s), IsNil) + re.NoError(tc.putStoreLocked(s)) } - c.Assert(tc.getUpStores(), HasLen, 5) + re.Len(tc.getUpStores(), 5) testdata := []struct { whiteList []string @@ -1220,20 +1292,24 @@ func (s *testClusterInfoSuite) TestSyncConfig(c *C) { for _, v := range testdata { tc.storeConfigManager = config.NewTestStoreConfigManager(v.whiteList) - c.Assert(tc.GetStoreConfig().GetRegionMaxSize(), Equals, uint64(144)) - c.Assert(syncConfig(tc.storeConfigManager, tc.GetStores()), Equals, v.updated) - c.Assert(tc.GetStoreConfig().GetRegionMaxSize(), Equals, v.maxRegionSize) + re.Equal(uint64(144), tc.GetStoreConfig().GetRegionMaxSize()) + re.Equal(v.updated, syncConfig(tc.storeConfigManager, tc.GetStores())) + re.Equal(v.maxRegionSize, tc.GetStoreConfig().GetRegionMaxSize()) } } -func (s *testClusterInfoSuite) TestUpdateStorePendingPeerCount(c *C) { +func TestUpdateStorePendingPeerCount(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, err := newTestScheduleConfig() - c.Assert(err, IsNil) - tc := newTestCluster(s.ctx, opt) - tc.RaftCluster.coordinator = newCoordinator(s.ctx, tc.RaftCluster, nil) + re.NoError(err) + tc := newTestCluster(ctx, opt) + tc.RaftCluster.coordinator = newCoordinator(ctx, tc.RaftCluster, nil) stores := newTestStores(5, "2.0.0") for _, s := range stores { - c.Assert(tc.putStoreLocked(s), IsNil) + re.NoError(tc.putStoreLocked(s)) } peers := []*metapb.Peer{ { @@ -1254,14 +1330,16 @@ func (s *testClusterInfoSuite) TestUpdateStorePendingPeerCount(c *C) { }, } origin := core.NewRegionInfo(&metapb.Region{Id: 1, Peers: peers[:3]}, peers[0], core.WithPendingPeers(peers[1:3])) - c.Assert(tc.processRegionHeartbeat(origin), IsNil) - checkPendingPeerCount([]int{0, 1, 1, 0}, tc.RaftCluster, c) + re.NoError(tc.processRegionHeartbeat(origin)) + checkPendingPeerCount([]int{0, 1, 1, 0}, tc.RaftCluster, re) newRegion := core.NewRegionInfo(&metapb.Region{Id: 1, Peers: peers[1:]}, peers[1], core.WithPendingPeers(peers[3:4])) - c.Assert(tc.processRegionHeartbeat(newRegion), IsNil) - checkPendingPeerCount([]int{0, 0, 0, 1}, tc.RaftCluster, c) + re.NoError(tc.processRegionHeartbeat(newRegion)) + checkPendingPeerCount([]int{0, 0, 0, 1}, tc.RaftCluster, re) } -func (s *testClusterInfoSuite) TestTopologyWeight(c *C) { +func TestTopologyWeight(t *testing.T) { + re := require.New(t) + labels := []string{"zone", "rack", "host"} zones := []string{"z1", "z2", "z3"} racks := []string{"r1", "r2", "r3"} @@ -1287,17 +1365,21 @@ func (s *testClusterInfoSuite) TestTopologyWeight(c *C) { } } - c.Assert(getStoreTopoWeight(testStore, stores, labels), Equals, 1.0/3/3/4) + re.Equal(1.0/3/3/4, getStoreTopoWeight(testStore, stores, labels)) } -func (s *testClusterInfoSuite) TestCalculateStoreSize1(c *C) { 
+func TestCalculateStoreSize1(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, err := newTestScheduleConfig() - c.Assert(err, IsNil) + re.NoError(err) cfg := opt.GetReplicationConfig() cfg.EnablePlacementRules = true opt.SetReplicationConfig(cfg) - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) - cluster.coordinator = newCoordinator(s.ctx, cluster, nil) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + cluster.coordinator = newCoordinator(ctx, cluster, nil) cluster.regionStats = statistics.NewRegionStatistics(cluster.GetOpts(), cluster.ruleManager, cluster.storeConfigManager) // Put 10 stores. @@ -1324,7 +1406,7 @@ func (s *testClusterInfoSuite) TestCalculateStoreSize1(c *C) { }, }...) s := store.Clone(core.SetStoreLabels(labels)) - c.Assert(cluster.PutStore(s.GetMeta()), IsNil) + re.NoError(cluster.PutStore(s.GetMeta())) } cluster.ruleManager.SetRule( @@ -1354,29 +1436,33 @@ func (s *testClusterInfoSuite) TestCalculateStoreSize1(c *C) { regions := newTestRegions(100, 10, 5) for _, region := range regions { - c.Assert(cluster.putRegion(region), IsNil) + re.NoError(cluster.putRegion(region)) } stores := cluster.GetStores() store := cluster.GetStore(1) // 100 * 100 * 2 (placement rule) / 4 (host) * 0.9 = 4500 - c.Assert(cluster.getThreshold(stores, store), Equals, 4500.0) + re.Equal(4500.0, cluster.getThreshold(stores, store)) cluster.opt.SetPlacementRuleEnabled(false) cluster.opt.SetLocationLabels([]string{"zone", "rack", "host"}) // 30000 (total region size) / 3 (zone) / 4 (host) * 0.9 = 2250 - c.Assert(cluster.getThreshold(stores, store), Equals, 2250.0) + re.Equal(2250.0, cluster.getThreshold(stores, store)) } -func (s *testClusterInfoSuite) TestCalculateStoreSize2(c *C) { +func TestCalculateStoreSize2(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, err := newTestScheduleConfig() - c.Assert(err, IsNil) + re.NoError(err) cfg := opt.GetReplicationConfig() cfg.EnablePlacementRules = true opt.SetReplicationConfig(cfg) opt.SetMaxReplicas(3) - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) - cluster.coordinator = newCoordinator(s.ctx, cluster, nil) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + cluster.coordinator = newCoordinator(ctx, cluster, nil) cluster.regionStats = statistics.NewRegionStatistics(cluster.GetOpts(), cluster.ruleManager, cluster.storeConfigManager) // Put 10 stores. @@ -1401,7 +1487,7 @@ func (s *testClusterInfoSuite) TestCalculateStoreSize2(c *C) { } labels = append(labels, []*metapb.StoreLabel{{Key: "rack", Value: "r1"}, {Key: "host", Value: "h1"}}...) 
s := store.Clone(core.SetStoreLabels(labels)) - c.Assert(cluster.PutStore(s.GetMeta()), IsNil) + re.NoError(cluster.PutStore(s.GetMeta())) } cluster.ruleManager.SetRule( @@ -1431,129 +1517,115 @@ func (s *testClusterInfoSuite) TestCalculateStoreSize2(c *C) { regions := newTestRegions(100, 10, 5) for _, region := range regions { - c.Assert(cluster.putRegion(region), IsNil) + re.NoError(cluster.putRegion(region)) } stores := cluster.GetStores() store := cluster.GetStore(1) // 100 * 100 * 4 (total region size) / 2 (dc) / 2 (logic) / 3 (host) * 0.9 = 3000 - c.Assert(cluster.getThreshold(stores, store), Equals, 3000.0) + re.Equal(3000.0, cluster.getThreshold(stores, store)) } -var _ = Suite(&testStoresInfoSuite{}) - -type testStoresInfoSuite struct{} - -func (s *testStoresInfoSuite) TestStores(c *C) { +func TestStores(t *testing.T) { + re := require.New(t) n := uint64(10) cache := core.NewStoresInfo() stores := newTestStores(n, "2.0.0") for i, store := range stores { id := store.GetID() - c.Assert(cache.GetStore(id), IsNil) - c.Assert(cache.PauseLeaderTransfer(id), NotNil) + re.Nil(cache.GetStore(id)) + re.Error(cache.PauseLeaderTransfer(id)) cache.SetStore(store) - c.Assert(cache.GetStore(id), DeepEquals, store) - c.Assert(cache.GetStoreCount(), Equals, i+1) - c.Assert(cache.PauseLeaderTransfer(id), IsNil) - c.Assert(cache.GetStore(id).AllowLeaderTransfer(), IsFalse) - c.Assert(cache.PauseLeaderTransfer(id), NotNil) + re.Equal(store, cache.GetStore(id)) + re.Equal(i+1, cache.GetStoreCount()) + re.NoError(cache.PauseLeaderTransfer(id)) + re.False(cache.GetStore(id).AllowLeaderTransfer()) + re.Error(cache.PauseLeaderTransfer(id)) cache.ResumeLeaderTransfer(id) - c.Assert(cache.GetStore(id).AllowLeaderTransfer(), IsTrue) + re.True(cache.GetStore(id).AllowLeaderTransfer()) } - c.Assert(cache.GetStoreCount(), Equals, int(n)) + re.Equal(int(n), cache.GetStoreCount()) for _, store := range cache.GetStores() { - c.Assert(store, DeepEquals, stores[store.GetID()-1]) + re.Equal(stores[store.GetID()-1], store) } for _, store := range cache.GetMetaStores() { - c.Assert(store, DeepEquals, stores[store.GetId()-1].GetMeta()) + re.Equal(stores[store.GetId()-1].GetMeta(), store) } - c.Assert(cache.GetStoreCount(), Equals, int(n)) + re.Equal(int(n), cache.GetStoreCount()) } -var _ = Suite(&testRegionsInfoSuite{}) - -type testRegionsInfoSuite struct { - ctx context.Context - cancel context.CancelFunc -} - -func (s *testRegionsInfoSuite) TearDownTest(c *C) { - s.cancel() -} - -func (s *testRegionsInfoSuite) SetUpTest(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) -} +func Test(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() -func (s *testRegionsInfoSuite) Test(c *C) { n, np := uint64(10), uint64(3) regions := newTestRegions(n, n, np) _, opts, err := newTestScheduleConfig() - c.Assert(err, IsNil) - tc := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opts, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + re.NoError(err) + tc := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opts, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) cache := tc.core.Regions for i := uint64(0); i < n; i++ { region := regions[i] regionKey := []byte{byte(i)} - c.Assert(cache.GetRegion(i), IsNil) - c.Assert(cache.GetRegionByKey(regionKey), IsNil) - checkRegions(c, cache, regions[0:i]) + re.Nil(cache.GetRegion(i)) + re.Nil(cache.GetRegionByKey(regionKey)) + checkRegions(re, cache, regions[0:i]) cache.SetRegion(region) - 
checkRegion(c, cache.GetRegion(i), region) - checkRegion(c, cache.GetRegionByKey(regionKey), region) - checkRegions(c, cache, regions[0:(i+1)]) + checkRegion(re, cache.GetRegion(i), region) + checkRegion(re, cache.GetRegionByKey(regionKey), region) + checkRegions(re, cache, regions[0:(i+1)]) // previous region if i == 0 { - c.Assert(cache.GetPrevRegionByKey(regionKey), IsNil) + re.Nil(cache.GetPrevRegionByKey(regionKey)) } else { - checkRegion(c, cache.GetPrevRegionByKey(regionKey), regions[i-1]) + checkRegion(re, cache.GetPrevRegionByKey(regionKey), regions[i-1]) } // Update leader to peer np-1. newRegion := region.Clone(core.WithLeader(region.GetPeers()[np-1])) regions[i] = newRegion cache.SetRegion(newRegion) - checkRegion(c, cache.GetRegion(i), newRegion) - checkRegion(c, cache.GetRegionByKey(regionKey), newRegion) - checkRegions(c, cache, regions[0:(i+1)]) + checkRegion(re, cache.GetRegion(i), newRegion) + checkRegion(re, cache.GetRegionByKey(regionKey), newRegion) + checkRegions(re, cache, regions[0:(i+1)]) cache.RemoveRegion(region) - c.Assert(cache.GetRegion(i), IsNil) - c.Assert(cache.GetRegionByKey(regionKey), IsNil) - checkRegions(c, cache, regions[0:i]) + re.Nil(cache.GetRegion(i)) + re.Nil(cache.GetRegionByKey(regionKey)) + checkRegions(re, cache, regions[0:i]) // Reset leader to peer 0. newRegion = region.Clone(core.WithLeader(region.GetPeers()[0])) regions[i] = newRegion cache.SetRegion(newRegion) - checkRegion(c, cache.GetRegion(i), newRegion) - checkRegions(c, cache, regions[0:(i+1)]) - checkRegion(c, cache.GetRegionByKey(regionKey), newRegion) + checkRegion(re, cache.GetRegion(i), newRegion) + checkRegions(re, cache, regions[0:(i+1)]) + checkRegion(re, cache.GetRegionByKey(regionKey), newRegion) } for i := uint64(0); i < n; i++ { region := tc.RandLeaderRegion(i, []core.KeyRange{core.NewKeyRange("", "")}, schedule.IsRegionHealthy) - c.Assert(region.GetLeader().GetStoreId(), Equals, i) + re.Equal(i, region.GetLeader().GetStoreId()) region = tc.RandFollowerRegion(i, []core.KeyRange{core.NewKeyRange("", "")}, schedule.IsRegionHealthy) - c.Assert(region.GetLeader().GetStoreId(), Not(Equals), i) + re.NotEqual(i, region.GetLeader().GetStoreId()) - c.Assert(region.GetStorePeer(i), NotNil) + re.NotNil(region.GetStorePeer(i)) } // check overlaps // clone it otherwise there are two items with the same key in the tree overlapRegion := regions[n-1].Clone(core.WithStartKey(regions[n-2].GetStartKey())) cache.SetRegion(overlapRegion) - c.Assert(cache.GetRegion(n-2), IsNil) - c.Assert(cache.GetRegion(n-1), NotNil) + re.Nil(cache.GetRegion(n - 2)) + re.NotNil(cache.GetRegion(n - 1)) // All regions will be filtered out if they have pending peers. 
for i := uint64(0); i < n; i++ { @@ -1562,71 +1634,36 @@ func (s *testRegionsInfoSuite) Test(c *C) { newRegion := region.Clone(core.WithPendingPeers(region.GetPeers())) cache.SetRegion(newRegion) } - c.Assert(tc.RandLeaderRegion(i, []core.KeyRange{core.NewKeyRange("", "")}, schedule.IsRegionHealthy), IsNil) + re.Nil(tc.RandLeaderRegion(i, []core.KeyRange{core.NewKeyRange("", "")}, schedule.IsRegionHealthy)) } for i := uint64(0); i < n; i++ { - c.Assert(tc.RandFollowerRegion(i, []core.KeyRange{core.NewKeyRange("", "")}, schedule.IsRegionHealthy), IsNil) + re.Nil(tc.RandFollowerRegion(i, []core.KeyRange{core.NewKeyRange("", "")}, schedule.IsRegionHealthy)) } } -var _ = Suite(&testClusterUtilSuite{}) - -type testClusterUtilSuite struct{} +func TestCheckStaleRegion(t *testing.T) { + re := require.New(t) -func (s *testClusterUtilSuite) TestCheckStaleRegion(c *C) { // (0, 0) v.s. (0, 0) region := core.NewTestRegionInfo([]byte{}, []byte{}) origin := core.NewTestRegionInfo([]byte{}, []byte{}) - c.Assert(checkStaleRegion(region.GetMeta(), origin.GetMeta()), IsNil) - c.Assert(checkStaleRegion(origin.GetMeta(), region.GetMeta()), IsNil) + re.NoError(checkStaleRegion(region.GetMeta(), origin.GetMeta())) + re.NoError(checkStaleRegion(origin.GetMeta(), region.GetMeta())) // (1, 0) v.s. (0, 0) region.GetRegionEpoch().Version++ - c.Assert(checkStaleRegion(origin.GetMeta(), region.GetMeta()), IsNil) - c.Assert(checkStaleRegion(region.GetMeta(), origin.GetMeta()), NotNil) + re.NoError(checkStaleRegion(origin.GetMeta(), region.GetMeta())) + re.Error(checkStaleRegion(region.GetMeta(), origin.GetMeta())) // (1, 1) v.s. (0, 0) region.GetRegionEpoch().ConfVer++ - c.Assert(checkStaleRegion(origin.GetMeta(), region.GetMeta()), IsNil) - c.Assert(checkStaleRegion(region.GetMeta(), origin.GetMeta()), NotNil) + re.NoError(checkStaleRegion(origin.GetMeta(), region.GetMeta())) + re.Error(checkStaleRegion(region.GetMeta(), origin.GetMeta())) // (0, 1) v.s. 
(0, 0) region.GetRegionEpoch().Version-- - c.Assert(checkStaleRegion(origin.GetMeta(), region.GetMeta()), IsNil) - c.Assert(checkStaleRegion(region.GetMeta(), origin.GetMeta()), NotNil) -} - -var _ = Suite(&testGetStoresSuite{}) - -type testGetStoresSuite struct { - ctx context.Context - cancel context.CancelFunc - cluster *RaftCluster -} - -func (s *testGetStoresSuite) TearDownTest(c *C) { - s.cancel() -} - -func (s *testGetStoresSuite) SetUpSuite(c *C) { - _, opt, err := newTestScheduleConfig() - c.Assert(err, IsNil) - s.ctx, s.cancel = context.WithCancel(context.Background()) - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) - s.cluster = cluster - - stores := newTestStores(200, "2.0.0") - - for _, store := range stores { - c.Assert(s.cluster.putStoreLocked(store), IsNil) - } -} - -func (s *testGetStoresSuite) BenchmarkGetStores(c *C) { - for i := 0; i < c.N; i++ { - // Logic to benchmark - s.cluster.core.Stores.GetStores() - } + re.NoError(checkStaleRegion(origin.GetMeta(), region.GetMeta())) + re.Error(checkStaleRegion(region.GetMeta(), origin.GetMeta())) } type testCluster struct { @@ -1723,32 +1760,32 @@ func newTestRegionMeta(regionID uint64) *metapb.Region { } } -func checkRegion(c *C, a *core.RegionInfo, b *core.RegionInfo) { - c.Assert(a, DeepEquals, b) - c.Assert(a.GetMeta(), DeepEquals, b.GetMeta()) - c.Assert(a.GetLeader(), DeepEquals, b.GetLeader()) - c.Assert(a.GetPeers(), DeepEquals, b.GetPeers()) +func checkRegion(re *require.Assertions, a *core.RegionInfo, b *core.RegionInfo) { + re.Equal(b, a) + re.Equal(b.GetMeta(), a.GetMeta()) + re.Equal(b.GetLeader(), a.GetLeader()) + re.Equal(b.GetPeers(), a.GetPeers()) if len(a.GetDownPeers()) > 0 || len(b.GetDownPeers()) > 0 { - c.Assert(a.GetDownPeers(), DeepEquals, b.GetDownPeers()) + re.Equal(b.GetDownPeers(), a.GetDownPeers()) } if len(a.GetPendingPeers()) > 0 || len(b.GetPendingPeers()) > 0 { - c.Assert(a.GetPendingPeers(), DeepEquals, b.GetPendingPeers()) + re.Equal(b.GetPendingPeers(), a.GetPendingPeers()) } } -func checkRegionsKV(c *C, s storage.Storage, regions []*core.RegionInfo) { +func checkRegionsKV(re *require.Assertions, s storage.Storage, regions []*core.RegionInfo) { if s != nil { for _, region := range regions { var meta metapb.Region ok, err := s.LoadRegion(region.GetID(), &meta) - c.Assert(ok, IsTrue) - c.Assert(err, IsNil) - c.Assert(&meta, DeepEquals, region.GetMeta()) + re.True(ok) + re.NoError(err) + re.Equal(region.GetMeta(), &meta) } } } -func checkRegions(c *C, cache *core.RegionsInfo, regions []*core.RegionInfo) { +func checkRegions(re *require.Assertions, cache *core.RegionsInfo, regions []*core.RegionInfo) { regionCount := make(map[uint64]int) leaderCount := make(map[uint64]int) followerCount := make(map[uint64]int) @@ -1757,37 +1794,37 @@ func checkRegions(c *C, cache *core.RegionsInfo, regions []*core.RegionInfo) { regionCount[peer.StoreId]++ if peer.Id == region.GetLeader().Id { leaderCount[peer.StoreId]++ - checkRegion(c, cache.GetLeader(peer.StoreId, region), region) + checkRegion(re, cache.GetLeader(peer.StoreId, region), region) } else { followerCount[peer.StoreId]++ - checkRegion(c, cache.GetFollower(peer.StoreId, region), region) + checkRegion(re, cache.GetFollower(peer.StoreId, region), region) } } } - c.Assert(cache.GetRegionCount(), Equals, len(regions)) + re.Equal(len(regions), cache.GetRegionCount()) for id, count := range regionCount { - c.Assert(cache.GetStoreRegionCount(id), Equals, count) + 
re.Equal(count, cache.GetStoreRegionCount(id)) } for id, count := range leaderCount { - c.Assert(cache.GetStoreLeaderCount(id), Equals, count) + re.Equal(count, cache.GetStoreLeaderCount(id)) } for id, count := range followerCount { - c.Assert(cache.GetStoreFollowerCount(id), Equals, count) + re.Equal(count, cache.GetStoreFollowerCount(id)) } for _, region := range cache.GetRegions() { - checkRegion(c, region, regions[region.GetID()]) + checkRegion(re, region, regions[region.GetID()]) } for _, region := range cache.GetMetaRegions() { - c.Assert(region, DeepEquals, regions[region.GetId()].GetMeta()) + re.Equal(regions[region.GetId()].GetMeta(), region) } } -func checkPendingPeerCount(expect []int, cluster *RaftCluster, c *C) { +func checkPendingPeerCount(expect []int, cluster *RaftCluster, re *require.Assertions) { for i, e := range expect { s := cluster.core.Stores.GetStore(uint64(i + 1)) - c.Assert(s.GetPendingPeerCount(), Equals, e) + re.Equal(e, s.GetPendingPeerCount()) } } diff --git a/server/cluster/cluster_worker_test.go b/server/cluster/cluster_worker_test.go index b545ccd2d2c..8c747c6c847 100644 --- a/server/cluster/cluster_worker_test.go +++ b/server/cluster/cluster_worker_test.go @@ -16,47 +16,41 @@ package cluster import ( "context" + "testing" - . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/mock/mockid" "github.com/tikv/pd/server/core" _ "github.com/tikv/pd/server/schedulers" "github.com/tikv/pd/server/storage" ) -var _ = Suite(&testClusterWorkerSuite{}) +func TestReportSplit(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() -type testClusterWorkerSuite struct { - ctx context.Context - cancel context.CancelFunc -} - -func (s *testClusterWorkerSuite) TearDownTest(c *C) { - s.cancel() -} - -func (s *testClusterWorkerSuite) SetUpTest(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) -} - -func (s *testClusterWorkerSuite) TestReportSplit(c *C) { _, opt, err := newTestScheduleConfig() - c.Assert(err, IsNil) - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + re.NoError(err) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) left := &metapb.Region{Id: 1, StartKey: []byte("a"), EndKey: []byte("b")} right := &metapb.Region{Id: 2, StartKey: []byte("b"), EndKey: []byte("c")} _, err = cluster.HandleReportSplit(&pdpb.ReportSplitRequest{Left: left, Right: right}) - c.Assert(err, IsNil) + re.NoError(err) _, err = cluster.HandleReportSplit(&pdpb.ReportSplitRequest{Left: right, Right: left}) - c.Assert(err, NotNil) + re.Error(err) } -func (s *testClusterWorkerSuite) TestReportBatchSplit(c *C) { +func TestReportBatchSplit(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, err := newTestScheduleConfig() - c.Assert(err, IsNil) - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + re.NoError(err) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) regions := []*metapb.Region{ {Id: 1, StartKey: []byte(""), EndKey: []byte("a")}, {Id: 2, StartKey: []byte("a"), EndKey: []byte("b")}, @@ -64,5 +58,5 @@ func (s 
*testClusterWorkerSuite) TestReportBatchSplit(c *C) { {Id: 3, StartKey: []byte("c"), EndKey: []byte("")}, } _, err = cluster.HandleBatchReportSplit(&pdpb.ReportBatchSplitRequest{Regions: regions}) - c.Assert(err, IsNil) + re.NoError(err) } diff --git a/server/cluster/coordinator_test.go b/server/cluster/coordinator_test.go index b234374a765..a7d34ccb558 100644 --- a/server/cluster/coordinator_test.go +++ b/server/cluster/coordinator_test.go @@ -17,16 +17,16 @@ package cluster import ( "context" "encoding/json" + "github.com/pingcap/failpoint" "math/rand" "sync" "testing" "time" - . "github.com/pingcap/check" - "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/eraftpb" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/mock/mockhbstream" "github.com/tikv/pd/pkg/testutil" "github.com/tikv/pd/pkg/typeutil" @@ -149,95 +149,83 @@ func (c *testCluster) LoadRegion(regionID uint64, followerStoreIDs ...uint64) er return c.putRegion(core.NewRegionInfo(region, nil)) } -var _ = Suite(&testCoordinatorSuite{}) - -type testCoordinatorSuite struct { - ctx context.Context - cancel context.CancelFunc -} - -func (s *testCoordinatorSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) - c.Assert(failpoint.Enable("github.com/tikv/pd/server/schedule/unexpectedOperator", "return(true)"), IsNil) -} - -func (s *testCoordinatorSuite) TearDownSuite(c *C) { - s.cancel() -} +func TestBasic(t *testing.T) { + re := require.New(t) -func (s *testCoordinatorSuite) TestBasic(c *C) { - tc, co, cleanup := prepare(nil, nil, nil, c) + tc, co, cleanup := prepare(nil, nil, nil, re) defer cleanup() oc := co.opController - c.Assert(tc.addLeaderRegion(1, 1), IsNil) + re.NoError(tc.addLeaderRegion(1, 1)) op1 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpLeader) oc.AddWaitingOperator(op1) - c.Assert(oc.OperatorCount(operator.OpLeader), Equals, uint64(1)) - c.Assert(oc.GetOperator(1).RegionID(), Equals, op1.RegionID()) + re.Equal(uint64(1), oc.OperatorCount(operator.OpLeader)) + re.Equal(op1.RegionID(), oc.GetOperator(1).RegionID()) // Region 1 already has an operator, cannot add another one. op2 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpRegion) oc.AddWaitingOperator(op2) - c.Assert(oc.OperatorCount(operator.OpRegion), Equals, uint64(0)) + re.Equal(uint64(0), oc.OperatorCount(operator.OpRegion)) // Remove the operator manually, then we can add a new operator. - c.Assert(oc.RemoveOperator(op1), IsTrue) + re.True(oc.RemoveOperator(op1)) op3 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpRegion) oc.AddWaitingOperator(op3) - c.Assert(oc.OperatorCount(operator.OpRegion), Equals, uint64(1)) - c.Assert(oc.GetOperator(1).RegionID(), Equals, op3.RegionID()) + re.Equal(uint64(1), oc.OperatorCount(operator.OpRegion)) + re.Equal(op3.RegionID(), oc.GetOperator(1).RegionID()) } -func (s *testCoordinatorSuite) TestDispatch(c *C) { - tc, co, cleanup := prepare(nil, nil, nil, c) +func TestDispatch(t *testing.T) { + re := require.New(t) + + tc, co, cleanup := prepare(nil, nil, nil, re) defer cleanup() co.prepareChecker.prepared = true // Transfer peer from store 4 to store 1. 
- c.Assert(tc.addRegionStore(4, 40), IsNil) - c.Assert(tc.addRegionStore(3, 30), IsNil) - c.Assert(tc.addRegionStore(2, 20), IsNil) - c.Assert(tc.addRegionStore(1, 10), IsNil) - c.Assert(tc.addLeaderRegion(1, 2, 3, 4), IsNil) + re.NoError(tc.addRegionStore(4, 40)) + re.NoError(tc.addRegionStore(3, 30)) + re.NoError(tc.addRegionStore(2, 20)) + re.NoError(tc.addRegionStore(1, 10)) + re.NoError(tc.addLeaderRegion(1, 2, 3, 4)) // Transfer leader from store 4 to store 2. - c.Assert(tc.updateLeaderCount(4, 50), IsNil) - c.Assert(tc.updateLeaderCount(3, 50), IsNil) - c.Assert(tc.updateLeaderCount(2, 20), IsNil) - c.Assert(tc.updateLeaderCount(1, 10), IsNil) - c.Assert(tc.addLeaderRegion(2, 4, 3, 2), IsNil) + re.NoError(tc.updateLeaderCount(4, 50)) + re.NoError(tc.updateLeaderCount(3, 50)) + re.NoError(tc.updateLeaderCount(2, 20)) + re.NoError(tc.updateLeaderCount(1, 10)) + re.NoError(tc.addLeaderRegion(2, 4, 3, 2)) go co.runUntilStop() // Wait for schedule and turn off balance. - waitOperator(c, co, 1) - testutil.CheckTransferPeer(c, co.opController.GetOperator(1), operator.OpKind(0), 4, 1) - c.Assert(co.removeScheduler(schedulers.BalanceRegionName), IsNil) - waitOperator(c, co, 2) - testutil.CheckTransferLeader(c, co.opController.GetOperator(2), operator.OpKind(0), 4, 2) - c.Assert(co.removeScheduler(schedulers.BalanceLeaderName), IsNil) + waitOperator(re, co, 1) + testutil.CheckTransferPeerWithTestify(re, co.opController.GetOperator(1), operator.OpKind(0), 4, 1) + re.NoError(co.removeScheduler(schedulers.BalanceRegionName)) + waitOperator(re, co, 2) + testutil.CheckTransferLeaderWithTestify(re, co.opController.GetOperator(2), operator.OpKind(0), 4, 2) + re.NoError(co.removeScheduler(schedulers.BalanceLeaderName)) stream := mockhbstream.NewHeartbeatStream() // Transfer peer. region := tc.GetRegion(1).Clone() - c.Assert(dispatchHeartbeat(co, region, stream), IsNil) - region = waitAddLearner(c, stream, region, 1) - c.Assert(dispatchHeartbeat(co, region, stream), IsNil) - region = waitPromoteLearner(c, stream, region, 1) - c.Assert(dispatchHeartbeat(co, region, stream), IsNil) - region = waitRemovePeer(c, stream, region, 4) - c.Assert(dispatchHeartbeat(co, region, stream), IsNil) - c.Assert(dispatchHeartbeat(co, region, stream), IsNil) - waitNoResponse(c, stream) + re.NoError(dispatchHeartbeat(co, region, stream)) + region = waitAddLearner(re, stream, region, 1) + re.NoError(dispatchHeartbeat(co, region, stream)) + region = waitPromoteLearner(re, stream, region, 1) + re.NoError(dispatchHeartbeat(co, region, stream)) + region = waitRemovePeer(re, stream, region, 4) + re.NoError(dispatchHeartbeat(co, region, stream)) + re.NoError(dispatchHeartbeat(co, region, stream)) + waitNoResponse(re, stream) // Transfer leader. 
region = tc.GetRegion(2).Clone() - c.Assert(dispatchHeartbeat(co, region, stream), IsNil) - waitTransferLeader(c, stream, region, 2) - c.Assert(dispatchHeartbeat(co, region, stream), IsNil) - waitNoResponse(c, stream) + re.NoError(dispatchHeartbeat(co, region, stream)) + waitTransferLeader(re, stream, region, 2) + re.NoError(dispatchHeartbeat(co, region, stream)) + waitNoResponse(re, stream) } func dispatchHeartbeat(co *coordinator, region *core.RegionInfo, stream hbstream.HeartbeatStream) error { @@ -249,10 +237,12 @@ func dispatchHeartbeat(co *coordinator, region *core.RegionInfo, stream hbstream return nil } -func (s *testCoordinatorSuite) TestCollectMetrics(c *C) { +func TestCollectMetrics(t *testing.T) { + re := require.New(t) + tc, co, cleanup := prepare(nil, func(tc *testCluster) { tc.regionStats = statistics.NewRegionStatistics(tc.GetOpts(), nil, tc.storeConfigManager) - }, func(co *coordinator) { co.run() }, c) + }, func(co *coordinator) { co.run() }, re) defer cleanup() // Make sure there are no problem when concurrent write and read @@ -263,7 +253,7 @@ func (s *testCoordinatorSuite) TestCollectMetrics(c *C) { go func(i int) { defer wg.Done() for j := 0; j < 1000; j++ { - c.Assert(tc.addRegionStore(uint64(i%5), rand.Intn(200)), IsNil) + re.NoError(tc.addRegionStore(uint64(i%5), rand.Intn(200))) } }(i) } @@ -278,10 +268,10 @@ func (s *testCoordinatorSuite) TestCollectMetrics(c *C) { wg.Wait() } -func prepare(setCfg func(*config.ScheduleConfig), setTc func(*testCluster), run func(*coordinator), c *C) (*testCluster, *coordinator, func()) { +func prepare(setCfg func(*config.ScheduleConfig), setTc func(*testCluster), run func(*coordinator), re *require.Assertions) (*testCluster, *coordinator, func()) { ctx, cancel := context.WithCancel(context.Background()) cfg, opt, err := newTestScheduleConfig() - c.Assert(err, IsNil) + re.NoError(err) if setCfg != nil { setCfg(cfg) } @@ -302,28 +292,32 @@ func prepare(setCfg func(*config.ScheduleConfig), setTc func(*testCluster), run } } -func (s *testCoordinatorSuite) checkRegion(c *C, tc *testCluster, co *coordinator, regionID uint64, expectAddOperator int) { +func checkRegionAndOperator(re *require.Assertions, tc *testCluster, co *coordinator, regionID uint64, expectAddOperator int) { ops := co.checkers.CheckRegion(tc.GetRegion(regionID)) if ops == nil { - c.Assert(expectAddOperator, Equals, 0) + re.Equal(0, expectAddOperator) } else { - c.Assert(co.opController.AddWaitingOperator(ops...), Equals, expectAddOperator) + re.Equal(expectAddOperator, co.opController.AddWaitingOperator(ops...)) } } -func (s *testCoordinatorSuite) TestCheckRegion(c *C) { - tc, co, cleanup := prepare(nil, nil, nil, c) +func TestCheckRegion(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tc, co, cleanup := prepare(nil, nil, nil, re) hbStreams, opt := co.hbStreams, tc.opt defer cleanup() - c.Assert(tc.addRegionStore(4, 4), IsNil) - c.Assert(tc.addRegionStore(3, 3), IsNil) - c.Assert(tc.addRegionStore(2, 2), IsNil) - c.Assert(tc.addRegionStore(1, 1), IsNil) - c.Assert(tc.addLeaderRegion(1, 2, 3), IsNil) - s.checkRegion(c, tc, co, 1, 1) - testutil.CheckAddPeer(c, co.opController.GetOperator(1), operator.OpReplica, 1) - s.checkRegion(c, tc, co, 1, 0) + re.NoError(tc.addRegionStore(4, 4)) + re.NoError(tc.addRegionStore(3, 3)) + re.NoError(tc.addRegionStore(2, 2)) + re.NoError(tc.addRegionStore(1, 1)) + re.NoError(tc.addLeaderRegion(1, 2, 3)) + checkRegionAndOperator(re, tc, co, 1, 1) + 
testutil.CheckAddPeerWithTestify(re, co.opController.GetOperator(1), operator.OpReplica, 1) + checkRegionAndOperator(re, tc, co, 1, 0) r := tc.GetRegion(1) p := &metapb.Peer{Id: 1, StoreId: 1, Role: metapb.PeerRole_Learner} @@ -331,39 +325,41 @@ func (s *testCoordinatorSuite) TestCheckRegion(c *C) { core.WithAddPeer(p), core.WithPendingPeers(append(r.GetPendingPeers(), p)), ) - c.Assert(tc.putRegion(r), IsNil) - s.checkRegion(c, tc, co, 1, 0) - - tc = newTestCluster(s.ctx, opt) - co = newCoordinator(s.ctx, tc.RaftCluster, hbStreams) - - c.Assert(tc.addRegionStore(4, 4), IsNil) - c.Assert(tc.addRegionStore(3, 3), IsNil) - c.Assert(tc.addRegionStore(2, 2), IsNil) - c.Assert(tc.addRegionStore(1, 1), IsNil) - c.Assert(tc.putRegion(r), IsNil) - s.checkRegion(c, tc, co, 1, 0) + re.NoError(tc.putRegion(r)) + checkRegionAndOperator(re, tc, co, 1, 0) + + tc = newTestCluster(ctx, opt) + co = newCoordinator(ctx, tc.RaftCluster, hbStreams) + + re.NoError(tc.addRegionStore(4, 4)) + re.NoError(tc.addRegionStore(3, 3)) + re.NoError(tc.addRegionStore(2, 2)) + re.NoError(tc.addRegionStore(1, 1)) + re.NoError(tc.putRegion(r)) + checkRegionAndOperator(re, tc, co, 1, 0) r = r.Clone(core.WithPendingPeers(nil)) - c.Assert(tc.putRegion(r), IsNil) - s.checkRegion(c, tc, co, 1, 1) + re.NoError(tc.putRegion(r)) + checkRegionAndOperator(re, tc, co, 1, 1) op := co.opController.GetOperator(1) - c.Assert(op.Len(), Equals, 1) - c.Assert(op.Step(0).(operator.PromoteLearner).ToStore, Equals, uint64(1)) - s.checkRegion(c, tc, co, 1, 0) + re.Equal(1, op.Len()) + re.Equal(uint64(1), op.Step(0).(operator.PromoteLearner).ToStore) + checkRegionAndOperator(re, tc, co, 1, 0) } -func (s *testCoordinatorSuite) TestCheckRegionWithScheduleDeny(c *C) { - tc, co, cleanup := prepare(nil, nil, nil, c) +func TestCheckRegionWithScheduleDeny(t *testing.T) { + re := require.New(t) + + tc, co, cleanup := prepare(nil, nil, nil, re) defer cleanup() - c.Assert(tc.addRegionStore(4, 4), IsNil) - c.Assert(tc.addRegionStore(3, 3), IsNil) - c.Assert(tc.addRegionStore(2, 2), IsNil) - c.Assert(tc.addRegionStore(1, 1), IsNil) - c.Assert(tc.addLeaderRegion(1, 2, 3), IsNil) + re.NoError(tc.addRegionStore(4, 4)) + re.NoError(tc.addRegionStore(3, 3)) + re.NoError(tc.addRegionStore(2, 2)) + re.NoError(tc.addRegionStore(1, 1)) + re.NoError(tc.addLeaderRegion(1, 2, 3)) region := tc.GetRegion(1) - c.Assert(region, NotNil) + re.NotNil(region) // test with label schedule=deny labelerManager := tc.GetRegionLabeler() labelerManager.SetLabelRule(&labeler.LabelRule{ @@ -373,21 +369,23 @@ func (s *testCoordinatorSuite) TestCheckRegionWithScheduleDeny(c *C) { Data: []interface{}{map[string]interface{}{"start_key": "", "end_key": ""}}, }) - c.Assert(labelerManager.ScheduleDisabled(region), IsTrue) - s.checkRegion(c, tc, co, 1, 0) + re.True(labelerManager.ScheduleDisabled(region)) + checkRegionAndOperator(re, tc, co, 1, 0) labelerManager.DeleteLabelRule("schedulelabel") - c.Assert(labelerManager.ScheduleDisabled(region), IsFalse) - s.checkRegion(c, tc, co, 1, 1) + re.False(labelerManager.ScheduleDisabled(region)) + checkRegionAndOperator(re, tc, co, 1, 1) } -func (s *testCoordinatorSuite) TestCheckerIsBusy(c *C) { +func TestCheckerIsBusy(t *testing.T) { + re := require.New(t) + tc, co, cleanup := prepare(func(cfg *config.ScheduleConfig) { cfg.ReplicaScheduleLimit = 0 // ensure replica checker is busy cfg.MergeScheduleLimit = 10 - }, nil, nil, c) + }, nil, nil, re) defer cleanup() - c.Assert(tc.addRegionStore(1, 0), IsNil) + re.NoError(tc.addRegionStore(1, 0)) num := 1 + 
typeutil.MaxUint64(tc.opt.GetReplicaScheduleLimit(), tc.opt.GetMergeScheduleLimit()) var operatorKinds = []operator.OpKind{ operator.OpReplica, operator.OpRegion | operator.OpMerge, @@ -395,50 +393,52 @@ func (s *testCoordinatorSuite) TestCheckerIsBusy(c *C) { for i, operatorKind := range operatorKinds { for j := uint64(0); j < num; j++ { regionID := j + uint64(i+1)*num - c.Assert(tc.addLeaderRegion(regionID, 1), IsNil) + re.NoError(tc.addLeaderRegion(regionID, 1)) switch operatorKind { case operator.OpReplica: op := newTestOperator(regionID, tc.GetRegion(regionID).GetRegionEpoch(), operatorKind) - c.Assert(co.opController.AddWaitingOperator(op), Equals, 1) + re.Equal(1, co.opController.AddWaitingOperator(op)) case operator.OpRegion | operator.OpMerge: if regionID%2 == 1 { ops, err := operator.CreateMergeRegionOperator("merge-region", co.cluster, tc.GetRegion(regionID), tc.GetRegion(regionID-1), operator.OpMerge) - c.Assert(err, IsNil) - c.Assert(co.opController.AddWaitingOperator(ops...), Equals, len(ops)) + re.NoError(err) + re.Len(ops, co.opController.AddWaitingOperator(ops...)) } } } } - s.checkRegion(c, tc, co, num, 0) + checkRegionAndOperator(re, tc, co, num, 0) } -func (s *testCoordinatorSuite) TestReplica(c *C) { +func TestReplica(t *testing.T) { + re := require.New(t) + tc, co, cleanup := prepare(func(cfg *config.ScheduleConfig) { // Turn off balance. cfg.LeaderScheduleLimit = 0 cfg.RegionScheduleLimit = 0 - }, nil, func(co *coordinator) { co.run() }, c) + }, nil, func(co *coordinator) { co.run() }, re) defer cleanup() - c.Assert(tc.addRegionStore(1, 1), IsNil) - c.Assert(tc.addRegionStore(2, 2), IsNil) - c.Assert(tc.addRegionStore(3, 3), IsNil) - c.Assert(tc.addRegionStore(4, 4), IsNil) + re.NoError(tc.addRegionStore(1, 1)) + re.NoError(tc.addRegionStore(2, 2)) + re.NoError(tc.addRegionStore(3, 3)) + re.NoError(tc.addRegionStore(4, 4)) stream := mockhbstream.NewHeartbeatStream() // Add peer to store 1. - c.Assert(tc.addLeaderRegion(1, 2, 3), IsNil) + re.NoError(tc.addLeaderRegion(1, 2, 3)) region := tc.GetRegion(1) - c.Assert(dispatchHeartbeat(co, region, stream), IsNil) - region = waitAddLearner(c, stream, region, 1) - c.Assert(dispatchHeartbeat(co, region, stream), IsNil) - region = waitPromoteLearner(c, stream, region, 1) - c.Assert(dispatchHeartbeat(co, region, stream), IsNil) - waitNoResponse(c, stream) + re.NoError(dispatchHeartbeat(co, region, stream)) + region = waitAddLearner(re, stream, region, 1) + re.NoError(dispatchHeartbeat(co, region, stream)) + region = waitPromoteLearner(re, stream, region, 1) + re.NoError(dispatchHeartbeat(co, region, stream)) + waitNoResponse(re, stream) // Peer in store 3 is down, remove peer in store 3 and add peer to store 4. 
- c.Assert(tc.setStoreDown(3), IsNil) + re.NoError(tc.setStoreDown(3)) downPeer := &pdpb.PeerStats{ Peer: region.GetStorePeer(3), DownSeconds: 24 * 60 * 60, @@ -446,50 +446,52 @@ func (s *testCoordinatorSuite) TestReplica(c *C) { region = region.Clone( core.WithDownPeers(append(region.GetDownPeers(), downPeer)), ) - c.Assert(dispatchHeartbeat(co, region, stream), IsNil) - region = waitAddLearner(c, stream, region, 4) - c.Assert(dispatchHeartbeat(co, region, stream), IsNil) - region = waitPromoteLearner(c, stream, region, 4) + re.NoError(dispatchHeartbeat(co, region, stream)) + region = waitAddLearner(re, stream, region, 4) + re.NoError(dispatchHeartbeat(co, region, stream)) + region = waitPromoteLearner(re, stream, region, 4) region = region.Clone(core.WithDownPeers(nil)) - c.Assert(dispatchHeartbeat(co, region, stream), IsNil) - waitNoResponse(c, stream) + re.NoError(dispatchHeartbeat(co, region, stream)) + waitNoResponse(re, stream) // Remove peer from store 4. - c.Assert(tc.addLeaderRegion(2, 1, 2, 3, 4), IsNil) + re.NoError(tc.addLeaderRegion(2, 1, 2, 3, 4)) region = tc.GetRegion(2) - c.Assert(dispatchHeartbeat(co, region, stream), IsNil) - region = waitRemovePeer(c, stream, region, 4) - c.Assert(dispatchHeartbeat(co, region, stream), IsNil) - waitNoResponse(c, stream) + re.NoError(dispatchHeartbeat(co, region, stream)) + region = waitRemovePeer(re, stream, region, 4) + re.NoError(dispatchHeartbeat(co, region, stream)) + waitNoResponse(re, stream) // Remove offline peer directly when it's pending. - c.Assert(tc.addLeaderRegion(3, 1, 2, 3), IsNil) - c.Assert(tc.setStoreOffline(3), IsNil) + re.NoError(tc.addLeaderRegion(3, 1, 2, 3)) + re.NoError(tc.setStoreOffline(3)) region = tc.GetRegion(3) region = region.Clone(core.WithPendingPeers([]*metapb.Peer{region.GetStorePeer(3)})) - c.Assert(dispatchHeartbeat(co, region, stream), IsNil) - waitNoResponse(c, stream) + re.NoError(dispatchHeartbeat(co, region, stream)) + waitNoResponse(re, stream) } -func (s *testCoordinatorSuite) TestCheckCache(c *C) { +func TestCheckCache(t *testing.T) { + re := require.New(t) + tc, co, cleanup := prepare(func(cfg *config.ScheduleConfig) { // Turn off replica scheduling. cfg.ReplicaScheduleLimit = 0 - }, nil, nil, c) + }, nil, nil, re) defer cleanup() - c.Assert(tc.addRegionStore(1, 0), IsNil) - c.Assert(tc.addRegionStore(2, 0), IsNil) - c.Assert(tc.addRegionStore(3, 0), IsNil) + re.NoError(tc.addRegionStore(1, 0)) + re.NoError(tc.addRegionStore(2, 0)) + re.NoError(tc.addRegionStore(3, 0)) // Add a peer with two replicas. 
- c.Assert(tc.addLeaderRegion(1, 2, 3), IsNil) - c.Assert(failpoint.Enable("github.com/tikv/pd/server/cluster/break-patrol", `return`), IsNil) + re.NoError(tc.addLeaderRegion(1, 2, 3)) + re.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/break-patrol", `return`)) // case 1: operator cannot be created due to replica-schedule-limit restriction co.wg.Add(1) co.patrolRegions() - c.Assert(co.checkers.GetWaitingRegions(), HasLen, 1) + re.Len(co.checkers.GetWaitingRegions(), 1) // cancel the replica-schedule-limit restriction opt := tc.GetOpts() @@ -499,88 +501,92 @@ func (s *testCoordinatorSuite) TestCheckCache(c *C) { co.wg.Add(1) co.patrolRegions() oc := co.opController - c.Assert(oc.GetOperators(), HasLen, 1) - c.Assert(co.checkers.GetWaitingRegions(), HasLen, 0) + re.Len(oc.GetOperators(), 1) + re.Len(co.checkers.GetWaitingRegions(), 0) // case 2: operator cannot be created due to store limit restriction oc.RemoveOperator(oc.GetOperator(1)) tc.SetStoreLimit(1, storelimit.AddPeer, 0) co.wg.Add(1) co.patrolRegions() - c.Assert(co.checkers.GetWaitingRegions(), HasLen, 1) + re.Len(co.checkers.GetWaitingRegions(), 1) // cancel the store limit restriction tc.SetStoreLimit(1, storelimit.AddPeer, 10) time.Sleep(1 * time.Second) co.wg.Add(1) co.patrolRegions() - c.Assert(oc.GetOperators(), HasLen, 1) - c.Assert(co.checkers.GetWaitingRegions(), HasLen, 0) + re.Len(oc.GetOperators(), 1) + re.Len(co.checkers.GetWaitingRegions(), 0) co.wg.Wait() - c.Assert(failpoint.Disable("github.com/tikv/pd/server/cluster/break-patrol"), IsNil) + re.NoError(failpoint.Disable("github.com/tikv/pd/server/cluster/break-patrol")) } -func (s *testCoordinatorSuite) TestPeerState(c *C) { - tc, co, cleanup := prepare(nil, nil, func(co *coordinator) { co.run() }, c) +func TestPeerState(t *testing.T) { + re := require.New(t) + + tc, co, cleanup := prepare(nil, nil, func(co *coordinator) { co.run() }, re) defer cleanup() // Transfer peer from store 4 to store 1. - c.Assert(tc.addRegionStore(1, 10), IsNil) - c.Assert(tc.addRegionStore(2, 10), IsNil) - c.Assert(tc.addRegionStore(3, 10), IsNil) - c.Assert(tc.addRegionStore(4, 40), IsNil) - c.Assert(tc.addLeaderRegion(1, 2, 3, 4), IsNil) + re.NoError(tc.addRegionStore(1, 10)) + re.NoError(tc.addRegionStore(2, 10)) + re.NoError(tc.addRegionStore(3, 10)) + re.NoError(tc.addRegionStore(4, 40)) + re.NoError(tc.addLeaderRegion(1, 2, 3, 4)) stream := mockhbstream.NewHeartbeatStream() // Wait for schedule. - waitOperator(c, co, 1) - testutil.CheckTransferPeer(c, co.opController.GetOperator(1), operator.OpKind(0), 4, 1) + waitOperator(re, co, 1) + testutil.CheckTransferPeerWithTestify(re, co.opController.GetOperator(1), operator.OpKind(0), 4, 1) region := tc.GetRegion(1).Clone() // Add new peer. - c.Assert(dispatchHeartbeat(co, region, stream), IsNil) - region = waitAddLearner(c, stream, region, 1) - c.Assert(dispatchHeartbeat(co, region, stream), IsNil) - region = waitPromoteLearner(c, stream, region, 1) + re.NoError(dispatchHeartbeat(co, region, stream)) + region = waitAddLearner(re, stream, region, 1) + re.NoError(dispatchHeartbeat(co, region, stream)) + region = waitPromoteLearner(re, stream, region, 1) // If the new peer is pending, the operator will not finish. 
region = region.Clone(core.WithPendingPeers(append(region.GetPendingPeers(), region.GetStorePeer(1)))) - c.Assert(dispatchHeartbeat(co, region, stream), IsNil) - waitNoResponse(c, stream) - c.Assert(co.opController.GetOperator(region.GetID()), NotNil) + re.NoError(dispatchHeartbeat(co, region, stream)) + waitNoResponse(re, stream) + re.NotNil(co.opController.GetOperator(region.GetID())) // The new peer is not pending now, the operator will finish. // And we will proceed to remove peer in store 4. region = region.Clone(core.WithPendingPeers(nil)) - c.Assert(dispatchHeartbeat(co, region, stream), IsNil) - waitRemovePeer(c, stream, region, 4) - c.Assert(tc.addLeaderRegion(1, 1, 2, 3), IsNil) + re.NoError(dispatchHeartbeat(co, region, stream)) + waitRemovePeer(re, stream, region, 4) + re.NoError(tc.addLeaderRegion(1, 1, 2, 3)) region = tc.GetRegion(1).Clone() - c.Assert(dispatchHeartbeat(co, region, stream), IsNil) - waitNoResponse(c, stream) + re.NoError(dispatchHeartbeat(co, region, stream)) + waitNoResponse(re, stream) } -func (s *testCoordinatorSuite) TestShouldRun(c *C) { - tc, co, cleanup := prepare(nil, nil, nil, c) +func TestShouldRun(t *testing.T) { + re := require.New(t) + + tc, co, cleanup := prepare(nil, nil, nil, re) tc.RaftCluster.coordinator = co defer cleanup() - c.Assert(tc.addLeaderStore(1, 5), IsNil) - c.Assert(tc.addLeaderStore(2, 2), IsNil) - c.Assert(tc.addLeaderStore(3, 0), IsNil) - c.Assert(tc.addLeaderStore(4, 0), IsNil) - c.Assert(tc.LoadRegion(1, 1, 2, 3), IsNil) - c.Assert(tc.LoadRegion(2, 1, 2, 3), IsNil) - c.Assert(tc.LoadRegion(3, 1, 2, 3), IsNil) - c.Assert(tc.LoadRegion(4, 1, 2, 3), IsNil) - c.Assert(tc.LoadRegion(5, 1, 2, 3), IsNil) - c.Assert(tc.LoadRegion(6, 2, 1, 4), IsNil) - c.Assert(tc.LoadRegion(7, 2, 1, 4), IsNil) - c.Assert(co.shouldRun(), IsFalse) - c.Assert(tc.core.Regions.GetStoreRegionCount(4), Equals, 2) + re.NoError(tc.addLeaderStore(1, 5)) + re.NoError(tc.addLeaderStore(2, 2)) + re.NoError(tc.addLeaderStore(3, 0)) + re.NoError(tc.addLeaderStore(4, 0)) + re.NoError(tc.LoadRegion(1, 1, 2, 3)) + re.NoError(tc.LoadRegion(2, 1, 2, 3)) + re.NoError(tc.LoadRegion(3, 1, 2, 3)) + re.NoError(tc.LoadRegion(4, 1, 2, 3)) + re.NoError(tc.LoadRegion(5, 1, 2, 3)) + re.NoError(tc.LoadRegion(6, 2, 1, 4)) + re.NoError(tc.LoadRegion(7, 2, 1, 4)) + re.False(co.shouldRun()) + re.Equal(2, tc.core.Regions.GetStoreRegionCount(4)) tbl := []struct { regionID uint64 @@ -599,28 +605,30 @@ func (s *testCoordinatorSuite) TestShouldRun(c *C) { for _, t := range tbl { r := tc.GetRegion(t.regionID) nr := r.Clone(core.WithLeader(r.GetPeers()[0])) - c.Assert(tc.processRegionHeartbeat(nr), IsNil) - c.Assert(co.shouldRun(), Equals, t.shouldRun) + re.NoError(tc.processRegionHeartbeat(nr)) + re.Equal(t.shouldRun, co.shouldRun()) } nr := &metapb.Region{Id: 6, Peers: []*metapb.Peer{}} newRegion := core.NewRegionInfo(nr, nil) - c.Assert(tc.processRegionHeartbeat(newRegion), NotNil) - c.Assert(co.prepareChecker.sum, Equals, 7) + re.Error(tc.processRegionHeartbeat(newRegion)) + re.Equal(7, co.prepareChecker.sum) } -func (s *testCoordinatorSuite) TestShouldRunWithNonLeaderRegions(c *C) { - tc, co, cleanup := prepare(nil, nil, nil, c) +func TestShouldRunWithNonLeaderRegions(t *testing.T) { + re := require.New(t) + + tc, co, cleanup := prepare(nil, nil, nil, re) tc.RaftCluster.coordinator = co defer cleanup() - c.Assert(tc.addLeaderStore(1, 10), IsNil) - c.Assert(tc.addLeaderStore(2, 0), IsNil) - c.Assert(tc.addLeaderStore(3, 0), IsNil) + re.NoError(tc.addLeaderStore(1, 10)) + 
re.NoError(tc.addLeaderStore(2, 0)) + re.NoError(tc.addLeaderStore(3, 0)) for i := 0; i < 10; i++ { - c.Assert(tc.LoadRegion(uint64(i+1), 1, 2, 3), IsNil) + re.NoError(tc.LoadRegion(uint64(i+1), 1, 2, 3)) } - c.Assert(co.shouldRun(), IsFalse) - c.Assert(tc.core.Regions.GetStoreRegionCount(1), Equals, 10) + re.False(co.shouldRun()) + re.Equal(10, tc.core.Regions.GetStoreRegionCount(1)) tbl := []struct { regionID uint64 @@ -640,289 +648,307 @@ func (s *testCoordinatorSuite) TestShouldRunWithNonLeaderRegions(c *C) { for _, t := range tbl { r := tc.GetRegion(t.regionID) nr := r.Clone(core.WithLeader(r.GetPeers()[0])) - c.Assert(tc.processRegionHeartbeat(nr), IsNil) - c.Assert(co.shouldRun(), Equals, t.shouldRun) + re.NoError(tc.processRegionHeartbeat(nr)) + re.Equal(t.shouldRun, co.shouldRun()) } nr := &metapb.Region{Id: 9, Peers: []*metapb.Peer{}} newRegion := core.NewRegionInfo(nr, nil) - c.Assert(tc.processRegionHeartbeat(newRegion), NotNil) - c.Assert(co.prepareChecker.sum, Equals, 9) + re.Error(tc.processRegionHeartbeat(newRegion)) + re.Equal(9, co.prepareChecker.sum) // Now, after server is prepared, there exist some regions with no leader. - c.Assert(tc.GetRegion(10).GetLeader().GetStoreId(), Equals, uint64(0)) + re.Equal(uint64(0), tc.GetRegion(10).GetLeader().GetStoreId()) } -func (s *testCoordinatorSuite) TestAddScheduler(c *C) { - tc, co, cleanup := prepare(nil, nil, func(co *coordinator) { co.run() }, c) +func TestAddScheduler(t *testing.T) { + re := require.New(t) + + tc, co, cleanup := prepare(nil, nil, func(co *coordinator) { co.run() }, re) defer cleanup() - c.Assert(co.schedulers, HasLen, len(config.DefaultSchedulers)) - c.Assert(co.removeScheduler(schedulers.BalanceLeaderName), IsNil) - c.Assert(co.removeScheduler(schedulers.BalanceRegionName), IsNil) - c.Assert(co.removeScheduler(schedulers.HotRegionName), IsNil) - c.Assert(co.removeScheduler(schedulers.SplitBucketName), IsNil) - c.Assert(co.schedulers, HasLen, 0) + re.Len(co.schedulers, len(config.DefaultSchedulers)) + re.NoError(co.removeScheduler(schedulers.BalanceLeaderName)) + re.NoError(co.removeScheduler(schedulers.BalanceRegionName)) + re.NoError(co.removeScheduler(schedulers.HotRegionName)) + re.NoError(co.removeScheduler(schedulers.SplitBucketName)) + re.Len(co.schedulers, 0) stream := mockhbstream.NewHeartbeatStream() // Add stores 1,2,3 - c.Assert(tc.addLeaderStore(1, 1), IsNil) - c.Assert(tc.addLeaderStore(2, 1), IsNil) - c.Assert(tc.addLeaderStore(3, 1), IsNil) + re.NoError(tc.addLeaderStore(1, 1)) + re.NoError(tc.addLeaderStore(2, 1)) + re.NoError(tc.addLeaderStore(3, 1)) // Add regions 1 with leader in store 1 and followers in stores 2,3 - c.Assert(tc.addLeaderRegion(1, 1, 2, 3), IsNil) + re.NoError(tc.addLeaderRegion(1, 1, 2, 3)) // Add regions 2 with leader in store 2 and followers in stores 1,3 - c.Assert(tc.addLeaderRegion(2, 2, 1, 3), IsNil) + re.NoError(tc.addLeaderRegion(2, 2, 1, 3)) // Add regions 3 with leader in store 3 and followers in stores 1,2 - c.Assert(tc.addLeaderRegion(3, 3, 1, 2), IsNil) + re.NoError(tc.addLeaderRegion(3, 3, 1, 2)) oc := co.opController // test ConfigJSONDecoder create bl, err := schedule.CreateScheduler(schedulers.BalanceLeaderType, oc, storage.NewStorageWithMemoryBackend(), schedule.ConfigJSONDecoder([]byte("{}"))) - c.Assert(err, IsNil) + re.NoError(err) conf, err := bl.EncodeConfig() - c.Assert(err, IsNil) + re.NoError(err) data := make(map[string]interface{}) err = json.Unmarshal(conf, &data) - c.Assert(err, IsNil) + re.NoError(err) batch := data["batch"].(float64) - 
c.Assert(int(batch), Equals, 4) + re.Equal(4, int(batch)) gls, err := schedule.CreateScheduler(schedulers.GrantLeaderType, oc, storage.NewStorageWithMemoryBackend(), schedule.ConfigSliceDecoder(schedulers.GrantLeaderType, []string{"0"})) - c.Assert(err, IsNil) - c.Assert(co.addScheduler(gls), NotNil) - c.Assert(co.removeScheduler(gls.GetName()), NotNil) + re.NoError(err) + re.NotNil(co.addScheduler(gls)) + re.NotNil(co.removeScheduler(gls.GetName())) gls, err = schedule.CreateScheduler(schedulers.GrantLeaderType, oc, storage.NewStorageWithMemoryBackend(), schedule.ConfigSliceDecoder(schedulers.GrantLeaderType, []string{"1"})) - c.Assert(err, IsNil) - c.Assert(co.addScheduler(gls), IsNil) + re.NoError(err) + re.NoError(co.addScheduler(gls)) // Transfer all leaders to store 1. - waitOperator(c, co, 2) + waitOperator(re, co, 2) region2 := tc.GetRegion(2) - c.Assert(dispatchHeartbeat(co, region2, stream), IsNil) - region2 = waitTransferLeader(c, stream, region2, 1) - c.Assert(dispatchHeartbeat(co, region2, stream), IsNil) - waitNoResponse(c, stream) + re.NoError(dispatchHeartbeat(co, region2, stream)) + region2 = waitTransferLeader(re, stream, region2, 1) + re.NoError(dispatchHeartbeat(co, region2, stream)) + waitNoResponse(re, stream) - waitOperator(c, co, 3) + waitOperator(re, co, 3) region3 := tc.GetRegion(3) - c.Assert(dispatchHeartbeat(co, region3, stream), IsNil) - region3 = waitTransferLeader(c, stream, region3, 1) - c.Assert(dispatchHeartbeat(co, region3, stream), IsNil) - waitNoResponse(c, stream) + re.NoError(dispatchHeartbeat(co, region3, stream)) + region3 = waitTransferLeader(re, stream, region3, 1) + re.NoError(dispatchHeartbeat(co, region3, stream)) + waitNoResponse(re, stream) } -func (s *testCoordinatorSuite) TestPersistScheduler(c *C) { - tc, co, cleanup := prepare(nil, nil, func(co *coordinator) { co.run() }, c) +func TestPersistScheduler(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + tc, co, cleanup := prepare(nil, nil, func(co *coordinator) { co.run() }, re) hbStreams := co.hbStreams defer cleanup() // Add stores 1,2 - c.Assert(tc.addLeaderStore(1, 1), IsNil) - c.Assert(tc.addLeaderStore(2, 1), IsNil) + re.NoError(tc.addLeaderStore(1, 1)) + re.NoError(tc.addLeaderStore(2, 1)) - c.Assert(co.schedulers, HasLen, 4) + re.Len(co.schedulers, 4) oc := co.opController storage := tc.RaftCluster.storage gls1, err := schedule.CreateScheduler(schedulers.GrantLeaderType, oc, storage, schedule.ConfigSliceDecoder(schedulers.GrantLeaderType, []string{"1"})) - c.Assert(err, IsNil) - c.Assert(co.addScheduler(gls1, "1"), IsNil) + re.NoError(err) + re.NoError(co.addScheduler(gls1, "1")) evict, err := schedule.CreateScheduler(schedulers.EvictLeaderType, oc, storage, schedule.ConfigSliceDecoder(schedulers.EvictLeaderType, []string{"2"})) - c.Assert(err, IsNil) - c.Assert(co.addScheduler(evict, "2"), IsNil) - c.Assert(co.schedulers, HasLen, 6) + re.NoError(err) + re.NoError(co.addScheduler(evict, "2")) + re.Len(co.schedulers, 6) sches, _, err := storage.LoadAllScheduleConfig() - c.Assert(err, IsNil) - c.Assert(sches, HasLen, 6) - c.Assert(co.removeScheduler(schedulers.BalanceLeaderName), IsNil) - c.Assert(co.removeScheduler(schedulers.BalanceRegionName), IsNil) - c.Assert(co.removeScheduler(schedulers.HotRegionName), IsNil) - c.Assert(co.removeScheduler(schedulers.SplitBucketName), IsNil) - c.Assert(co.schedulers, HasLen, 2) - c.Assert(co.cluster.opt.Persist(storage), IsNil) + re.NoError(err) + re.Len(sches, 6) + 
re.NoError(co.removeScheduler(schedulers.BalanceLeaderName)) + re.NoError(co.removeScheduler(schedulers.BalanceRegionName)) + re.NoError(co.removeScheduler(schedulers.HotRegionName)) + re.NoError(co.removeScheduler(schedulers.SplitBucketName)) + re.Len(co.schedulers, 2) + re.NoError(co.cluster.opt.Persist(storage)) co.stop() co.wg.Wait() // make a new coordinator for testing // whether the schedulers added or removed in dynamic way are recorded in opt _, newOpt, err := newTestScheduleConfig() - c.Assert(err, IsNil) + re.NoError(err) _, err = schedule.CreateScheduler(schedulers.ShuffleRegionType, oc, storage, schedule.ConfigJSONDecoder([]byte("null"))) - c.Assert(err, IsNil) + re.NoError(err) // suppose we add a new default enable scheduler config.DefaultSchedulers = append(config.DefaultSchedulers, config.SchedulerConfig{Type: "shuffle-region"}) defer func() { config.DefaultSchedulers = config.DefaultSchedulers[:len(config.DefaultSchedulers)-1] }() - c.Assert(newOpt.GetSchedulers(), HasLen, 4) - c.Assert(newOpt.Reload(storage), IsNil) + re.Len(newOpt.GetSchedulers(), 4) + re.NoError(newOpt.Reload(storage)) // only remains 3 items with independent config. sches, _, err = storage.LoadAllScheduleConfig() - c.Assert(err, IsNil) - c.Assert(sches, HasLen, 3) + re.NoError(err) + re.Len(sches, 3) // option have 6 items because the default scheduler do not remove. - c.Assert(newOpt.GetSchedulers(), HasLen, 7) - c.Assert(newOpt.Persist(storage), IsNil) + re.Len(newOpt.GetSchedulers(), 7) + re.NoError(newOpt.Persist(storage)) tc.RaftCluster.opt = newOpt - co = newCoordinator(s.ctx, tc.RaftCluster, hbStreams) + co = newCoordinator(ctx, tc.RaftCluster, hbStreams) co.run() - c.Assert(co.schedulers, HasLen, 3) + re.Len(co.schedulers, 3) co.stop() co.wg.Wait() // suppose restart PD again _, newOpt, err = newTestScheduleConfig() - c.Assert(err, IsNil) - c.Assert(newOpt.Reload(storage), IsNil) + re.NoError(err) + re.NoError(newOpt.Reload(storage)) tc.RaftCluster.opt = newOpt - co = newCoordinator(s.ctx, tc.RaftCluster, hbStreams) + co = newCoordinator(ctx, tc.RaftCluster, hbStreams) co.run() - c.Assert(co.schedulers, HasLen, 3) + re.Len(co.schedulers, 3) bls, err := schedule.CreateScheduler(schedulers.BalanceLeaderType, oc, storage, schedule.ConfigSliceDecoder(schedulers.BalanceLeaderType, []string{"", ""})) - c.Assert(err, IsNil) - c.Assert(co.addScheduler(bls), IsNil) + re.NoError(err) + re.NoError(co.addScheduler(bls)) brs, err := schedule.CreateScheduler(schedulers.BalanceRegionType, oc, storage, schedule.ConfigSliceDecoder(schedulers.BalanceRegionType, []string{"", ""})) - c.Assert(err, IsNil) - c.Assert(co.addScheduler(brs), IsNil) - c.Assert(co.schedulers, HasLen, 5) + re.NoError(err) + re.NoError(co.addScheduler(brs)) + re.Len(co.schedulers, 5) // the scheduler option should contain 6 items // the `hot scheduler` are disabled - c.Assert(co.cluster.opt.GetSchedulers(), HasLen, 7) - c.Assert(co.removeScheduler(schedulers.GrantLeaderName), IsNil) + re.Len(co.cluster.opt.GetSchedulers(), 7) + re.NoError(co.removeScheduler(schedulers.GrantLeaderName)) // the scheduler that is not enable by default will be completely deleted - c.Assert(co.cluster.opt.GetSchedulers(), HasLen, 6) - c.Assert(co.schedulers, HasLen, 4) - c.Assert(co.cluster.opt.Persist(co.cluster.storage), IsNil) + re.Len(co.cluster.opt.GetSchedulers(), 6) + re.Len(co.schedulers, 4) + re.NoError(co.cluster.opt.Persist(co.cluster.storage)) co.stop() co.wg.Wait() _, newOpt, err = newTestScheduleConfig() - c.Assert(err, IsNil) - 
c.Assert(newOpt.Reload(co.cluster.storage), IsNil) + re.NoError(err) + re.NoError(newOpt.Reload(co.cluster.storage)) tc.RaftCluster.opt = newOpt - co = newCoordinator(s.ctx, tc.RaftCluster, hbStreams) + co = newCoordinator(ctx, tc.RaftCluster, hbStreams) co.run() - c.Assert(co.schedulers, HasLen, 4) - c.Assert(co.removeScheduler(schedulers.EvictLeaderName), IsNil) - c.Assert(co.schedulers, HasLen, 3) + re.Len(co.schedulers, 4) + re.NoError(co.removeScheduler(schedulers.EvictLeaderName)) + re.Len(co.schedulers, 3) } -func (s *testCoordinatorSuite) TestRemoveScheduler(c *C) { +func TestRemoveScheduler(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tc, co, cleanup := prepare(func(cfg *config.ScheduleConfig) { cfg.ReplicaScheduleLimit = 0 - }, nil, func(co *coordinator) { co.run() }, c) + }, nil, func(co *coordinator) { co.run() }, re) hbStreams := co.hbStreams defer cleanup() // Add stores 1,2 - c.Assert(tc.addLeaderStore(1, 1), IsNil) - c.Assert(tc.addLeaderStore(2, 1), IsNil) + re.NoError(tc.addLeaderStore(1, 1)) + re.NoError(tc.addLeaderStore(2, 1)) - c.Assert(co.schedulers, HasLen, 4) + re.Len(co.schedulers, 4) oc := co.opController storage := tc.RaftCluster.storage gls1, err := schedule.CreateScheduler(schedulers.GrantLeaderType, oc, storage, schedule.ConfigSliceDecoder(schedulers.GrantLeaderType, []string{"1"})) - c.Assert(err, IsNil) - c.Assert(co.addScheduler(gls1, "1"), IsNil) - c.Assert(co.schedulers, HasLen, 5) + re.NoError(err) + re.NoError(co.addScheduler(gls1, "1")) + re.Len(co.schedulers, 5) sches, _, err := storage.LoadAllScheduleConfig() - c.Assert(err, IsNil) - c.Assert(sches, HasLen, 5) + re.NoError(err) + re.Len(sches, 5) // remove all schedulers - c.Assert(co.removeScheduler(schedulers.BalanceLeaderName), IsNil) - c.Assert(co.removeScheduler(schedulers.BalanceRegionName), IsNil) - c.Assert(co.removeScheduler(schedulers.HotRegionName), IsNil) - c.Assert(co.removeScheduler(schedulers.GrantLeaderName), IsNil) - c.Assert(co.removeScheduler(schedulers.SplitBucketName), IsNil) + re.NoError(co.removeScheduler(schedulers.BalanceLeaderName)) + re.NoError(co.removeScheduler(schedulers.BalanceRegionName)) + re.NoError(co.removeScheduler(schedulers.HotRegionName)) + re.NoError(co.removeScheduler(schedulers.GrantLeaderName)) + re.NoError(co.removeScheduler(schedulers.SplitBucketName)) // all removed sches, _, err = storage.LoadAllScheduleConfig() - c.Assert(err, IsNil) - c.Assert(sches, HasLen, 0) - c.Assert(co.schedulers, HasLen, 0) - c.Assert(co.cluster.opt.Persist(co.cluster.storage), IsNil) + re.NoError(err) + re.Len(sches, 0) + re.Len(co.schedulers, 0) + re.NoError(co.cluster.opt.Persist(co.cluster.storage)) co.stop() co.wg.Wait() // suppose restart PD again _, newOpt, err := newTestScheduleConfig() - c.Assert(err, IsNil) - c.Assert(newOpt.Reload(tc.storage), IsNil) + re.NoError(err) + re.NoError(newOpt.Reload(tc.storage)) tc.RaftCluster.opt = newOpt - co = newCoordinator(s.ctx, tc.RaftCluster, hbStreams) + co = newCoordinator(ctx, tc.RaftCluster, hbStreams) co.run() - c.Assert(co.schedulers, HasLen, 0) + re.Len(co.schedulers, 0) // the option remains default scheduler - c.Assert(co.cluster.opt.GetSchedulers(), HasLen, 4) + re.Len(co.cluster.opt.GetSchedulers(), 4) co.stop() co.wg.Wait() } -func (s *testCoordinatorSuite) TestRestart(c *C) { +func TestRestart(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + tc, co, cleanup := prepare(func(cfg 
*config.ScheduleConfig) { // Turn off balance, we test add replica only. cfg.LeaderScheduleLimit = 0 cfg.RegionScheduleLimit = 0 - }, nil, func(co *coordinator) { co.run() }, c) + }, nil, func(co *coordinator) { co.run() }, re) hbStreams := co.hbStreams defer cleanup() // Add 3 stores (1, 2, 3) and a region with 1 replica on store 1. - c.Assert(tc.addRegionStore(1, 1), IsNil) - c.Assert(tc.addRegionStore(2, 2), IsNil) - c.Assert(tc.addRegionStore(3, 3), IsNil) - c.Assert(tc.addLeaderRegion(1, 1), IsNil) + re.NoError(tc.addRegionStore(1, 1)) + re.NoError(tc.addRegionStore(2, 2)) + re.NoError(tc.addRegionStore(3, 3)) + re.NoError(tc.addLeaderRegion(1, 1)) region := tc.GetRegion(1) co.prepareChecker.collect(region) // Add 1 replica on store 2. stream := mockhbstream.NewHeartbeatStream() - c.Assert(dispatchHeartbeat(co, region, stream), IsNil) - region = waitAddLearner(c, stream, region, 2) - c.Assert(dispatchHeartbeat(co, region, stream), IsNil) - region = waitPromoteLearner(c, stream, region, 2) + re.NoError(dispatchHeartbeat(co, region, stream)) + region = waitAddLearner(re, stream, region, 2) + re.NoError(dispatchHeartbeat(co, region, stream)) + region = waitPromoteLearner(re, stream, region, 2) co.stop() co.wg.Wait() // Recreate coordinator then add another replica on store 3. - co = newCoordinator(s.ctx, tc.RaftCluster, hbStreams) + co = newCoordinator(ctx, tc.RaftCluster, hbStreams) co.prepareChecker.collect(region) co.run() - c.Assert(dispatchHeartbeat(co, region, stream), IsNil) - region = waitAddLearner(c, stream, region, 3) - c.Assert(dispatchHeartbeat(co, region, stream), IsNil) - waitPromoteLearner(c, stream, region, 3) + re.NoError(dispatchHeartbeat(co, region, stream)) + region = waitAddLearner(re, stream, region, 3) + re.NoError(dispatchHeartbeat(co, region, stream)) + waitPromoteLearner(re, stream, region, 3) } -func (s *testCoordinatorSuite) TestPauseScheduler(c *C) { - _, co, cleanup := prepare(nil, nil, func(co *coordinator) { co.run() }, c) +func TestPauseScheduler(t *testing.T) { + re := require.New(t) + + _, co, cleanup := prepare(nil, nil, func(co *coordinator) { co.run() }, re) defer cleanup() _, err := co.isSchedulerAllowed("test") - c.Assert(err, NotNil) + re.Error(err) co.pauseOrResumeScheduler(schedulers.BalanceLeaderName, 60) paused, _ := co.isSchedulerPaused(schedulers.BalanceLeaderName) - c.Assert(paused, Equals, true) + re.True(paused) pausedAt, err := co.getPausedSchedulerDelayAt(schedulers.BalanceLeaderName) - c.Assert(err, IsNil) + re.NoError(err) resumeAt, err := co.getPausedSchedulerDelayUntil(schedulers.BalanceLeaderName) - c.Assert(err, IsNil) - c.Assert(resumeAt-pausedAt, Equals, int64(60)) + re.NoError(err) + re.Equal(int64(60), resumeAt-pausedAt) allowed, _ := co.isSchedulerAllowed(schedulers.BalanceLeaderName) - c.Assert(allowed, Equals, false) + re.False(allowed) } func BenchmarkPatrolRegion(b *testing.B) { + re := require.New(b) + mergeLimit := uint64(4100) regionNum := 10000 tc, co, cleanup := prepare(func(cfg *config.ScheduleConfig) { cfg.MergeScheduleLimit = mergeLimit - }, nil, nil, &C{}) + }, nil, nil, re) defer cleanup() tc.opt.SetSplitMergeInterval(time.Duration(0)) @@ -955,83 +981,71 @@ func BenchmarkPatrolRegion(b *testing.B) { co.patrolRegions() } -func waitOperator(c *C, co *coordinator, regionID uint64) { - testutil.WaitUntil(c, func() bool { +func waitOperator(re *require.Assertions, co *coordinator, regionID uint64) { + testutil.Eventually(re, func() bool { return co.opController.GetOperator(regionID) != nil }) } -var _ = 
Suite(&testOperatorControllerSuite{}) - -type testOperatorControllerSuite struct { - ctx context.Context - cancel context.CancelFunc -} - -func (s *testOperatorControllerSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) - c.Assert(failpoint.Enable("github.com/tikv/pd/server/schedule/unexpectedOperator", "return(true)"), IsNil) -} - -func (s *testOperatorControllerSuite) TearDownSuite(c *C) { - s.cancel() -} +func TestOperatorCount(t *testing.T) { + re := require.New(t) -func (s *testOperatorControllerSuite) TestOperatorCount(c *C) { - tc, co, cleanup := prepare(nil, nil, nil, c) + tc, co, cleanup := prepare(nil, nil, nil, re) defer cleanup() oc := co.opController - c.Assert(oc.OperatorCount(operator.OpLeader), Equals, uint64(0)) - c.Assert(oc.OperatorCount(operator.OpRegion), Equals, uint64(0)) + re.Equal(uint64(0), oc.OperatorCount(operator.OpLeader)) + re.Equal(uint64(0), oc.OperatorCount(operator.OpRegion)) - c.Assert(tc.addLeaderRegion(1, 1), IsNil) - c.Assert(tc.addLeaderRegion(2, 2), IsNil) + re.NoError(tc.addLeaderRegion(1, 1)) + re.NoError(tc.addLeaderRegion(2, 2)) { op1 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpLeader) oc.AddWaitingOperator(op1) - c.Assert(oc.OperatorCount(operator.OpLeader), Equals, uint64(1)) // 1:leader + re.Equal(uint64(1), oc.OperatorCount(operator.OpLeader)) // 1:leader op2 := newTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpLeader) oc.AddWaitingOperator(op2) - c.Assert(oc.OperatorCount(operator.OpLeader), Equals, uint64(2)) // 1:leader, 2:leader - c.Assert(oc.RemoveOperator(op1), IsTrue) - c.Assert(oc.OperatorCount(operator.OpLeader), Equals, uint64(1)) // 2:leader + re.Equal(uint64(2), oc.OperatorCount(operator.OpLeader)) // 1:leader, 2:leader + re.True(oc.RemoveOperator(op1)) + re.Equal(uint64(1), oc.OperatorCount(operator.OpLeader)) // 2:leader } { op1 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpRegion) oc.AddWaitingOperator(op1) - c.Assert(oc.OperatorCount(operator.OpRegion), Equals, uint64(1)) // 1:region 2:leader - c.Assert(oc.OperatorCount(operator.OpLeader), Equals, uint64(1)) + re.Equal(uint64(1), oc.OperatorCount(operator.OpRegion)) // 1:region 2:leader + re.Equal(uint64(1), oc.OperatorCount(operator.OpLeader)) op2 := newTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpRegion) op2.SetPriorityLevel(core.HighPriority) oc.AddWaitingOperator(op2) - c.Assert(oc.OperatorCount(operator.OpRegion), Equals, uint64(2)) // 1:region 2:region - c.Assert(oc.OperatorCount(operator.OpLeader), Equals, uint64(0)) + re.Equal(uint64(2), oc.OperatorCount(operator.OpRegion)) // 1:region 2:region + re.Equal(uint64(0), oc.OperatorCount(operator.OpLeader)) } } -func (s *testOperatorControllerSuite) TestStoreOverloaded(c *C) { - tc, co, cleanup := prepare(nil, nil, nil, c) +func TestStoreOverloaded(t *testing.T) { + re := require.New(t) + + tc, co, cleanup := prepare(nil, nil, nil, re) defer cleanup() oc := co.opController lb, err := schedule.CreateScheduler(schedulers.BalanceRegionType, oc, tc.storage, schedule.ConfigSliceDecoder(schedulers.BalanceRegionType, []string{"", ""})) - c.Assert(err, IsNil) + re.NoError(err) opt := tc.GetOpts() - c.Assert(tc.addRegionStore(4, 100), IsNil) - c.Assert(tc.addRegionStore(3, 100), IsNil) - c.Assert(tc.addRegionStore(2, 100), IsNil) - c.Assert(tc.addRegionStore(1, 10), IsNil) - c.Assert(tc.addLeaderRegion(1, 2, 3, 4), IsNil) + re.NoError(tc.addRegionStore(4, 100)) + re.NoError(tc.addRegionStore(3, 100)) + 
re.NoError(tc.addRegionStore(2, 100)) + re.NoError(tc.addRegionStore(1, 10)) + re.NoError(tc.addLeaderRegion(1, 2, 3, 4)) region := tc.GetRegion(1).Clone(core.SetApproximateSize(60)) tc.putRegion(region) start := time.Now() { ops := lb.Schedule(tc) - c.Assert(ops, HasLen, 1) + re.Len(ops, 1) op1 := ops[0] - c.Assert(op1, NotNil) - c.Assert(oc.AddOperator(op1), IsTrue) - c.Assert(oc.RemoveOperator(op1), IsTrue) + re.NotNil(op1) + re.True(oc.AddOperator(op1)) + re.True(oc.RemoveOperator(op1)) } for { time.Sleep(time.Millisecond * 10) @@ -1039,7 +1053,7 @@ func (s *testOperatorControllerSuite) TestStoreOverloaded(c *C) { if time.Since(start) > time.Second { break } - c.Assert(ops, HasLen, 0) + re.Len(ops, 0) } // reset all stores' limit @@ -1049,50 +1063,54 @@ func (s *testOperatorControllerSuite) TestStoreOverloaded(c *C) { time.Sleep(time.Second) for i := 0; i < 10; i++ { ops := lb.Schedule(tc) - c.Assert(ops, HasLen, 1) + re.Len(ops, 1) op := ops[0] - c.Assert(oc.AddOperator(op), IsTrue) - c.Assert(oc.RemoveOperator(op), IsTrue) + re.True(oc.AddOperator(op)) + re.True(oc.RemoveOperator(op)) } // sleep 1 seconds to make sure that the token is filled up time.Sleep(time.Second) for i := 0; i < 100; i++ { - c.Assert(len(lb.Schedule(tc)), Greater, 0) + re.Greater(len(lb.Schedule(tc)), 0) } } -func (s *testOperatorControllerSuite) TestStoreOverloadedWithReplace(c *C) { - tc, co, cleanup := prepare(nil, nil, nil, c) +func TestStoreOverloadedWithReplace(t *testing.T) { + re := require.New(t) + + tc, co, cleanup := prepare(nil, nil, nil, re) defer cleanup() oc := co.opController lb, err := schedule.CreateScheduler(schedulers.BalanceRegionType, oc, tc.storage, schedule.ConfigSliceDecoder(schedulers.BalanceRegionType, []string{"", ""})) - c.Assert(err, IsNil) - - c.Assert(tc.addRegionStore(4, 100), IsNil) - c.Assert(tc.addRegionStore(3, 100), IsNil) - c.Assert(tc.addRegionStore(2, 100), IsNil) - c.Assert(tc.addRegionStore(1, 10), IsNil) - c.Assert(tc.addLeaderRegion(1, 2, 3, 4), IsNil) - c.Assert(tc.addLeaderRegion(2, 1, 3, 4), IsNil) + re.NoError(err) + + re.NoError(tc.addRegionStore(4, 100)) + re.NoError(tc.addRegionStore(3, 100)) + re.NoError(tc.addRegionStore(2, 100)) + re.NoError(tc.addRegionStore(1, 10)) + re.NoError(tc.addLeaderRegion(1, 2, 3, 4)) + re.NoError(tc.addLeaderRegion(2, 1, 3, 4)) region := tc.GetRegion(1).Clone(core.SetApproximateSize(60)) tc.putRegion(region) region = tc.GetRegion(2).Clone(core.SetApproximateSize(60)) tc.putRegion(region) op1 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpRegion, operator.AddPeer{ToStore: 1, PeerID: 1}) - c.Assert(oc.AddOperator(op1), IsTrue) + re.True(oc.AddOperator(op1)) op2 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpRegion, operator.AddPeer{ToStore: 2, PeerID: 2}) op2.SetPriorityLevel(core.HighPriority) - c.Assert(oc.AddOperator(op2), IsTrue) + re.True(oc.AddOperator(op2)) op3 := newTestOperator(1, tc.GetRegion(2).GetRegionEpoch(), operator.OpRegion, operator.AddPeer{ToStore: 1, PeerID: 3}) - c.Assert(oc.AddOperator(op3), IsFalse) - c.Assert(lb.Schedule(tc), HasLen, 0) + re.False(oc.AddOperator(op3)) + re.Len(lb.Schedule(tc), 0) // sleep 2 seconds to make sure that token is filled up time.Sleep(2 * time.Second) - c.Assert(len(lb.Schedule(tc)), Greater, 0) + re.Greater(len(lb.Schedule(tc)), 0) } -func (s *testOperatorControllerSuite) TestDownStoreLimit(c *C) { - tc, co, cleanup := prepare(nil, nil, nil, c) +func TestDownStoreLimit(t *testing.T) { + re := require.New(t) + + tc, co, cleanup := 
prepare(nil, nil, nil, re) defer cleanup() oc := co.opController rc := co.checkers.GetRuleChecker() @@ -1116,8 +1134,8 @@ func (s *testOperatorControllerSuite) TestDownStoreLimit(c *C) { for i := uint64(1); i < 20; i++ { tc.addRegionStore(i+3, 100) op := rc.Check(region) - c.Assert(op, NotNil) - c.Assert(oc.AddOperator(op), IsTrue) + re.NotNil(op) + re.True(oc.AddOperator(op)) oc.RemoveOperator(op) } @@ -1126,28 +1144,12 @@ func (s *testOperatorControllerSuite) TestDownStoreLimit(c *C) { for i := uint64(20); i < 25; i++ { tc.addRegionStore(i+3, 100) op := rc.Check(region) - c.Assert(op, NotNil) - c.Assert(oc.AddOperator(op), IsTrue) + re.NotNil(op) + re.True(oc.AddOperator(op)) oc.RemoveOperator(op) } } -var _ = Suite(&testScheduleControllerSuite{}) - -type testScheduleControllerSuite struct { - ctx context.Context - cancel context.CancelFunc -} - -func (s *testScheduleControllerSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) - c.Assert(failpoint.Enable("github.com/tikv/pd/server/schedule/unexpectedOperator", "return(true)"), IsNil) -} - -func (s *testScheduleControllerSuite) TearDownSuite(c *C) { - s.cancel() -} - // FIXME: remove after move into schedulers package type mockLimitScheduler struct { schedule.Scheduler @@ -1160,15 +1162,17 @@ func (s *mockLimitScheduler) IsScheduleAllowed(cluster schedule.Cluster) bool { return s.counter.OperatorCount(s.kind) < s.limit } -func (s *testScheduleControllerSuite) TestController(c *C) { - tc, co, cleanup := prepare(nil, nil, nil, c) +func TestController(t *testing.T) { + re := require.New(t) + + tc, co, cleanup := prepare(nil, nil, nil, re) defer cleanup() oc := co.opController - c.Assert(tc.addLeaderRegion(1, 1), IsNil) - c.Assert(tc.addLeaderRegion(2, 2), IsNil) + re.NoError(tc.addLeaderRegion(1, 1)) + re.NoError(tc.addLeaderRegion(2, 2)) scheduler, err := schedule.CreateScheduler(schedulers.BalanceLeaderType, oc, storage.NewStorageWithMemoryBackend(), schedule.ConfigSliceDecoder(schedulers.BalanceLeaderType, []string{"", ""})) - c.Assert(err, IsNil) + re.NoError(err) lb := &mockLimitScheduler{ Scheduler: scheduler, counter: oc, @@ -1178,25 +1182,25 @@ func (s *testScheduleControllerSuite) TestController(c *C) { sc := newScheduleController(co, lb) for i := schedulers.MinScheduleInterval; sc.GetInterval() != schedulers.MaxScheduleInterval; i = sc.GetNextInterval(i) { - c.Assert(sc.GetInterval(), Equals, i) - c.Assert(sc.Schedule(), HasLen, 0) + re.Equal(i, sc.GetInterval()) + re.Len(sc.Schedule(), 0) } // limit = 2 lb.limit = 2 // count = 0 { - c.Assert(sc.AllowSchedule(), IsTrue) + re.True(sc.AllowSchedule()) op1 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpLeader) - c.Assert(oc.AddWaitingOperator(op1), Equals, 1) + re.Equal(1, oc.AddWaitingOperator(op1)) // count = 1 - c.Assert(sc.AllowSchedule(), IsTrue) + re.True(sc.AllowSchedule()) op2 := newTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpLeader) - c.Assert(oc.AddWaitingOperator(op2), Equals, 1) + re.Equal(1, oc.AddWaitingOperator(op2)) // count = 2 - c.Assert(sc.AllowSchedule(), IsFalse) - c.Assert(oc.RemoveOperator(op1), IsTrue) + re.False(sc.AllowSchedule()) + re.True(oc.RemoveOperator(op1)) // count = 1 - c.Assert(sc.AllowSchedule(), IsTrue) + re.True(sc.AllowSchedule()) } op11 := newTestOperator(1, tc.GetRegion(1).GetRegionEpoch(), operator.OpLeader) @@ -1204,55 +1208,57 @@ func (s *testScheduleControllerSuite) TestController(c *C) { { op3 := newTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), 
operator.OpHotRegion) op3.SetPriorityLevel(core.HighPriority) - c.Assert(oc.AddWaitingOperator(op11), Equals, 1) - c.Assert(sc.AllowSchedule(), IsFalse) - c.Assert(oc.AddWaitingOperator(op3), Equals, 1) - c.Assert(sc.AllowSchedule(), IsTrue) - c.Assert(oc.RemoveOperator(op3), IsTrue) + re.Equal(1, oc.AddWaitingOperator(op11)) + re.False(sc.AllowSchedule()) + re.Equal(1, oc.AddWaitingOperator(op3)) + re.True(sc.AllowSchedule()) + re.True(oc.RemoveOperator(op3)) } // add a admin operator will remove old operator { op2 := newTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpLeader) - c.Assert(oc.AddWaitingOperator(op2), Equals, 1) - c.Assert(sc.AllowSchedule(), IsFalse) + re.Equal(1, oc.AddWaitingOperator(op2)) + re.False(sc.AllowSchedule()) op4 := newTestOperator(2, tc.GetRegion(2).GetRegionEpoch(), operator.OpAdmin) op4.SetPriorityLevel(core.HighPriority) - c.Assert(oc.AddWaitingOperator(op4), Equals, 1) - c.Assert(sc.AllowSchedule(), IsTrue) - c.Assert(oc.RemoveOperator(op4), IsTrue) + re.Equal(1, oc.AddWaitingOperator(op4)) + re.True(sc.AllowSchedule()) + re.True(oc.RemoveOperator(op4)) } // test wrong region id. { op5 := newTestOperator(3, &metapb.RegionEpoch{}, operator.OpHotRegion) - c.Assert(oc.AddWaitingOperator(op5), Equals, 0) + re.Equal(0, oc.AddWaitingOperator(op5)) } // test wrong region epoch. - c.Assert(oc.RemoveOperator(op11), IsTrue) + re.True(oc.RemoveOperator(op11)) epoch := &metapb.RegionEpoch{ Version: tc.GetRegion(1).GetRegionEpoch().GetVersion() + 1, ConfVer: tc.GetRegion(1).GetRegionEpoch().GetConfVer(), } { op6 := newTestOperator(1, epoch, operator.OpLeader) - c.Assert(oc.AddWaitingOperator(op6), Equals, 0) + re.Equal(0, oc.AddWaitingOperator(op6)) } epoch.Version-- { op6 := newTestOperator(1, epoch, operator.OpLeader) - c.Assert(oc.AddWaitingOperator(op6), Equals, 1) - c.Assert(oc.RemoveOperator(op6), IsTrue) + re.Equal(1, oc.AddWaitingOperator(op6)) + re.True(oc.RemoveOperator(op6)) } } -func (s *testScheduleControllerSuite) TestInterval(c *C) { - _, co, cleanup := prepare(nil, nil, nil, c) +func TestInterval(t *testing.T) { + re := require.New(t) + + _, co, cleanup := prepare(nil, nil, nil, re) defer cleanup() lb, err := schedule.CreateScheduler(schedulers.BalanceLeaderType, co.opController, storage.NewStorageWithMemoryBackend(), schedule.ConfigSliceDecoder(schedulers.BalanceLeaderType, []string{"", ""})) - c.Assert(err, IsNil) + re.NoError(err) sc := newScheduleController(co, lb) // If no operator for x seconds, the next check should be in x/2 seconds. 
@@ -1260,15 +1266,15 @@ func (s *testScheduleControllerSuite) TestInterval(c *C) { for _, n := range idleSeconds { sc.nextInterval = schedulers.MinScheduleInterval for totalSleep := time.Duration(0); totalSleep <= time.Second*time.Duration(n); totalSleep += sc.GetInterval() { - c.Assert(sc.Schedule(), HasLen, 0) + re.Len(sc.Schedule(), 0) } - c.Assert(sc.GetInterval(), Less, time.Second*time.Duration(n/2)) + re.Less(sc.GetInterval(), time.Second*time.Duration(n/2)) } } -func waitAddLearner(c *C, stream mockhbstream.HeartbeatStream, region *core.RegionInfo, storeID uint64) *core.RegionInfo { +func waitAddLearner(re *require.Assertions, stream mockhbstream.HeartbeatStream, region *core.RegionInfo, storeID uint64) *core.RegionInfo { var res *pdpb.RegionHeartbeatResponse - testutil.WaitUntil(c, func() bool { + testutil.Eventually(re, func() bool { if res = stream.Recv(); res != nil { return res.GetRegionId() == region.GetID() && res.GetChangePeer().GetChangeType() == eraftpb.ConfChangeType_AddLearnerNode && @@ -1282,9 +1288,9 @@ func waitAddLearner(c *C, stream mockhbstream.HeartbeatStream, region *core.Regi ) } -func waitPromoteLearner(c *C, stream mockhbstream.HeartbeatStream, region *core.RegionInfo, storeID uint64) *core.RegionInfo { +func waitPromoteLearner(re *require.Assertions, stream mockhbstream.HeartbeatStream, region *core.RegionInfo, storeID uint64) *core.RegionInfo { var res *pdpb.RegionHeartbeatResponse - testutil.WaitUntil(c, func() bool { + testutil.Eventually(re, func() bool { if res = stream.Recv(); res != nil { return res.GetRegionId() == region.GetID() && res.GetChangePeer().GetChangeType() == eraftpb.ConfChangeType_AddNode && @@ -1299,9 +1305,9 @@ func waitPromoteLearner(c *C, stream mockhbstream.HeartbeatStream, region *core. ) } -func waitRemovePeer(c *C, stream mockhbstream.HeartbeatStream, region *core.RegionInfo, storeID uint64) *core.RegionInfo { +func waitRemovePeer(re *require.Assertions, stream mockhbstream.HeartbeatStream, region *core.RegionInfo, storeID uint64) *core.RegionInfo { var res *pdpb.RegionHeartbeatResponse - testutil.WaitUntil(c, func() bool { + testutil.Eventually(re, func() bool { if res = stream.Recv(); res != nil { return res.GetRegionId() == region.GetID() && res.GetChangePeer().GetChangeType() == eraftpb.ConfChangeType_RemoveNode && @@ -1315,9 +1321,9 @@ func waitRemovePeer(c *C, stream mockhbstream.HeartbeatStream, region *core.Regi ) } -func waitTransferLeader(c *C, stream mockhbstream.HeartbeatStream, region *core.RegionInfo, storeID uint64) *core.RegionInfo { +func waitTransferLeader(re *require.Assertions, stream mockhbstream.HeartbeatStream, region *core.RegionInfo, storeID uint64) *core.RegionInfo { var res *pdpb.RegionHeartbeatResponse - testutil.WaitUntil(c, func() bool { + testutil.Eventually(re, func() bool { if res = stream.Recv(); res != nil { if res.GetRegionId() == region.GetID() { for _, peer := range append(res.GetTransferLeader().GetPeers(), res.GetTransferLeader().GetPeer()) { @@ -1334,8 +1340,8 @@ func waitTransferLeader(c *C, stream mockhbstream.HeartbeatStream, region *core. 
) } -func waitNoResponse(c *C, stream mockhbstream.HeartbeatStream) { - testutil.WaitUntil(c, func() bool { +func waitNoResponse(re *require.Assertions, stream mockhbstream.HeartbeatStream) { + testutil.Eventually(re, func() bool { res := stream.Recv() return res == nil }) diff --git a/server/cluster/store_limiter_test.go b/server/cluster/store_limiter_test.go index d23bdb06536..7a1dcab9fad 100644 --- a/server/cluster/store_limiter_test.go +++ b/server/cluster/store_limiter_test.go @@ -15,43 +15,40 @@ package cluster import ( - . "github.com/pingcap/check" + "testing" + "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/server/config" "github.com/tikv/pd/server/core/storelimit" ) -var _ = Suite(&testStoreLimiterSuite{}) +func TestCollect(t *testing.T) { + re := require.New(t) -type testStoreLimiterSuite struct { - opt *config.PersistOptions -} + limiter := NewStoreLimiter(config.NewTestOptions()) -func (s *testStoreLimiterSuite) SetUpSuite(c *C) { - // Create a server for testing - s.opt = config.NewTestOptions() + limiter.Collect(&pdpb.StoreStats{}) + re.Equal(int64(1), limiter.state.cst.total) } -func (s *testStoreLimiterSuite) TestCollect(c *C) { - limiter := NewStoreLimiter(s.opt) +func TestStoreLimitScene(t *testing.T) { + re := require.New(t) - limiter.Collect(&pdpb.StoreStats{}) - c.Assert(limiter.state.cst.total, Equals, int64(1)) + limiter := NewStoreLimiter(config.NewTestOptions()) + re.Equal(storelimit.DefaultScene(storelimit.AddPeer), limiter.scene[storelimit.AddPeer]) + re.Equal(storelimit.DefaultScene(storelimit.RemovePeer), limiter.scene[storelimit.RemovePeer]) } -func (s *testStoreLimiterSuite) TestStoreLimitScene(c *C) { - limiter := NewStoreLimiter(s.opt) - c.Assert(limiter.scene[storelimit.AddPeer], DeepEquals, storelimit.DefaultScene(storelimit.AddPeer)) - c.Assert(limiter.scene[storelimit.RemovePeer], DeepEquals, storelimit.DefaultScene(storelimit.RemovePeer)) -} +func TestReplaceStoreLimitScene(t *testing.T) { + re := require.New(t) -func (s *testStoreLimiterSuite) TestReplaceStoreLimitScene(c *C) { - limiter := NewStoreLimiter(s.opt) + limiter := NewStoreLimiter(config.NewTestOptions()) sceneAddPeer := &storelimit.Scene{Idle: 4, Low: 3, Normal: 2, High: 1} limiter.ReplaceStoreLimitScene(sceneAddPeer, storelimit.AddPeer) - c.Assert(limiter.scene[storelimit.AddPeer], DeepEquals, sceneAddPeer) + re.Equal(sceneAddPeer, limiter.scene[storelimit.AddPeer]) sceneRemovePeer := &storelimit.Scene{Idle: 5, Low: 4, Normal: 3, High: 2} limiter.ReplaceStoreLimitScene(sceneRemovePeer, storelimit.RemovePeer) diff --git a/server/cluster/unsafe_recovery_controller_test.go b/server/cluster/unsafe_recovery_controller_test.go index edd6bf9c187..2b3717dabd6 100644 --- a/server/cluster/unsafe_recovery_controller_test.go +++ b/server/cluster/unsafe_recovery_controller_test.go @@ -16,13 +16,14 @@ package cluster import ( "context" + "testing" "time" - . 
"github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/eraftpb" "github.com/pingcap/kvproto/pkg/metapb" "github.com/pingcap/kvproto/pkg/pdpb" "github.com/pingcap/kvproto/pkg/raft_serverpb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/codec" "github.com/tikv/pd/pkg/mock/mockid" "github.com/tikv/pd/server/core" @@ -30,21 +31,6 @@ import ( "github.com/tikv/pd/server/storage" ) -var _ = Suite(&testUnsafeRecoverySuite{}) - -type testUnsafeRecoverySuite struct { - ctx context.Context - cancel context.CancelFunc -} - -func (s *testUnsafeRecoverySuite) TearDownTest(c *C) { - s.cancel() -} - -func (s *testUnsafeRecoverySuite) SetUpTest(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) -} - func newStoreHeartbeat(storeID uint64, report *pdpb.StoreReport) *pdpb.StoreHeartbeatRequest { return &pdpb.StoreHeartbeatRequest{ Stats: &pdpb.StoreStats{ @@ -54,7 +40,7 @@ func newStoreHeartbeat(storeID uint64, report *pdpb.StoreReport) *pdpb.StoreHear } } -func applyRecoveryPlan(c *C, storeID uint64, storeReports map[uint64]*pdpb.StoreReport, resp *pdpb.StoreHeartbeatResponse) { +func applyRecoveryPlan(re *require.Assertions, storeID uint64, storeReports map[uint64]*pdpb.StoreReport, resp *pdpb.StoreHeartbeatResponse) { plan := resp.GetRecoveryPlan() if plan == nil { return @@ -122,7 +108,7 @@ func applyRecoveryPlan(c *C, storeID uint64, storeReports map[uint64]*pdpb.Store } region.RegionEpoch.ConfVer += 1 if store == storeID { - c.Assert(report.IsForceLeader, IsTrue) + re.True(report.IsForceLeader) } break } @@ -135,7 +121,7 @@ func applyRecoveryPlan(c *C, storeID uint64, storeReports map[uint64]*pdpb.Store } } -func advanceUntilFinished(c *C, recoveryController *unsafeRecoveryController, reports map[uint64]*pdpb.StoreReport) { +func advanceUntilFinished(re *require.Assertions, recoveryController *unsafeRecoveryController, reports map[uint64]*pdpb.StoreReport) { retry := 0 for { @@ -144,7 +130,7 @@ func advanceUntilFinished(c *C, recoveryController *unsafeRecoveryController, re req.StoreReport = report resp := &pdpb.StoreHeartbeatResponse{} recoveryController.HandleStoreHeartbeat(req, resp) - applyRecoveryPlan(c, storeID, reports, resp) + applyRecoveryPlan(re, storeID, reports, resp) } if recoveryController.GetStage() == finished { break @@ -157,19 +143,23 @@ func advanceUntilFinished(c *C, recoveryController *unsafeRecoveryController, re } } -func (s *testUnsafeRecoverySuite) TestFinished(c *C) { +func TestFinished(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, _ := newTestScheduleConfig() - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) - cluster.coordinator = newCoordinator(s.ctx, cluster, hbstream.NewTestHeartbeatStreams(s.ctx, cluster.meta.GetId(), cluster, true)) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + cluster.coordinator = newCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.meta.GetId(), cluster, true)) cluster.coordinator.run() for _, store := range newTestStores(3, "6.0.0") { - c.Assert(cluster.PutStore(store.GetMeta()), IsNil) + re.NoError(cluster.PutStore(store.GetMeta())) } recoveryController := newUnsafeRecoveryController(cluster) - c.Assert(recoveryController.RemoveFailedStores(map[uint64]struct{}{ + re.NoError(recoveryController.RemoveFailedStores(map[uint64]struct{}{ 2: {}, 3: {}, - 
}, 60), IsNil) + }, 60)) reports := map[uint64]*pdpb.StoreReport{ 1: {PeerReports: []*pdpb.PeerReport{ @@ -183,18 +173,18 @@ func (s *testUnsafeRecoverySuite) TestFinished(c *C) { {Id: 11, StoreId: 1}, {Id: 21, StoreId: 2}, {Id: 31, StoreId: 3}}}}}, }}, } - c.Assert(recoveryController.GetStage(), Equals, collectReport) + re.Equal(collectReport, recoveryController.GetStage()) for storeID := range reports { req := newStoreHeartbeat(storeID, nil) resp := &pdpb.StoreHeartbeatResponse{} recoveryController.HandleStoreHeartbeat(req, resp) // require peer report by empty plan - c.Assert(resp.RecoveryPlan, NotNil) - c.Assert(len(resp.RecoveryPlan.Creates), Equals, 0) - c.Assert(len(resp.RecoveryPlan.Demotes), Equals, 0) - c.Assert(resp.RecoveryPlan.ForceLeader, IsNil) - c.Assert(resp.RecoveryPlan.Step, Equals, uint64(1)) - applyRecoveryPlan(c, storeID, reports, resp) + re.NotNil(resp.RecoveryPlan) + re.Empty(len(resp.RecoveryPlan.Creates)) + re.Empty(len(resp.RecoveryPlan.Demotes)) + re.Nil(resp.RecoveryPlan.ForceLeader) + re.Equal(uint64(1), resp.RecoveryPlan.Step) + applyRecoveryPlan(re, storeID, reports, resp) } // receive all reports and dispatch plan @@ -203,49 +193,53 @@ func (s *testUnsafeRecoverySuite) TestFinished(c *C) { req.StoreReport = report resp := &pdpb.StoreHeartbeatResponse{} recoveryController.HandleStoreHeartbeat(req, resp) - c.Assert(resp.RecoveryPlan, NotNil) - c.Assert(resp.RecoveryPlan.ForceLeader, NotNil) - c.Assert(len(resp.RecoveryPlan.ForceLeader.EnterForceLeaders), Equals, 1) - c.Assert(resp.RecoveryPlan.ForceLeader.FailedStores, NotNil) - applyRecoveryPlan(c, storeID, reports, resp) + re.NotNil(resp.RecoveryPlan) + re.NotNil(resp.RecoveryPlan.ForceLeader) + re.Equal(1, len(resp.RecoveryPlan.ForceLeader.EnterForceLeaders)) + re.NotNil(resp.RecoveryPlan.ForceLeader.FailedStores) + applyRecoveryPlan(re, storeID, reports, resp) } - c.Assert(recoveryController.GetStage(), Equals, forceLeader) + re.Equal(forceLeader, recoveryController.GetStage()) for storeID, report := range reports { req := newStoreHeartbeat(storeID, report) req.StoreReport = report resp := &pdpb.StoreHeartbeatResponse{} recoveryController.HandleStoreHeartbeat(req, resp) - c.Assert(resp.RecoveryPlan, NotNil) - c.Assert(len(resp.RecoveryPlan.Demotes), Equals, 1) - applyRecoveryPlan(c, storeID, reports, resp) + re.NotNil(resp.RecoveryPlan) + re.Equal(1, len(resp.RecoveryPlan.Demotes)) + applyRecoveryPlan(re, storeID, reports, resp) } - c.Assert(recoveryController.GetStage(), Equals, demoteFailedVoter) + re.Equal(demoteFailedVoter, recoveryController.GetStage()) for storeID, report := range reports { req := newStoreHeartbeat(storeID, report) req.StoreReport = report resp := &pdpb.StoreHeartbeatResponse{} recoveryController.HandleStoreHeartbeat(req, resp) - c.Assert(resp.RecoveryPlan, IsNil) + re.Nil(resp.RecoveryPlan) // remove the two failed peers - applyRecoveryPlan(c, storeID, reports, resp) + applyRecoveryPlan(re, storeID, reports, resp) } - c.Assert(recoveryController.GetStage(), Equals, finished) + re.Equal(finished, recoveryController.GetStage()) } -func (s *testUnsafeRecoverySuite) TestFailed(c *C) { +func TestFailed(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, _ := newTestScheduleConfig() - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) - cluster.coordinator = newCoordinator(s.ctx, cluster, hbstream.NewTestHeartbeatStreams(s.ctx, 
cluster.meta.GetId(), cluster, true)) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + cluster.coordinator = newCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.meta.GetId(), cluster, true)) cluster.coordinator.run() for _, store := range newTestStores(3, "6.0.0") { - c.Assert(cluster.PutStore(store.GetMeta()), IsNil) + re.NoError(cluster.PutStore(store.GetMeta())) } recoveryController := newUnsafeRecoveryController(cluster) - c.Assert(recoveryController.RemoveFailedStores(map[uint64]struct{}{ + re.NoError(recoveryController.RemoveFailedStores(map[uint64]struct{}{ 2: {}, 3: {}, - }, 60), IsNil) + }, 60)) reports := map[uint64]*pdpb.StoreReport{ 1: {PeerReports: []*pdpb.PeerReport{ @@ -259,17 +253,17 @@ func (s *testUnsafeRecoverySuite) TestFailed(c *C) { {Id: 11, StoreId: 1}, {Id: 21, StoreId: 2}, {Id: 31, StoreId: 3}}}}}, }}, } - c.Assert(recoveryController.GetStage(), Equals, collectReport) + re.Equal(collectReport, recoveryController.GetStage()) // require peer report for storeID := range reports { req := newStoreHeartbeat(storeID, nil) resp := &pdpb.StoreHeartbeatResponse{} recoveryController.HandleStoreHeartbeat(req, resp) - c.Assert(resp.RecoveryPlan, NotNil) - c.Assert(len(resp.RecoveryPlan.Creates), Equals, 0) - c.Assert(len(resp.RecoveryPlan.Demotes), Equals, 0) - c.Assert(resp.RecoveryPlan.ForceLeader, IsNil) - applyRecoveryPlan(c, storeID, reports, resp) + re.NotNil(resp.RecoveryPlan) + re.Empty(len(resp.RecoveryPlan.Creates)) + re.Empty(len(resp.RecoveryPlan.Demotes)) + re.Nil(resp.RecoveryPlan.ForceLeader) + applyRecoveryPlan(re, storeID, reports, resp) } // receive all reports and dispatch plan @@ -278,39 +272,39 @@ func (s *testUnsafeRecoverySuite) TestFailed(c *C) { req.StoreReport = report resp := &pdpb.StoreHeartbeatResponse{} recoveryController.HandleStoreHeartbeat(req, resp) - c.Assert(resp.RecoveryPlan, NotNil) - c.Assert(resp.RecoveryPlan.ForceLeader, NotNil) - c.Assert(len(resp.RecoveryPlan.ForceLeader.EnterForceLeaders), Equals, 1) - c.Assert(resp.RecoveryPlan.ForceLeader.FailedStores, NotNil) - applyRecoveryPlan(c, storeID, reports, resp) + re.NotNil(resp.RecoveryPlan) + re.NotNil(resp.RecoveryPlan.ForceLeader) + re.Equal(1, len(resp.RecoveryPlan.ForceLeader.EnterForceLeaders)) + re.NotNil(resp.RecoveryPlan.ForceLeader.FailedStores) + applyRecoveryPlan(re, storeID, reports, resp) } - c.Assert(recoveryController.GetStage(), Equals, forceLeader) + re.Equal(forceLeader, recoveryController.GetStage()) for storeID, report := range reports { req := newStoreHeartbeat(storeID, report) req.StoreReport = report resp := &pdpb.StoreHeartbeatResponse{} recoveryController.HandleStoreHeartbeat(req, resp) - c.Assert(resp.RecoveryPlan, NotNil) - c.Assert(len(resp.RecoveryPlan.Demotes), Equals, 1) - applyRecoveryPlan(c, storeID, reports, resp) + re.NotNil(resp.RecoveryPlan) + re.Equal(1, len(resp.RecoveryPlan.Demotes)) + applyRecoveryPlan(re, storeID, reports, resp) } - c.Assert(recoveryController.GetStage(), Equals, demoteFailedVoter) + re.Equal(demoteFailedVoter, recoveryController.GetStage()) // received heartbeat from failed store, abort req := newStoreHeartbeat(2, nil) resp := &pdpb.StoreHeartbeatResponse{} recoveryController.HandleStoreHeartbeat(req, resp) - c.Assert(resp.RecoveryPlan, IsNil) - c.Assert(recoveryController.GetStage(), Equals, exitForceLeader) + re.Nil(resp.RecoveryPlan) + re.Equal(exitForceLeader, recoveryController.GetStage()) for storeID, report := range 
reports { req := newStoreHeartbeat(storeID, report) req.StoreReport = report resp := &pdpb.StoreHeartbeatResponse{} recoveryController.HandleStoreHeartbeat(req, resp) - c.Assert(resp.RecoveryPlan, NotNil) - applyRecoveryPlan(c, storeID, reports, resp) + re.NotNil(resp.RecoveryPlan) + applyRecoveryPlan(re, storeID, reports, resp) } for storeID, report := range reports { @@ -318,24 +312,28 @@ func (s *testUnsafeRecoverySuite) TestFailed(c *C) { req.StoreReport = report resp := &pdpb.StoreHeartbeatResponse{} recoveryController.HandleStoreHeartbeat(req, resp) - applyRecoveryPlan(c, storeID, reports, resp) + applyRecoveryPlan(re, storeID, reports, resp) } - c.Assert(recoveryController.GetStage(), Equals, failed) + re.Equal(failed, recoveryController.GetStage()) } -func (s *testUnsafeRecoverySuite) TestForceLeaderFail(c *C) { +func TestForceLeaderFail(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, _ := newTestScheduleConfig() - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) - cluster.coordinator = newCoordinator(s.ctx, cluster, hbstream.NewTestHeartbeatStreams(s.ctx, cluster.meta.GetId(), cluster, true)) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + cluster.coordinator = newCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.meta.GetId(), cluster, true)) cluster.coordinator.run() for _, store := range newTestStores(4, "6.0.0") { - c.Assert(cluster.PutStore(store.GetMeta()), IsNil) + re.NoError(cluster.PutStore(store.GetMeta())) } recoveryController := newUnsafeRecoveryController(cluster) - c.Assert(recoveryController.RemoveFailedStores(map[uint64]struct{}{ + re.NoError(recoveryController.RemoveFailedStores(map[uint64]struct{}{ 3: {}, 4: {}, - }, 60), IsNil) + }, 60)) reports := map[uint64]*pdpb.StoreReport{ 1: { @@ -376,42 +374,46 @@ func (s *testUnsafeRecoverySuite) TestForceLeaderFail(c *C) { resp2 := &pdpb.StoreHeartbeatResponse{} req2.StoreReport.Step = 1 recoveryController.HandleStoreHeartbeat(req2, resp2) - c.Assert(recoveryController.GetStage(), Equals, forceLeader) + re.Equal(forceLeader, recoveryController.GetStage()) recoveryController.HandleStoreHeartbeat(req1, resp1) // force leader on store 1 succeed - applyRecoveryPlan(c, 1, reports, resp1) - applyRecoveryPlan(c, 2, reports, resp2) + applyRecoveryPlan(re, 1, reports, resp1) + applyRecoveryPlan(re, 2, reports, resp2) // force leader on store 2 doesn't succeed reports[2].PeerReports[0].IsForceLeader = false // force leader should retry on store 2 recoveryController.HandleStoreHeartbeat(req1, resp1) recoveryController.HandleStoreHeartbeat(req2, resp2) - c.Assert(recoveryController.GetStage(), Equals, forceLeader) + re.Equal(forceLeader, recoveryController.GetStage()) recoveryController.HandleStoreHeartbeat(req1, resp1) // force leader succeed this time - applyRecoveryPlan(c, 1, reports, resp1) - applyRecoveryPlan(c, 2, reports, resp2) + applyRecoveryPlan(re, 1, reports, resp1) + applyRecoveryPlan(re, 2, reports, resp2) recoveryController.HandleStoreHeartbeat(req1, resp1) recoveryController.HandleStoreHeartbeat(req2, resp2) - c.Assert(recoveryController.GetStage(), Equals, demoteFailedVoter) + re.Equal(demoteFailedVoter, recoveryController.GetStage()) } -func (s *testUnsafeRecoverySuite) TestAffectedTableID(c *C) { +func TestAffectedTableID(t *testing.T) { + re := 
require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, _ := newTestScheduleConfig() - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) - cluster.coordinator = newCoordinator(s.ctx, cluster, hbstream.NewTestHeartbeatStreams(s.ctx, cluster.meta.GetId(), cluster, true)) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + cluster.coordinator = newCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.meta.GetId(), cluster, true)) cluster.coordinator.run() for _, store := range newTestStores(3, "6.0.0") { - c.Assert(cluster.PutStore(store.GetMeta()), IsNil) + re.NoError(cluster.PutStore(store.GetMeta())) } recoveryController := newUnsafeRecoveryController(cluster) - c.Assert(recoveryController.RemoveFailedStores(map[uint64]struct{}{ + re.NoError(recoveryController.RemoveFailedStores(map[uint64]struct{}{ 2: {}, 3: {}, - }, 60), IsNil) + }, 60)) reports := map[uint64]*pdpb.StoreReport{ 1: { @@ -429,26 +431,30 @@ func (s *testUnsafeRecoverySuite) TestAffectedTableID(c *C) { }, } - advanceUntilFinished(c, recoveryController, reports) + advanceUntilFinished(re, recoveryController, reports) - c.Assert(len(recoveryController.affectedTableIDs), Equals, 1) + re.Equal(1, len(recoveryController.affectedTableIDs)) _, exists := recoveryController.affectedTableIDs[6] - c.Assert(exists, IsTrue) + re.True(exists) } -func (s *testUnsafeRecoverySuite) TestForceLeaderForCommitMerge(c *C) { +func TestForceLeaderForCommitMerge(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, _ := newTestScheduleConfig() - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) - cluster.coordinator = newCoordinator(s.ctx, cluster, hbstream.NewTestHeartbeatStreams(s.ctx, cluster.meta.GetId(), cluster, true)) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + cluster.coordinator = newCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.meta.GetId(), cluster, true)) cluster.coordinator.run() for _, store := range newTestStores(3, "6.0.0") { - c.Assert(cluster.PutStore(store.GetMeta()), IsNil) + re.NoError(cluster.PutStore(store.GetMeta())) } recoveryController := newUnsafeRecoveryController(cluster) - c.Assert(recoveryController.RemoveFailedStores(map[uint64]struct{}{ + re.NoError(recoveryController.RemoveFailedStores(map[uint64]struct{}{ 2: {}, 3: {}, - }, 60), IsNil) + }, 60)) reports := map[uint64]*pdpb.StoreReport{ 1: { @@ -483,44 +489,48 @@ func (s *testUnsafeRecoverySuite) TestForceLeaderForCommitMerge(c *C) { resp := &pdpb.StoreHeartbeatResponse{} req.StoreReport.Step = 1 recoveryController.HandleStoreHeartbeat(req, resp) - c.Assert(recoveryController.GetStage(), Equals, forceLeaderForCommitMerge) + re.Equal(forceLeaderForCommitMerge, recoveryController.GetStage()) // force leader on regions of commit merge first - c.Assert(resp.RecoveryPlan, NotNil) - c.Assert(resp.RecoveryPlan.ForceLeader, NotNil) - c.Assert(len(resp.RecoveryPlan.ForceLeader.EnterForceLeaders), Equals, 1) - c.Assert(resp.RecoveryPlan.ForceLeader.EnterForceLeaders[0], Equals, uint64(1002)) - c.Assert(resp.RecoveryPlan.ForceLeader.FailedStores, NotNil) - applyRecoveryPlan(c, 1, reports, resp) + 
re.NotNil(resp.RecoveryPlan) + re.NotNil(resp.RecoveryPlan.ForceLeader) + re.Equal(1, len(resp.RecoveryPlan.ForceLeader.EnterForceLeaders)) + re.Equal(uint64(1002), resp.RecoveryPlan.ForceLeader.EnterForceLeaders[0]) + re.NotNil(resp.RecoveryPlan.ForceLeader.FailedStores) + applyRecoveryPlan(re, 1, reports, resp) recoveryController.HandleStoreHeartbeat(req, resp) - c.Assert(recoveryController.GetStage(), Equals, forceLeader) + re.Equal(forceLeader, recoveryController.GetStage()) // force leader on the rest regions - c.Assert(resp.RecoveryPlan, NotNil) - c.Assert(resp.RecoveryPlan.ForceLeader, NotNil) - c.Assert(len(resp.RecoveryPlan.ForceLeader.EnterForceLeaders), Equals, 1) - c.Assert(resp.RecoveryPlan.ForceLeader.EnterForceLeaders[0], Equals, uint64(1001)) - c.Assert(resp.RecoveryPlan.ForceLeader.FailedStores, NotNil) - applyRecoveryPlan(c, 1, reports, resp) + re.NotNil(resp.RecoveryPlan) + re.NotNil(resp.RecoveryPlan.ForceLeader) + re.Equal(1, len(resp.RecoveryPlan.ForceLeader.EnterForceLeaders)) + re.Equal(uint64(1001), resp.RecoveryPlan.ForceLeader.EnterForceLeaders[0]) + re.NotNil(resp.RecoveryPlan.ForceLeader.FailedStores) + applyRecoveryPlan(re, 1, reports, resp) recoveryController.HandleStoreHeartbeat(req, resp) - c.Assert(recoveryController.GetStage(), Equals, demoteFailedVoter) + re.Equal(demoteFailedVoter, recoveryController.GetStage()) } -func (s *testUnsafeRecoverySuite) TestOneLearner(c *C) { +func TestOneLearner(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, _ := newTestScheduleConfig() - cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) - cluster.coordinator = newCoordinator(s.ctx, cluster, hbstream.NewTestHeartbeatStreams(s.ctx, cluster.meta.GetId(), cluster, true)) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + cluster.coordinator = newCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.meta.GetId(), cluster, true)) cluster.coordinator.run() for _, store := range newTestStores(3, "6.0.0") { - c.Assert(cluster.PutStore(store.GetMeta()), IsNil) + re.NoError(cluster.PutStore(store.GetMeta())) } recoveryController := newUnsafeRecoveryController(cluster) - c.Assert(recoveryController.RemoveFailedStores(map[uint64]struct{}{ + re.NoError(recoveryController.RemoveFailedStores(map[uint64]struct{}{ 2: {}, 3: {}, - }, 60), IsNil) + }, 60)) reports := map[uint64]*pdpb.StoreReport{ 1: {PeerReports: []*pdpb.PeerReport{ @@ -535,7 +545,7 @@ func (s *testUnsafeRecoverySuite) TestOneLearner(c *C) { }}, } - advanceUntilFinished(c, recoveryController, reports) + advanceUntilFinished(re, recoveryController, reports) expects := map[uint64]*pdpb.StoreReport{ 1: {PeerReports: []*pdpb.PeerReport{ @@ -552,29 +562,33 @@ func (s *testUnsafeRecoverySuite) TestOneLearner(c *C) { for storeID, report := range reports { if result, ok := expects[storeID]; ok { - c.Assert(report.PeerReports, DeepEquals, result.PeerReports) + re.Equal(result.PeerReports, report.PeerReports) } else { - c.Assert(len(report.PeerReports), Equals, 0) + re.Empty(len(report.PeerReports)) } } } -func (s *testUnsafeRecoverySuite) TestTiflashLearnerPeer(c *C) { +func TestTiflashLearnerPeer(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + _, opt, _ := newTestScheduleConfig() - cluster := 
newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) - cluster.coordinator = newCoordinator(s.ctx, cluster, hbstream.NewTestHeartbeatStreams(s.ctx, cluster.meta.GetId(), cluster, true)) + cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster()) + cluster.coordinator = newCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.meta.GetId(), cluster, true)) cluster.coordinator.run() for _, store := range newTestStores(5, "6.0.0") { if store.GetID() == 3 { store.GetMeta().Labels = []*metapb.StoreLabel{{Key: "engine", Value: "tiflash"}} } - c.Assert(cluster.PutStore(store.GetMeta()), IsNil) + re.NoError(cluster.PutStore(store.GetMeta())) } recoveryController := newUnsafeRecoveryController(cluster) - c.Assert(recoveryController.RemoveFailedStores(map[uint64]struct{}{ + re.NoError(recoveryController.RemoveFailedStores(map[uint64]struct{}{ 4: {}, 5: {}, - }, 60), IsNil) + }, 60)) reports := map[uint64]*pdpb.StoreReport{ 1: {PeerReports: []*pdpb.PeerReport{ @@ -650,7 +664,7 @@ func (s *testUnsafeRecoverySuite) TestTiflashLearnerPeer(c *C) { }}, } - advanceUntilFinished(c, recoveryController, reports) + advanceUntilFinished(re, recoveryController, reports) expects := map[uint64]*pdpb.StoreReport{ 1: {PeerReports: []*pdpb.PeerReport{ @@ -704,7 +718,7 @@ func (s *testUnsafeRecoverySuite) TestTiflashLearnerPeer(c *C) { for i, p := range report.PeerReports { // As the store of newly created region is not fixed, check it separately if p.RegionState.Region.GetId() == 1 { - c.Assert(p, DeepEquals, &pdpb.PeerReport{ + re.Equal(&pdpb.PeerReport{ RaftState: &raft_serverpb.RaftLocalState{LastIndex: 10, HardState: &eraftpb.HardState{Term: 1, Commit: 10}}, RegionState: &raft_serverpb.RegionLocalState{ Region: &metapb.Region{ @@ -717,32 +731,36 @@ func (s *testUnsafeRecoverySuite) TestTiflashLearnerPeer(c *C) { }, }, }, - }) + }, p) report.PeerReports = append(report.PeerReports[:i], report.PeerReports[i+1:]...) 
 				break
 			}
 		}
 		if result, ok := expects[storeID]; ok {
-			c.Assert(report.PeerReports, DeepEquals, result.PeerReports)
+			re.Equal(result.PeerReports, report.PeerReports)
 		} else {
-			c.Assert(len(report.PeerReports), Equals, 0)
+			re.Empty(report.PeerReports)
 		}
 	}
 }
 
-func (s *testUnsafeRecoverySuite) TestUninitializedPeer(c *C) {
+func TestUninitializedPeer(t *testing.T) {
+	re := require.New(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	_, opt, _ := newTestScheduleConfig()
-	cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster())
-	cluster.coordinator = newCoordinator(s.ctx, cluster, hbstream.NewTestHeartbeatStreams(s.ctx, cluster.meta.GetId(), cluster, true))
+	cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster())
+	cluster.coordinator = newCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.meta.GetId(), cluster, true))
 	cluster.coordinator.run()
 	for _, store := range newTestStores(3, "6.0.0") {
-		c.Assert(cluster.PutStore(store.GetMeta()), IsNil)
+		re.NoError(cluster.PutStore(store.GetMeta()))
 	}
 	recoveryController := newUnsafeRecoveryController(cluster)
-	c.Assert(recoveryController.RemoveFailedStores(map[uint64]struct{}{
+	re.NoError(recoveryController.RemoveFailedStores(map[uint64]struct{}{
 		2: {},
 		3: {},
-	}, 60), IsNil)
+	}, 60))
 
 	reports := map[uint64]*pdpb.StoreReport{
 		1: {PeerReports: []*pdpb.PeerReport{
@@ -756,7 +774,7 @@ func (s *testUnsafeRecoverySuite) TestUninitializedPeer(c *C) {
 		}},
 	}
 
-	advanceUntilFinished(c, recoveryController, reports)
+	advanceUntilFinished(re, recoveryController, reports)
 
 	expects := map[uint64]*pdpb.StoreReport{
 		1: {PeerReports: []*pdpb.PeerReport{
@@ -775,26 +793,30 @@ func (s *testUnsafeRecoverySuite) TestUninitializedPeer(c *C) {
 
 	for storeID, report := range reports {
 		if result, ok := expects[storeID]; ok {
-			c.Assert(report.PeerReports, DeepEquals, result.PeerReports)
+			re.Equal(result.PeerReports, report.PeerReports)
 		} else {
-			c.Assert(len(report.PeerReports), Equals, 0)
+			re.Empty(report.PeerReports)
 		}
 	}
 }
 
-func (s *testUnsafeRecoverySuite) TestJointState(c *C) {
+func TestJointState(t *testing.T) {
+	re := require.New(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	_, opt, _ := newTestScheduleConfig()
-	cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster())
-	cluster.coordinator = newCoordinator(s.ctx, cluster, hbstream.NewTestHeartbeatStreams(s.ctx, cluster.meta.GetId(), cluster, true))
+	cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster())
+	cluster.coordinator = newCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.meta.GetId(), cluster, true))
 	cluster.coordinator.run()
 	for _, store := range newTestStores(5, "6.0.0") {
-		c.Assert(cluster.PutStore(store.GetMeta()), IsNil)
+		re.NoError(cluster.PutStore(store.GetMeta()))
 	}
 	recoveryController := newUnsafeRecoveryController(cluster)
-	c.Assert(recoveryController.RemoveFailedStores(map[uint64]struct{}{
+	re.NoError(recoveryController.RemoveFailedStores(map[uint64]struct{}{
 		4: {},
 		5: {},
-	}, 60), IsNil)
+	}, 60))
 
 	reports := map[uint64]*pdpb.StoreReport{
 		1: {PeerReports: []*pdpb.PeerReport{
@@ -878,7 +900,7 @@ func (s *testUnsafeRecoverySuite) TestJointState(c *C) {
 		}},
 	}
 
-	advanceUntilFinished(c, recoveryController, reports)
+	advanceUntilFinished(re, recoveryController, reports)
 
 	expects := map[uint64]*pdpb.StoreReport{
 		1: {PeerReports: []*pdpb.PeerReport{
@@ -964,50 +986,58 @@ func (s *testUnsafeRecoverySuite) TestJointState(c *C) {
 
 	for storeID, report := range reports {
 		if result, ok := expects[storeID]; ok {
-			c.Assert(report.PeerReports, DeepEquals, result.PeerReports)
+			re.Equal(result.PeerReports, report.PeerReports)
 		} else {
-			c.Assert(len(report.PeerReports), Equals, 0)
+			re.Empty(report.PeerReports)
 		}
 	}
 }
 
-func (s *testUnsafeRecoverySuite) TestTimeout(c *C) {
+func TestTimeout(t *testing.T) {
+	re := require.New(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	_, opt, _ := newTestScheduleConfig()
-	cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster())
-	cluster.coordinator = newCoordinator(s.ctx, cluster, hbstream.NewTestHeartbeatStreams(s.ctx, cluster.meta.GetId(), cluster, true))
+	cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster())
+	cluster.coordinator = newCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.meta.GetId(), cluster, true))
 	cluster.coordinator.run()
 	for _, store := range newTestStores(3, "6.0.0") {
-		c.Assert(cluster.PutStore(store.GetMeta()), IsNil)
+		re.NoError(cluster.PutStore(store.GetMeta()))
 	}
 	recoveryController := newUnsafeRecoveryController(cluster)
-	c.Assert(recoveryController.RemoveFailedStores(map[uint64]struct{}{
+	re.NoError(recoveryController.RemoveFailedStores(map[uint64]struct{}{
 		2: {},
 		3: {},
-	}, 1), IsNil)
+	}, 1))
 
 	time.Sleep(time.Second)
 	req := newStoreHeartbeat(1, nil)
 	resp := &pdpb.StoreHeartbeatResponse{}
 	recoveryController.HandleStoreHeartbeat(req, resp)
-	c.Assert(recoveryController.GetStage(), Equals, exitForceLeader)
+	re.Equal(exitForceLeader, recoveryController.GetStage())
 	req.StoreReport = &pdpb.StoreReport{Step: 2}
 	recoveryController.HandleStoreHeartbeat(req, resp)
-	c.Assert(recoveryController.GetStage(), Equals, failed)
+	re.Equal(failed, recoveryController.GetStage())
 }
 
-func (s *testUnsafeRecoverySuite) TestExitForceLeader(c *C) {
+func TestExitForceLeader(t *testing.T) {
+	re := require.New(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	_, opt, _ := newTestScheduleConfig()
-	cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster())
-	cluster.coordinator = newCoordinator(s.ctx, cluster, hbstream.NewTestHeartbeatStreams(s.ctx, cluster.meta.GetId(), cluster, true))
+	cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster())
+	cluster.coordinator = newCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.meta.GetId(), cluster, true))
 	cluster.coordinator.run()
 	for _, store := range newTestStores(3, "6.0.0") {
-		c.Assert(cluster.PutStore(store.GetMeta()), IsNil)
+		re.NoError(cluster.PutStore(store.GetMeta()))
 	}
 	recoveryController := newUnsafeRecoveryController(cluster)
-	c.Assert(recoveryController.RemoveFailedStores(map[uint64]struct{}{
+	re.NoError(recoveryController.RemoveFailedStores(map[uint64]struct{}{
 		2: {},
 		3: {},
-	}, 60), IsNil)
+	}, 60))
 
 	reports := map[uint64]*pdpb.StoreReport{
 		1: {
@@ -1032,18 +1062,18 @@ func (s *testUnsafeRecoverySuite) TestExitForceLeader(c *C) {
 		req.StoreReport = report
 		resp := &pdpb.StoreHeartbeatResponse{}
 		recoveryController.HandleStoreHeartbeat(req, resp)
-		applyRecoveryPlan(c, storeID, reports, resp)
+		applyRecoveryPlan(re, storeID, reports, resp)
 	}
-	c.Assert(recoveryController.GetStage(), Equals, exitForceLeader)
+	re.Equal(exitForceLeader, recoveryController.GetStage())
 
 	for storeID, report := range reports {
 		req := newStoreHeartbeat(storeID, report)
 		req.StoreReport = report
 		resp := &pdpb.StoreHeartbeatResponse{}
 		recoveryController.HandleStoreHeartbeat(req, resp)
-		applyRecoveryPlan(c, storeID, reports, resp)
+		applyRecoveryPlan(re, storeID, reports, resp)
 	}
-	c.Assert(recoveryController.GetStage(), Equals, finished)
+	re.Equal(finished, recoveryController.GetStage())
 
 	expects := map[uint64]*pdpb.StoreReport{
 		1: {
@@ -1062,26 +1092,30 @@ func (s *testUnsafeRecoverySuite) TestExitForceLeader(c *C) {
 
 	for storeID, report := range reports {
 		if result, ok := expects[storeID]; ok {
-			c.Assert(report.PeerReports, DeepEquals, result.PeerReports)
+			re.Equal(result.PeerReports, report.PeerReports)
 		} else {
-			c.Assert(len(report.PeerReports), Equals, 0)
+			re.Empty(report.PeerReports)
 		}
 	}
 }
 
-func (s *testUnsafeRecoverySuite) TestStep(c *C) {
+func TestStep(t *testing.T) {
+	re := require.New(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	_, opt, _ := newTestScheduleConfig()
-	cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster())
-	cluster.coordinator = newCoordinator(s.ctx, cluster, hbstream.NewTestHeartbeatStreams(s.ctx, cluster.meta.GetId(), cluster, true))
+	cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster())
+	cluster.coordinator = newCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.meta.GetId(), cluster, true))
 	cluster.coordinator.run()
 	for _, store := range newTestStores(3, "6.0.0") {
-		c.Assert(cluster.PutStore(store.GetMeta()), IsNil)
+		re.NoError(cluster.PutStore(store.GetMeta()))
 	}
 	recoveryController := newUnsafeRecoveryController(cluster)
-	c.Assert(recoveryController.RemoveFailedStores(map[uint64]struct{}{
+	re.NoError(recoveryController.RemoveFailedStores(map[uint64]struct{}{
 		2: {},
 		3: {},
-	}, 60), IsNil)
+	}, 60))
 
 	reports := map[uint64]*pdpb.StoreReport{
 		1: {
@@ -1102,37 +1136,41 @@ func (s *testUnsafeRecoverySuite) TestStep(c *C) {
 	resp := &pdpb.StoreHeartbeatResponse{}
 	recoveryController.HandleStoreHeartbeat(req, resp)
 	// step is not set, ignore
-	c.Assert(recoveryController.GetStage(), Equals, collectReport)
+	re.Equal(collectReport, recoveryController.GetStage())
 
 	// valid store report
 	req.StoreReport.Step = 1
 	recoveryController.HandleStoreHeartbeat(req, resp)
-	c.Assert(recoveryController.GetStage(), Equals, forceLeader)
+	re.Equal(forceLeader, recoveryController.GetStage())
 
 	// duplicate report with same step, ignore
 	recoveryController.HandleStoreHeartbeat(req, resp)
-	c.Assert(recoveryController.GetStage(), Equals, forceLeader)
-	applyRecoveryPlan(c, 1, reports, resp)
+	re.Equal(forceLeader, recoveryController.GetStage())
+	applyRecoveryPlan(re, 1, reports, resp)
 	recoveryController.HandleStoreHeartbeat(req, resp)
-	c.Assert(recoveryController.GetStage(), Equals, demoteFailedVoter)
-	applyRecoveryPlan(c, 1, reports, resp)
+	re.Equal(demoteFailedVoter, recoveryController.GetStage())
+	applyRecoveryPlan(re, 1, reports, resp)
 	recoveryController.HandleStoreHeartbeat(req, resp)
-	c.Assert(recoveryController.GetStage(), Equals, finished)
+	re.Equal(finished, recoveryController.GetStage())
 }
 
-func (s *testUnsafeRecoverySuite) TestOnHealthyRegions(c *C) {
+func TestOnHealthyRegions(t *testing.T) {
+	re := require.New(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	_, opt, _ := newTestScheduleConfig()
-	cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster())
-	cluster.coordinator = newCoordinator(s.ctx, cluster, hbstream.NewTestHeartbeatStreams(s.ctx, cluster.meta.GetId(), cluster, true))
+	cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster())
+	cluster.coordinator = newCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.meta.GetId(), cluster, true))
 	cluster.coordinator.run()
 	for _, store := range newTestStores(5, "6.0.0") {
-		c.Assert(cluster.PutStore(store.GetMeta()), IsNil)
+		re.NoError(cluster.PutStore(store.GetMeta()))
 	}
 	recoveryController := newUnsafeRecoveryController(cluster)
-	c.Assert(recoveryController.RemoveFailedStores(map[uint64]struct{}{
+	re.NoError(recoveryController.RemoveFailedStores(map[uint64]struct{}{
 		4: {},
 		5: {},
-	}, 60), IsNil)
+	}, 60))
 
 	reports := map[uint64]*pdpb.StoreReport{
 		1: {PeerReports: []*pdpb.PeerReport{
@@ -1166,17 +1204,17 @@ func (s *testUnsafeRecoverySuite) TestOnHealthyRegions(c *C) {
 							{Id: 11, StoreId: 1}, {Id: 21, StoreId: 2}, {Id: 31, StoreId: 3}}}}},
 		}},
 	}
-	c.Assert(recoveryController.GetStage(), Equals, collectReport)
+	re.Equal(collectReport, recoveryController.GetStage())
 	// require peer report
 	for storeID := range reports {
 		req := newStoreHeartbeat(storeID, nil)
 		resp := &pdpb.StoreHeartbeatResponse{}
 		recoveryController.HandleStoreHeartbeat(req, resp)
-		c.Assert(resp.RecoveryPlan, NotNil)
-		c.Assert(len(resp.RecoveryPlan.Creates), Equals, 0)
-		c.Assert(len(resp.RecoveryPlan.Demotes), Equals, 0)
-		c.Assert(resp.RecoveryPlan.ForceLeader, IsNil)
-		applyRecoveryPlan(c, storeID, reports, resp)
+		re.NotNil(resp.RecoveryPlan)
+		re.Empty(resp.RecoveryPlan.Creates)
+		re.Empty(resp.RecoveryPlan.Demotes)
+		re.Nil(resp.RecoveryPlan.ForceLeader)
+		applyRecoveryPlan(re, storeID, reports, resp)
 	}
 
 	// receive all reports and dispatch no plan
@@ -1185,26 +1223,30 @@ func (s *testUnsafeRecoverySuite) TestOnHealthyRegions(c *C) {
 		req.StoreReport = report
 		resp := &pdpb.StoreHeartbeatResponse{}
 		recoveryController.HandleStoreHeartbeat(req, resp)
-		c.Assert(resp.RecoveryPlan, IsNil)
-		applyRecoveryPlan(c, storeID, reports, resp)
+		re.Nil(resp.RecoveryPlan)
+		applyRecoveryPlan(re, storeID, reports, resp)
 	}
 	// nothing to do, finish directly
-	c.Assert(recoveryController.GetStage(), Equals, finished)
+	re.Equal(finished, recoveryController.GetStage())
 }
 
-func (s *testUnsafeRecoverySuite) TestCreateEmptyRegion(c *C) {
+func TestCreateEmptyRegion(t *testing.T) {
+	re := require.New(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	_, opt, _ := newTestScheduleConfig()
-	cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster())
-	cluster.coordinator = newCoordinator(s.ctx, cluster, hbstream.NewTestHeartbeatStreams(s.ctx, cluster.meta.GetId(), cluster, true))
+	cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster())
+	cluster.coordinator = newCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.meta.GetId(), cluster, true))
 	cluster.coordinator.run()
 	for _, store := range newTestStores(3, "6.0.0") {
-		c.Assert(cluster.PutStore(store.GetMeta()), IsNil)
+		re.NoError(cluster.PutStore(store.GetMeta()))
 	}
 	recoveryController := newUnsafeRecoveryController(cluster)
-	c.Assert(recoveryController.RemoveFailedStores(map[uint64]struct{}{
+	re.NoError(recoveryController.RemoveFailedStores(map[uint64]struct{}{
 		2: {},
 		3: {},
-	}, 60), IsNil)
+	}, 60))
 
 	reports := map[uint64]*pdpb.StoreReport{
 		1: {PeerReports: []*pdpb.PeerReport{
@@ -1231,7 +1273,7 @@ func (s *testUnsafeRecoverySuite) TestCreateEmptyRegion(c *C) {
 		}},
 	}
 
-	advanceUntilFinished(c, recoveryController, reports)
+	advanceUntilFinished(re, recoveryController, reports)
 
 	expects := map[uint64]*pdpb.StoreReport{
 		1: {PeerReports: []*pdpb.PeerReport{
@@ -1278,9 +1320,9 @@ func (s *testUnsafeRecoverySuite) TestCreateEmptyRegion(c *C) {
 
 	for storeID, report := range reports {
 		if expect, ok := expects[storeID]; ok {
-			c.Assert(report.PeerReports, DeepEquals, expect.PeerReports)
+			re.Equal(expect.PeerReports, report.PeerReports)
 		} else {
-			c.Assert(len(report.PeerReports), Equals, 0)
+			re.Empty(report.PeerReports)
 		}
 	}
 }
@@ -1297,19 +1339,23 @@ func (s *testUnsafeRecoverySuite) TestCreateEmptyRegion(c *C) {
 // | Store 4, 5 and 6 fail | A=[a,m), B=[m,z) | A=[a,z) | C=[a,g) | fail | fail | fail |
 // +──────────────────────────────────+───────────────────+───────────────────+───────────────────+───────────────────+──────────+──────────+
 
-func (s *testUnsafeRecoverySuite) TestRangeOverlap1(c *C) {
+func TestRangeOverlap1(t *testing.T) {
+	re := require.New(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	_, opt, _ := newTestScheduleConfig()
-	cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster())
-	cluster.coordinator = newCoordinator(s.ctx, cluster, hbstream.NewTestHeartbeatStreams(s.ctx, cluster.meta.GetId(), cluster, true))
+	cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster())
+	cluster.coordinator = newCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.meta.GetId(), cluster, true))
 	cluster.coordinator.run()
 	for _, store := range newTestStores(5, "6.0.0") {
-		c.Assert(cluster.PutStore(store.GetMeta()), IsNil)
+		re.NoError(cluster.PutStore(store.GetMeta()))
 	}
 	recoveryController := newUnsafeRecoveryController(cluster)
-	c.Assert(recoveryController.RemoveFailedStores(map[uint64]struct{}{
+	re.NoError(recoveryController.RemoveFailedStores(map[uint64]struct{}{
 		4: {},
 		5: {},
-	}, 60), IsNil)
+	}, 60))
 
 	reports := map[uint64]*pdpb.StoreReport{
 		1: {PeerReports: []*pdpb.PeerReport{
@@ -1350,7 +1396,7 @@ func (s *testUnsafeRecoverySuite) TestRangeOverlap1(c *C) {
 		}},
 	}
 
-	advanceUntilFinished(c, recoveryController, reports)
+	advanceUntilFinished(re, recoveryController, reports)
 
 	expects := map[uint64]*pdpb.StoreReport{
 		1: {PeerReports: []*pdpb.PeerReport{
@@ -1381,26 +1427,30 @@ func (s *testUnsafeRecoverySuite) TestRangeOverlap1(c *C) {
 
 	for storeID, report := range reports {
 		if result, ok := expects[storeID]; ok {
-			c.Assert(report.PeerReports, DeepEquals, result.PeerReports)
+			re.Equal(result.PeerReports, report.PeerReports)
 		} else {
-			c.Assert(len(report.PeerReports), Equals, 0)
+			re.Empty(report.PeerReports)
 		}
 	}
 }
 
-func (s *testUnsafeRecoverySuite) TestRangeOverlap2(c *C) {
+func TestRangeOverlap2(t *testing.T) {
+	re := require.New(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	_, opt, _ := newTestScheduleConfig()
-	cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster())
-	cluster.coordinator = newCoordinator(s.ctx, cluster, hbstream.NewTestHeartbeatStreams(s.ctx, cluster.meta.GetId(), cluster, true))
+	cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster())
+	cluster.coordinator = newCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.meta.GetId(), cluster, true))
 	cluster.coordinator.run()
 	for _, store := range newTestStores(5, "6.0.0") {
-		c.Assert(cluster.PutStore(store.GetMeta()), IsNil)
+		re.NoError(cluster.PutStore(store.GetMeta()))
 	}
 	recoveryController := newUnsafeRecoveryController(cluster)
-	c.Assert(recoveryController.RemoveFailedStores(map[uint64]struct{}{
+	re.NoError(recoveryController.RemoveFailedStores(map[uint64]struct{}{
 		4: {},
 		5: {},
-	}, 60), IsNil)
+	}, 60))
 
 	reports := map[uint64]*pdpb.StoreReport{
 		1: {PeerReports: []*pdpb.PeerReport{
@@ -1441,7 +1491,7 @@ func (s *testUnsafeRecoverySuite) TestRangeOverlap2(c *C) {
 		}},
 	}
 
-	advanceUntilFinished(c, recoveryController, reports)
+	advanceUntilFinished(re, recoveryController, reports)
 
 	expects := map[uint64]*pdpb.StoreReport{
 		1: {PeerReports: []*pdpb.PeerReport{
@@ -1471,72 +1521,80 @@ func (s *testUnsafeRecoverySuite) TestRangeOverlap2(c *C) {
 
 	for storeID, report := range reports {
 		if result, ok := expects[storeID]; ok {
-			c.Assert(report.PeerReports, DeepEquals, result.PeerReports)
+			re.Equal(result.PeerReports, report.PeerReports)
 		} else {
-			c.Assert(len(report.PeerReports), Equals, 0)
+			re.Empty(report.PeerReports)
 		}
 	}
 }
 
-func (s *testUnsafeRecoverySuite) TestRemoveFailedStores(c *C) {
+func TestRemoveFailedStores(t *testing.T) {
+	re := require.New(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	_, opt, _ := newTestScheduleConfig()
-	cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster())
-	cluster.coordinator = newCoordinator(s.ctx, cluster, hbstream.NewTestHeartbeatStreams(s.ctx, cluster.meta.GetId(), cluster, true))
+	cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster())
+	cluster.coordinator = newCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.meta.GetId(), cluster, true))
 	cluster.coordinator.run()
 	stores := newTestStores(2, "5.3.0")
 	stores[1] = stores[1].Clone(core.SetLastHeartbeatTS(time.Now()))
 	for _, store := range stores {
-		c.Assert(cluster.PutStore(store.GetMeta()), IsNil)
+		re.NoError(cluster.PutStore(store.GetMeta()))
 	}
 	recoveryController := newUnsafeRecoveryController(cluster)
 
 	// Store 3 doesn't exist, reject to remove.
-	c.Assert(recoveryController.RemoveFailedStores(map[uint64]struct{}{
+	re.Error(recoveryController.RemoveFailedStores(map[uint64]struct{}{
 		1: {},
 		3: {},
-	}, 60), NotNil)
+	}, 60))
 
-	c.Assert(recoveryController.RemoveFailedStores(map[uint64]struct{}{
+	re.NoError(recoveryController.RemoveFailedStores(map[uint64]struct{}{
 		1: {},
-	}, 60), IsNil)
-	c.Assert(cluster.GetStore(uint64(1)).IsRemoved(), IsTrue)
+	}, 60))
+	re.True(cluster.GetStore(uint64(1)).IsRemoved())
 	for _, s := range cluster.GetSchedulers() {
 		paused, err := cluster.IsSchedulerAllowed(s)
 		if s != "split-bucket-scheduler" {
-			c.Assert(err, IsNil)
-			c.Assert(paused, IsTrue)
+			re.NoError(err)
+			re.True(paused)
 		}
 	}
 
 	// Store 2's last heartbeat is recent, and is not allowed to be removed.
-	c.Assert(recoveryController.RemoveFailedStores(
+	re.Error(recoveryController.RemoveFailedStores(
 		map[uint64]struct{}{
 			2: {},
-		}, 60), NotNil)
+		}, 60))
 }
 
-func (s *testUnsafeRecoverySuite) TestSplitPaused(c *C) {
+func TestSplitPaused(t *testing.T) {
+	re := require.New(t)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
 	_, opt, _ := newTestScheduleConfig()
-	cluster := newTestRaftCluster(s.ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster())
+	cluster := newTestRaftCluster(ctx, mockid.NewIDAllocator(), opt, storage.NewStorageWithMemoryBackend(), core.NewBasicCluster())
 	recoveryController := newUnsafeRecoveryController(cluster)
 	cluster.Lock()
 	cluster.unsafeRecoveryController = recoveryController
-	cluster.coordinator = newCoordinator(s.ctx, cluster, hbstream.NewTestHeartbeatStreams(s.ctx, cluster.meta.GetId(), cluster, true))
+	cluster.coordinator = newCoordinator(ctx, cluster, hbstream.NewTestHeartbeatStreams(ctx, cluster.meta.GetId(), cluster, true))
 	cluster.Unlock()
 	cluster.coordinator.run()
 	stores := newTestStores(2, "5.3.0")
 	stores[1] = stores[1].Clone(core.SetLastHeartbeatTS(time.Now()))
 	for _, store := range stores {
-		c.Assert(cluster.PutStore(store.GetMeta()), IsNil)
+		re.NoError(cluster.PutStore(store.GetMeta()))
 	}
 	failedStores := map[uint64]struct{}{
 		1: {},
 	}
-	c.Assert(recoveryController.RemoveFailedStores(failedStores, 60), IsNil)
+	re.NoError(recoveryController.RemoveFailedStores(failedStores, 60))
 	askSplitReq := &pdpb.AskSplitRequest{}
 	_, err := cluster.HandleAskSplit(askSplitReq)
-	c.Assert(err.Error(), Equals, "[PD:unsaferecovery:ErrUnsafeRecoveryIsRunning]unsafe recovery is running")
+	re.Equal("[PD:unsaferecovery:ErrUnsafeRecoveryIsRunning]unsafe recovery is running", err.Error())
 	askBatchSplitReq := &pdpb.AskBatchSplitRequest{}
 	_, err = cluster.HandleAskBatchSplit(askBatchSplitReq)
-	c.Assert(err.Error(), Equals, "[PD:unsaferecovery:ErrUnsafeRecoveryIsRunning]unsafe recovery is running")
+	re.Equal("[PD:unsaferecovery:ErrUnsafeRecoveryIsRunning]unsafe recovery is running", err.Error())
 }
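
Editor's note (not part of the patch): every test above is migrated with the same mechanical pattern. The gocheck suite receiver and its suite-managed context are replaced by a standalone testing.T function that builds its own require.Assertions handle and a test-scoped context, and each c.Assert(obtained, Checker, expected) becomes the matching require call with the expected value first. The sketch below is illustrative only, assuming nothing beyond testify itself; the package name and TestMigrationPattern do not exist in the PD code base.

package cluster_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
)

// TestMigrationPattern mirrors the shape of the migrated tests above: a
// per-test require handle, a per-test cancellable context, and testify's
// (expected, actual) argument order.
func TestMigrationPattern(t *testing.T) {
	re := require.New(t)                                     // replaces the gocheck *C receiver
	ctx, cancel := context.WithCancel(context.Background()) // replaces the suite's context setup
	defer cancel()                                           // replaces the suite's teardown

	var reports []string
	re.NoError(ctx.Err())     // was: c.Assert(err, IsNil)
	re.Empty(reports)         // was: c.Assert(len(reports), Equals, 0)
	re.Equal(0, len(reports)) // was: c.Assert(len(reports), Equals, 0); expected value comes first
	re.True(ctx.Err() == nil) // was: c.Assert(..., IsTrue)
}

Owning the context inside each test keeps the tests independent of shared suite state, which is what allows the suite type and its setup/teardown hooks to be deleted outright.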