diff --git a/tests/server/id/id_test.go b/tests/server/id/id_test.go index b624ceb056f..d9279cd3616 100644 --- a/tests/server/id/id_test.go +++ b/tests/server/id/id_test.go @@ -19,53 +19,37 @@ import ( "sync" "testing" - . "github.com/pingcap/check" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/testutil" "github.com/tikv/pd/tests" "go.uber.org/goleak" ) -func Test(t *testing.T) { - TestingT(t) -} - func TestMain(m *testing.M) { goleak.VerifyTestMain(m, testutil.LeakOptions...) } const allocStep = uint64(1000) -var _ = Suite(&testAllocIDSuite{}) - -type testAllocIDSuite struct { - ctx context.Context - cancel context.CancelFunc -} - -func (s *testAllocIDSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) -} - -func (s *testAllocIDSuite) TearDownSuite(c *C) { - s.cancel() -} - -func (s *testAllocIDSuite) TestID(c *C) { - cluster, err := tests.NewTestCluster(s.ctx, 1) - c.Assert(err, IsNil) +func TestID(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster, err := tests.NewTestCluster(ctx, 1) + re.NoError(err) defer cluster.Destroy() err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() leaderServer := cluster.GetServer(cluster.GetLeader()) var last uint64 for i := uint64(0); i < allocStep; i++ { id, err := leaderServer.GetAllocator().Alloc() - c.Assert(err, IsNil) - c.Assert(id, Greater, last) + re.NoError(err) + re.Greater(id, last) last = id } @@ -81,12 +65,12 @@ func (s *testAllocIDSuite) TestID(c *C) { for i := 0; i < 200; i++ { id, err := leaderServer.GetAllocator().Alloc() - c.Assert(err, IsNil) + re.NoError(err) m.Lock() _, ok := ids[id] ids[id] = struct{}{} m.Unlock() - c.Assert(ok, IsFalse) + re.False(ok) } }() } @@ -94,98 +78,107 @@ func (s *testAllocIDSuite) TestID(c *C) { wg.Wait() } -func (s *testAllocIDSuite) TestCommand(c *C) { - cluster, err := tests.NewTestCluster(s.ctx, 1) - c.Assert(err, IsNil) +func TestCommand(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster, err := tests.NewTestCluster(ctx, 1) + re.NoError(err) defer cluster.Destroy() err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() leaderServer := cluster.GetServer(cluster.GetLeader()) req := &pdpb.AllocIDRequest{Header: testutil.NewRequestHeader(leaderServer.GetClusterID())} - grpcPDClient := testutil.MustNewGrpcClient(c, leaderServer.GetAddr()) + grpcPDClient := testutil.MustNewGrpcClientWithTestify(re, leaderServer.GetAddr()) var last uint64 for i := uint64(0); i < 2*allocStep; i++ { resp, err := grpcPDClient.AllocID(context.Background(), req) - c.Assert(err, IsNil) - c.Assert(resp.GetId(), Greater, last) + re.NoError(err) + re.Greater(resp.GetId(), last) last = resp.GetId() } } -func (s *testAllocIDSuite) TestMonotonicID(c *C) { - cluster, err := tests.NewTestCluster(s.ctx, 2) - c.Assert(err, IsNil) +func TestMonotonicID(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster, err := tests.NewTestCluster(ctx, 2) + re.NoError(err) defer cluster.Destroy() err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() leaderServer := cluster.GetServer(cluster.GetLeader()) var last1 uint64 for i := uint64(0); i < 10; i++ { id, err := leaderServer.GetAllocator().Alloc() - c.Assert(err, IsNil) - 
c.Assert(id, Greater, last1) + re.NoError(err) + re.Greater(id, last1) last1 = id } err = cluster.ResignLeader() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() leaderServer = cluster.GetServer(cluster.GetLeader()) var last2 uint64 for i := uint64(0); i < 10; i++ { id, err := leaderServer.GetAllocator().Alloc() - c.Assert(err, IsNil) - c.Assert(id, Greater, last2) + re.NoError(err) + re.Greater(id, last2) last2 = id } err = cluster.ResignLeader() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() leaderServer = cluster.GetServer(cluster.GetLeader()) id, err := leaderServer.GetAllocator().Alloc() - c.Assert(err, IsNil) - c.Assert(id, Greater, last2) + re.NoError(err) + re.Greater(id, last2) var last3 uint64 for i := uint64(0); i < 1000; i++ { id, err := leaderServer.GetAllocator().Alloc() - c.Assert(err, IsNil) - c.Assert(id, Greater, last3) + re.NoError(err) + re.Greater(id, last3) last3 = id } } -func (s *testAllocIDSuite) TestPDRestart(c *C) { - cluster, err := tests.NewTestCluster(s.ctx, 1) - c.Assert(err, IsNil) +func TestPDRestart(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster, err := tests.NewTestCluster(ctx, 1) + re.NoError(err) defer cluster.Destroy() err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() leaderServer := cluster.GetServer(cluster.GetLeader()) var last uint64 for i := uint64(0); i < 10; i++ { id, err := leaderServer.GetAllocator().Alloc() - c.Assert(err, IsNil) - c.Assert(id, Greater, last) + re.NoError(err) + re.Greater(id, last) last = id } - c.Assert(leaderServer.Stop(), IsNil) - c.Assert(leaderServer.Run(), IsNil) + re.NoError(leaderServer.Stop()) + re.NoError(leaderServer.Run()) cluster.WaitLeader() for i := uint64(0); i < 10; i++ { id, err := leaderServer.GetAllocator().Alloc() - c.Assert(err, IsNil) - c.Assert(id, Greater, last) + re.NoError(err) + re.Greater(id, last) last = id } } diff --git a/tests/server/join/join_fail/join_fail_test.go b/tests/server/join/join_fail/join_fail_test.go index bc4e98abdce..26dca0d3b52 100644 --- a/tests/server/join/join_fail/join_fail_test.go +++ b/tests/server/join/join_fail/join_fail_test.go @@ -16,43 +16,29 @@ package join_fail_test import ( "context" - "strings" "testing" - . "github.com/pingcap/check" "github.com/pingcap/failpoint" - "github.com/tikv/pd/pkg/testutil" + "github.com/stretchr/testify/require" "github.com/tikv/pd/tests" - "go.uber.org/goleak" ) -func Test(t *testing.T) { - TestingT(t) -} - -func TestMain(m *testing.M) { - goleak.VerifyTestMain(m, testutil.LeakOptions...) -} - -var _ = Suite(&joinTestSuite{}) - -type joinTestSuite struct{} - -func (s *joinTestSuite) TestFailedPDJoinInStep1(c *C) { +func TestFailedPDJoinInStep1(t *testing.T) { + re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() cluster, err := tests.NewTestCluster(ctx, 1) defer cluster.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() // Join the second PD. 
- c.Assert(failpoint.Enable("github.com/tikv/pd/server/join/add-member-failed", `return`), IsNil) + re.NoError(failpoint.Enable("github.com/tikv/pd/server/join/add-member-failed", `return`)) _, err = cluster.Join(ctx) - c.Assert(err, NotNil) - c.Assert(strings.Contains(err.Error(), "join failed"), IsTrue) - c.Assert(failpoint.Disable("github.com/tikv/pd/server/join/add-member-failed"), IsNil) + re.Error(err) + re.Contains(err.Error(), "join failed") + re.NoError(failpoint.Disable("github.com/tikv/pd/server/join/add-member-failed")) } diff --git a/tests/server/join/join_test.go b/tests/server/join/join_test.go index 8cc9cdcdb34..e7e01d74668 100644 --- a/tests/server/join/join_test.go +++ b/tests/server/join/join_test.go @@ -21,119 +21,110 @@ import ( "testing" "time" - . "github.com/pingcap/check" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/etcdutil" "github.com/tikv/pd/server" "github.com/tikv/pd/server/join" "github.com/tikv/pd/tests" ) -func Test(t *testing.T) { - TestingT(t) -} - // TODO: enable it when we fix TestFailedAndDeletedPDJoinsPreviousCluster // func TestMain(m *testing.M) { // goleak.VerifyTestMain(m, testutil.LeakOptions...) // } -var _ = Suite(&joinTestSuite{}) - -type joinTestSuite struct { - ctx context.Context - cancel context.CancelFunc -} - -func (s *joinTestSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) - server.EtcdStartTimeout = 10 * time.Second -} - -func (s *joinTestSuite) TearDownSuite(c *C) { - s.cancel() -} - -func (s *joinTestSuite) TestSimpleJoin(c *C) { - cluster, err := tests.NewTestCluster(s.ctx, 1) +func TestSimpleJoin(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster, err := tests.NewTestCluster(ctx, 1) defer cluster.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() pd1 := cluster.GetServer("pd1") client := pd1.GetEtcdClient() members, err := etcdutil.ListEtcdMembers(client) - c.Assert(err, IsNil) - c.Assert(members.Members, HasLen, 1) + re.NoError(err) + re.Len(members.Members, 1) // Join the second PD. - pd2, err := cluster.Join(s.ctx) - c.Assert(err, IsNil) + pd2, err := cluster.Join(ctx) + re.NoError(err) err = pd2.Run() - c.Assert(err, IsNil) + re.NoError(err) _, err = os.Stat(path.Join(pd2.GetConfig().DataDir, "join")) - c.Assert(os.IsNotExist(err), IsFalse) + re.False(os.IsNotExist(err)) members, err = etcdutil.ListEtcdMembers(client) - c.Assert(err, IsNil) - c.Assert(members.Members, HasLen, 2) - c.Assert(pd2.GetClusterID(), Equals, pd1.GetClusterID()) + re.NoError(err) + re.Len(members.Members, 2) + re.Equal(pd1.GetClusterID(), pd2.GetClusterID()) // Wait for all nodes becoming healthy. time.Sleep(time.Second * 5) // Join another PD. - pd3, err := cluster.Join(s.ctx) - c.Assert(err, IsNil) + pd3, err := cluster.Join(ctx) + re.NoError(err) err = pd3.Run() - c.Assert(err, IsNil) + re.NoError(err) _, err = os.Stat(path.Join(pd3.GetConfig().DataDir, "join")) - c.Assert(os.IsNotExist(err), IsFalse) + re.False(os.IsNotExist(err)) members, err = etcdutil.ListEtcdMembers(client) - c.Assert(err, IsNil) - c.Assert(members.Members, HasLen, 3) - c.Assert(pd3.GetClusterID(), Equals, pd1.GetClusterID()) + re.NoError(err) + re.Len(members.Members, 3) + re.Equal(pd1.GetClusterID(), pd3.GetClusterID()) } // A failed PD tries to join the previous cluster but it has been deleted // during its downtime. 
-func (s *joinTestSuite) TestFailedAndDeletedPDJoinsPreviousCluster(c *C) { - cluster, err := tests.NewTestCluster(s.ctx, 3) +func TestFailedAndDeletedPDJoinsPreviousCluster(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + server.EtcdStartTimeout = 10 * time.Second + cluster, err := tests.NewTestCluster(ctx, 3) defer cluster.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() // Wait for all nodes becoming healthy. time.Sleep(time.Second * 5) pd3 := cluster.GetServer("pd3") err = pd3.Stop() - c.Assert(err, IsNil) + re.NoError(err) client := cluster.GetServer("pd1").GetEtcdClient() _, err = client.MemberRemove(context.TODO(), pd3.GetServerID()) - c.Assert(err, IsNil) + re.NoError(err) // The server should not successfully start. res := cluster.RunServer(pd3) - c.Assert(<-res, NotNil) + re.Error(<-res) members, err := etcdutil.ListEtcdMembers(client) - c.Assert(err, IsNil) - c.Assert(members.Members, HasLen, 2) + re.NoError(err) + re.Len(members.Members, 2) } // A deleted PD joins the previous cluster. -func (s *joinTestSuite) TestDeletedPDJoinsPreviousCluster(c *C) { - cluster, err := tests.NewTestCluster(s.ctx, 3) +func TestDeletedPDJoinsPreviousCluster(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + server.EtcdStartTimeout = 10 * time.Second + cluster, err := tests.NewTestCluster(ctx, 3) defer cluster.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() // Wait for all nodes becoming healthy. time.Sleep(time.Second * 5) @@ -141,37 +132,36 @@ func (s *joinTestSuite) TestDeletedPDJoinsPreviousCluster(c *C) { pd3 := cluster.GetServer("pd3") client := cluster.GetServer("pd1").GetEtcdClient() _, err = client.MemberRemove(context.TODO(), pd3.GetServerID()) - c.Assert(err, IsNil) + re.NoError(err) err = pd3.Stop() - c.Assert(err, IsNil) + re.NoError(err) // The server should not successfully start. res := cluster.RunServer(pd3) - c.Assert(<-res, NotNil) + re.Error(<-res) members, err := etcdutil.ListEtcdMembers(client) - c.Assert(err, IsNil) - c.Assert(members.Members, HasLen, 2) + re.NoError(err) + re.Len(members.Members, 2) } -func (s *joinTestSuite) TestFailedPDJoinsPreviousCluster(c *C) { - cluster, err := tests.NewTestCluster(s.ctx, 1) +func TestFailedPDJoinsPreviousCluster(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster, err := tests.NewTestCluster(ctx, 1) defer cluster.Destroy() - c.Assert(err, IsNil) + re.NoError(err) - err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(cluster.RunInitialServers()) cluster.WaitLeader() // Join the second PD. 
- pd2, err := cluster.Join(s.ctx) - c.Assert(err, IsNil) - err = pd2.Run() - c.Assert(err, IsNil) - err = pd2.Stop() - c.Assert(err, IsNil) - err = pd2.Destroy() - c.Assert(err, IsNil) - c.Assert(join.PrepareJoinCluster(pd2.GetConfig()), NotNil) + pd2, err := cluster.Join(ctx) + re.NoError(err) + re.NoError(pd2.Run()) + re.NoError(pd2.Stop()) + re.NoError(pd2.Destroy()) + re.Error(join.PrepareJoinCluster(pd2.GetConfig())) } diff --git a/tests/server/member/member_test.go b/tests/server/member/member_test.go index 9c856b739a1..1864500df74 100644 --- a/tests/server/member/member_test.go +++ b/tests/server/member/member_test.go @@ -25,10 +25,10 @@ import ( "testing" "time" - . "github.com/pingcap/check" "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/assertutil" "github.com/tikv/pd/pkg/etcdutil" "github.com/tikv/pd/pkg/testutil" @@ -38,58 +38,42 @@ import ( "go.uber.org/goleak" ) -func Test(t *testing.T) { - TestingT(t) -} - func TestMain(m *testing.M) { goleak.VerifyTestMain(m, testutil.LeakOptions...) } -func checkerWithNilAssert(c *C) *assertutil.Checker { +func checkerWithNilAssert(re *require.Assertions) *assertutil.Checker { checker := assertutil.NewChecker() checker.FailNow = func() { - c.FailNow() + re.FailNow("should be nil") } checker.IsNil = func(obtained interface{}) { - c.Assert(obtained, IsNil) + re.Nil(obtained) } return checker } -var _ = Suite(&memberTestSuite{}) - -type memberTestSuite struct { - ctx context.Context - cancel context.CancelFunc -} - -func (s *memberTestSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) -} - -func (s *memberTestSuite) TearDownSuite(c *C) { - s.cancel() -} - -func (s *memberTestSuite) TestMemberDelete(c *C) { +func TestMemberDelete(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() dcLocationConfig := map[string]string{ "pd1": "dc-1", "pd2": "dc-2", "pd3": "dc-3", } dcLocationNum := len(dcLocationConfig) - cluster, err := tests.NewTestCluster(s.ctx, dcLocationNum, func(conf *config.Config, serverName string) { + cluster, err := tests.NewTestCluster(ctx, dcLocationNum, func(conf *config.Config, serverName string) { conf.EnableLocalTSO = true conf.Labels[config.ZoneLabel] = dcLocationConfig[serverName] }) defer cluster.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) leaderName := cluster.WaitLeader() - c.Assert(leaderName, Not(Equals), "") + re.NotEmpty(leaderName) leader := cluster.GetServer(leaderName) var members []*tests.TestServer for _, s := range cluster.GetConfig().InitialServers { @@ -97,9 +81,9 @@ func (s *memberTestSuite) TestMemberDelete(c *C) { members = append(members, cluster.GetServer(s.Name)) } } - c.Assert(members, HasLen, 2) + re.Len(members, 2) - var table = []struct { + var tables = []struct { path string status int members []*config.Config @@ -111,18 +95,18 @@ func (s *memberTestSuite) TestMemberDelete(c *C) { } httpClient := &http.Client{Timeout: 15 * time.Second} - for _, t := range table { - c.Log(time.Now(), "try to delete:", t.path) - testutil.WaitUntil(c, func() bool { - addr := leader.GetConfig().ClientUrls + "/pd/api/v1/members/" + t.path + for _, table := range tables { + t.Log(time.Now(), "try to delete:", table.path) + testutil.Eventually(re, func() bool { + addr := leader.GetConfig().ClientUrls + "/pd/api/v1/members/" + 
table.path req, err := http.NewRequest(http.MethodDelete, addr, nil) - c.Assert(err, IsNil) + re.NoError(err) res, err := httpClient.Do(req) - c.Assert(err, IsNil) + re.NoError(err) defer res.Body.Close() // Check by status. - if t.status != 0 { - if res.StatusCode != t.status { + if table.status != 0 { + if res.StatusCode != table.status { time.Sleep(time.Second) return false } @@ -130,8 +114,8 @@ func (s *memberTestSuite) TestMemberDelete(c *C) { } // Check by member list. cluster.WaitLeader() - if err = s.checkMemberList(c, leader.GetConfig().ClientUrls, t.members); err != nil { - c.Logf("check member fail: %v", err) + if err = checkMemberList(re, leader.GetConfig().ClientUrls, table.members); err != nil { + t.Logf("check member fail: %v", err) time.Sleep(time.Second) return false } @@ -142,19 +126,19 @@ func (s *memberTestSuite) TestMemberDelete(c *C) { for _, member := range members { key := member.GetServer().GetMember().GetDCLocationPath(member.GetServerID()) resp, err := etcdutil.EtcdKVGet(leader.GetEtcdClient(), key) - c.Assert(err, IsNil) - c.Assert(resp.Kvs, HasLen, 0) + re.NoError(err) + re.Len(resp.Kvs, 0) } } -func (s *memberTestSuite) checkMemberList(c *C, clientURL string, configs []*config.Config) error { +func checkMemberList(re *require.Assertions, clientURL string, configs []*config.Config) error { httpClient := &http.Client{Timeout: 15 * time.Second} addr := clientURL + "/pd/api/v1/members" res, err := httpClient.Get(addr) - c.Assert(err, IsNil) + re.NoError(err) defer res.Body.Close() buf, err := io.ReadAll(res.Body) - c.Assert(err, IsNil) + re.NoError(err) if res.StatusCode != http.StatusOK { return errors.Errorf("load members failed, status: %v, data: %q", res.StatusCode, buf) } @@ -166,114 +150,118 @@ func (s *memberTestSuite) checkMemberList(c *C, clientURL string, configs []*con for _, member := range data["members"] { for _, cfg := range configs { if member.GetName() == cfg.Name { - c.Assert(member.ClientUrls, DeepEquals, []string{cfg.ClientUrls}) - c.Assert(member.PeerUrls, DeepEquals, []string{cfg.PeerUrls}) + re.Equal([]string{cfg.ClientUrls}, member.ClientUrls) + re.Equal([]string{cfg.PeerUrls}, member.PeerUrls) } } } return nil } -func (s *memberTestSuite) TestLeaderPriority(c *C) { - cluster, err := tests.NewTestCluster(s.ctx, 3) +func TestLeaderPriority(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster, err := tests.NewTestCluster(ctx, 3) defer cluster.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() leader1, err := cluster.GetServer("pd1").GetEtcdLeader() - c.Assert(err, IsNil) + re.NoError(err) server1 := cluster.GetServer(leader1) addr := server1.GetConfig().ClientUrls // PD leader should sync with etcd leader. - testutil.WaitUntil(c, func() bool { + testutil.Eventually(re, func() bool { return cluster.GetLeader() == leader1 }) // Bind a lower priority to current leader. - s.post(c, addr+"/pd/api/v1/members/name/"+leader1, `{"leader-priority": -1}`) + post(t, re, addr+"/pd/api/v1/members/name/"+leader1, `{"leader-priority": -1}`) // Wait etcd leader change. - leader2 := s.waitEtcdLeaderChange(c, server1, leader1) + leader2 := waitEtcdLeaderChange(re, server1, leader1) // PD leader should sync with etcd leader again. 
- testutil.WaitUntil(c, func() bool { + testutil.Eventually(re, func() bool { return cluster.GetLeader() == leader2 }) } -func (s *memberTestSuite) post(c *C, url string, body string) { - testutil.WaitUntil(c, func() bool { +func post(t *testing.T, re *require.Assertions, url string, body string) { + testutil.Eventually(re, func() bool { res, err := http.Post(url, "", bytes.NewBufferString(body)) // #nosec - c.Assert(err, IsNil) + re.NoError(err) b, err := io.ReadAll(res.Body) res.Body.Close() - c.Assert(err, IsNil) - c.Logf("post %s, status: %v res: %s", url, res.StatusCode, string(b)) + re.NoError(err) + t.Logf("post %s, status: %v res: %s", url, res.StatusCode, string(b)) return res.StatusCode == http.StatusOK }) } -func (s *memberTestSuite) waitEtcdLeaderChange(c *C, server *tests.TestServer, old string) string { +func waitEtcdLeaderChange(re *require.Assertions, server *tests.TestServer, old string) string { var leader string - testutil.WaitUntil(c, func() bool { + testutil.Eventually(re, func() bool { var err error leader, err = server.GetEtcdLeader() if err != nil { return false } - if leader == old { - // Priority check could be slow. So we sleep longer here. - time.Sleep(5 * time.Second) - } return leader != old - }) + }, testutil.WithWaitFor(time.Second*90), testutil.WithSleepInterval(time.Second)) return leader } -func (s *memberTestSuite) TestLeaderResign(c *C) { - cluster, err := tests.NewTestCluster(s.ctx, 3) +func TestLeaderResign(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster, err := tests.NewTestCluster(ctx, 3) defer cluster.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) leader1 := cluster.WaitLeader() addr1 := cluster.GetServer(leader1).GetConfig().ClientUrls - s.post(c, addr1+"/pd/api/v1/leader/resign", "") - leader2 := s.waitLeaderChange(c, cluster, leader1) - c.Log("leader2:", leader2) + post(t, re, addr1+"/pd/api/v1/leader/resign", "") + leader2 := waitLeaderChange(re, cluster, leader1) + t.Log("leader2:", leader2) addr2 := cluster.GetServer(leader2).GetConfig().ClientUrls - s.post(c, addr2+"/pd/api/v1/leader/transfer/"+leader1, "") - leader3 := s.waitLeaderChange(c, cluster, leader2) - c.Assert(leader3, Equals, leader1) + post(t, re, addr2+"/pd/api/v1/leader/transfer/"+leader1, "") + leader3 := waitLeaderChange(re, cluster, leader2) + re.Equal(leader1, leader3) } -func (s *memberTestSuite) TestLeaderResignWithBlock(c *C) { - cluster, err := tests.NewTestCluster(s.ctx, 3) +func TestLeaderResignWithBlock(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster, err := tests.NewTestCluster(ctx, 3) defer cluster.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) leader1 := cluster.WaitLeader() addr1 := cluster.GetServer(leader1).GetConfig().ClientUrls - err = failpoint.Enable("github.com/tikv/pd/server/raftclusterIsBusy", `pause`) - c.Assert(err, IsNil) - defer failpoint.Disable("github.com/tikv/pd/server/raftclusterIsBusy") - s.post(c, addr1+"/pd/api/v1/leader/resign", "") - leader2 := s.waitLeaderChange(c, cluster, leader1) - c.Log("leader2:", leader2) - c.Assert(leader2, Not(Equals), leader1) + re.NoError(failpoint.Enable("github.com/tikv/pd/server/raftclusterIsBusy", `pause`)) + post(t, re, addr1+"/pd/api/v1/leader/resign", "") + leader2 := waitLeaderChange(re, cluster, 
leader1) + t.Log("leader2:", leader2) + re.NotEqual(leader1, leader2) + re.NoError(failpoint.Disable("github.com/tikv/pd/server/raftclusterIsBusy")) } -func (s *memberTestSuite) waitLeaderChange(c *C, cluster *tests.TestCluster, old string) string { +func waitLeaderChange(re *require.Assertions, cluster *tests.TestCluster, old string) string { var leader string - testutil.WaitUntil(c, func() bool { + testutil.Eventually(re, func() bool { leader = cluster.GetLeader() if leader == old || leader == "" { return false @@ -283,13 +271,16 @@ func (s *memberTestSuite) waitLeaderChange(c *C, cluster *tests.TestCluster, old return leader } -func (s *memberTestSuite) TestMoveLeader(c *C) { - cluster, err := tests.NewTestCluster(s.ctx, 5) +func TestMoveLeader(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster, err := tests.NewTestCluster(ctx, 5) defer cluster.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() var wg sync.WaitGroup @@ -315,66 +306,49 @@ func (s *memberTestSuite) TestMoveLeader(c *C) { select { case <-done: case <-time.After(10 * time.Second): - c.Fatal("move etcd leader does not return in 10 seconds") + t.Fatal("move etcd leader does not return in 10 seconds") } } -var _ = Suite(&leaderTestSuite{}) - -type leaderTestSuite struct { - ctx context.Context - cancel context.CancelFunc - svr *server.Server - wg sync.WaitGroup - done chan bool - cfg *config.Config -} - -func (s *leaderTestSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) - s.cfg = server.NewTestSingleConfig(checkerWithNilAssert(c)) - s.wg.Add(1) - s.done = make(chan bool) - svr, err := server.CreateServer(s.ctx, s.cfg) - c.Assert(err, IsNil) - err = svr.Run() +func TestGetLeader(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cfg := server.NewTestSingleConfig(checkerWithNilAssert(re)) + wg := &sync.WaitGroup{} + wg.Add(1) + done := make(chan bool) + svr, err := server.CreateServer(ctx, cfg) + re.NoError(err) + defer svr.Close() + re.NoError(svr.Run()) // Send requests after server has started. - go s.sendRequest(c, s.cfg.ClientUrls) + go sendRequest(re, wg, done, cfg.ClientUrls) time.Sleep(100 * time.Millisecond) - c.Assert(err, IsNil) - - s.svr = svr -} -func (s *leaderTestSuite) TearDownSuite(c *C) { - s.cancel() - s.svr.Close() - testutil.CleanServer(s.cfg.DataDir) -} + mustWaitLeader(re, []*server.Server{svr}) -func (s *leaderTestSuite) TestGetLeader(c *C) { - mustWaitLeader(c, []*server.Server{s.svr}) + re.NotNil(svr.GetLeader()) - leader := s.svr.GetLeader() - c.Assert(leader, NotNil) + done <- true + wg.Wait() - s.done <- true - s.wg.Wait() + testutil.CleanServer(cfg.DataDir) } -func (s *leaderTestSuite) sendRequest(c *C, addr string) { - defer s.wg.Done() +func sendRequest(re *require.Assertions, wg *sync.WaitGroup, done <-chan bool, addr string) { + defer wg.Done() req := &pdpb.AllocIDRequest{Header: testutil.NewRequestHeader(0)} for { select { - case <-s.done: + case <-done: return default: // We don't need to check the response and error, // just make sure the server will not panic. 
- grpcPDClient := testutil.MustNewGrpcClient(c, addr) + grpcPDClient := testutil.MustNewGrpcClientWithTestify(re, addr) if grpcPDClient != nil { _, _ = grpcPDClient.AllocID(context.Background(), req) } @@ -383,9 +357,9 @@ func (s *leaderTestSuite) sendRequest(c *C, addr string) { } } -func mustWaitLeader(c *C, svrs []*server.Server) *server.Server { +func mustWaitLeader(re *require.Assertions, svrs []*server.Server) *server.Server { var leader *server.Server - testutil.WaitUntil(c, func() bool { + testutil.Eventually(re, func() bool { for _, s := range svrs { if !s.IsClosed() && s.GetMember().IsLeader() { leader = s diff --git a/tests/server/region_syncer/region_syncer_test.go b/tests/server/region_syncer/region_syncer_test.go index c4c91806c9f..3efe04e506d 100644 --- a/tests/server/region_syncer/region_syncer_test.go +++ b/tests/server/region_syncer/region_syncer_test.go @@ -19,9 +19,10 @@ import ( "testing" "time" - . "github.com/pingcap/check" "github.com/pingcap/failpoint" "github.com/pingcap/kvproto/pkg/metapb" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/mock/mockid" "github.com/tikv/pd/pkg/testutil" "github.com/tikv/pd/server/config" @@ -30,29 +31,10 @@ import ( "go.uber.org/goleak" ) -func Test(t *testing.T) { - TestingT(t) -} - func TestMain(m *testing.M) { goleak.VerifyTestMain(m, testutil.LeakOptions...) } -var _ = SerialSuites(®ionSyncerTestSuite{}) - -type regionSyncerTestSuite struct { - ctx context.Context - cancel context.CancelFunc -} - -func (s *regionSyncerTestSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) -} - -func (s *regionSyncerTestSuite) TearDownSuite(c *C) { - s.cancel() -} - type idAllocator struct { allocator *mockid.IDAllocator } @@ -62,51 +44,51 @@ func (i *idAllocator) alloc() uint64 { return v } -func (s *regionSyncerTestSuite) TestRegionSyncer(c *C) { - c.Assert(failpoint.Enable("github.com/tikv/pd/server/storage/regionStorageFastFlush", `return(true)`), IsNil) - c.Assert(failpoint.Enable("github.com/tikv/pd/server/syncer/noFastExitSync", `return(true)`), IsNil) - defer failpoint.Disable("github.com/tikv/pd/server/storage/regionStorageFastFlush") - defer failpoint.Disable("github.com/tikv/pd/server/syncer/noFastExitSync") +func TestRegionSyncer(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + re.NoError(failpoint.Enable("github.com/tikv/pd/server/storage/regionStorageFastFlush", `return(true)`)) + re.NoError(failpoint.Enable("github.com/tikv/pd/server/syncer/noFastExitSync", `return(true)`)) - cluster, err := tests.NewTestCluster(s.ctx, 3, func(conf *config.Config, serverName string) { conf.PDServerCfg.UseRegionStorage = true }) + cluster, err := tests.NewTestCluster(ctx, 3, func(conf *config.Config, serverName string) { conf.PDServerCfg.UseRegionStorage = true }) defer cluster.Destroy() - c.Assert(err, IsNil) + re.NoError(err) - err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(cluster.RunInitialServers()) cluster.WaitLeader() leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) + re.NoError(leaderServer.BootstrapCluster()) rc := leaderServer.GetServer().GetRaftCluster() - c.Assert(rc, NotNil) - c.Assert(cluster.WaitRegionSyncerClientsReady(2), IsTrue) + re.NotNil(rc) + re.True(cluster.WaitRegionSyncerClientsReady(2)) regionLen := 110 regions := initRegions(regionLen) for _, region := range regions { err = 
rc.HandleRegionHeartbeat(region) - c.Assert(err, IsNil) + re.NoError(err) } // merge case // region2 -> region1 -> region0 // merge A to B will increases version to max(versionA, versionB)+1, but does not increase conver regions[0] = regions[0].Clone(core.WithEndKey(regions[2].GetEndKey()), core.WithIncVersion(), core.WithIncVersion()) err = rc.HandleRegionHeartbeat(regions[2]) - c.Assert(err, IsNil) + re.NoError(err) // merge case // region3 -> region4 // merge A to B will increases version to max(versionA, versionB)+1, but does not increase conver regions[4] = regions[3].Clone(core.WithEndKey(regions[4].GetEndKey()), core.WithIncVersion()) err = rc.HandleRegionHeartbeat(regions[4]) - c.Assert(err, IsNil) + re.NoError(err) // merge case // region0 -> region4 // merge A to B will increases version to max(versionA, versionB)+1, but does not increase conver regions[4] = regions[0].Clone(core.WithEndKey(regions[4].GetEndKey()), core.WithIncVersion(), core.WithIncVersion()) err = rc.HandleRegionHeartbeat(regions[4]) - c.Assert(err, IsNil) + re.NoError(err) regions = regions[4:] regionLen = len(regions) @@ -119,14 +101,14 @@ func (s *regionSyncerTestSuite) TestRegionSyncer(c *C) { core.SetReadBytes(idx+30), core.SetReadKeys(idx+40)) err = rc.HandleRegionHeartbeat(regions[i]) - c.Assert(err, IsNil) + re.NoError(err) } // change the leader of region for i := 0; i < len(regions); i++ { regions[i] = regions[i].Clone(core.WithLeader(regions[i].GetPeers()[1])) err = rc.HandleRegionHeartbeat(regions[i]) - c.Assert(err, IsNil) + re.NoError(err) } // ensure flush to region storage, we use a duration larger than the @@ -135,15 +117,16 @@ func (s *regionSyncerTestSuite) TestRegionSyncer(c *C) { // test All regions have been synchronized to the cache of followerServer followerServer := cluster.GetServer(cluster.GetFollower()) - c.Assert(followerServer, NotNil) + re.NotNil(followerServer) cacheRegions := leaderServer.GetServer().GetBasicCluster().GetRegions() - c.Assert(cacheRegions, HasLen, regionLen) - testutil.WaitUntil(c, func() bool { + re.Len(cacheRegions, regionLen) + testutil.Eventually(re, func() bool { + assert := assert.New(t) for _, region := range cacheRegions { r := followerServer.GetServer().GetBasicCluster().GetRegion(region.GetID()) - if !(c.Check(r.GetMeta(), DeepEquals, region.GetMeta()) && - c.Check(r.GetStat(), DeepEquals, region.GetStat()) && - c.Check(r.GetLeader(), DeepEquals, region.GetLeader())) { + if !(assert.Equal(region.GetMeta(), r.GetMeta()) && + assert.Equal(region.GetStat(), r.GetStat()) && + assert.Equal(region.GetLeader(), r.GetLeader())) { return false } } @@ -151,106 +134,112 @@ func (s *regionSyncerTestSuite) TestRegionSyncer(c *C) { }) err = leaderServer.Stop() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() leaderServer = cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer, NotNil) + re.NotNil(leaderServer) loadRegions := leaderServer.GetServer().GetRaftCluster().GetRegions() - c.Assert(loadRegions, HasLen, regionLen) + re.Len(loadRegions, regionLen) for _, region := range regions { r := leaderServer.GetRegionInfoByID(region.GetID()) - c.Assert(r.GetMeta(), DeepEquals, region.GetMeta()) - c.Assert(r.GetStat(), DeepEquals, region.GetStat()) - c.Assert(r.GetLeader(), DeepEquals, region.GetLeader()) - c.Assert(r.GetBuckets(), DeepEquals, region.GetBuckets()) + re.Equal(region.GetMeta(), r.GetMeta()) + re.Equal(region.GetStat(), r.GetStat()) + re.Equal(region.GetLeader(), r.GetLeader()) + re.Equal(region.GetBuckets(), r.GetBuckets()) } + 
re.NoError(failpoint.Disable("github.com/tikv/pd/server/syncer/noFastExitSync")) + re.NoError(failpoint.Disable("github.com/tikv/pd/server/storage/regionStorageFastFlush")) } -func (s *regionSyncerTestSuite) TestFullSyncWithAddMember(c *C) { - cluster, err := tests.NewTestCluster(s.ctx, 1, func(conf *config.Config, serverName string) { conf.PDServerCfg.UseRegionStorage = true }) +func TestFullSyncWithAddMember(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { conf.PDServerCfg.UseRegionStorage = true }) defer cluster.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) + re.NoError(leaderServer.BootstrapCluster()) rc := leaderServer.GetServer().GetRaftCluster() - c.Assert(rc, NotNil) + re.NotNil(rc) regionLen := 110 regions := initRegions(regionLen) for _, region := range regions { err = rc.HandleRegionHeartbeat(region) - c.Assert(err, IsNil) + re.NoError(err) } // ensure flush to region storage time.Sleep(3 * time.Second) // restart pd1 err = leaderServer.Stop() - c.Assert(err, IsNil) + re.NoError(err) err = leaderServer.Run() - c.Assert(err, IsNil) - c.Assert(cluster.WaitLeader(), Equals, "pd1") + re.NoError(err) + re.Equal("pd1", cluster.WaitLeader()) // join new PD - pd2, err := cluster.Join(s.ctx) - c.Assert(err, IsNil) - err = pd2.Run() - c.Assert(err, IsNil) - c.Assert(cluster.WaitLeader(), Equals, "pd1") + pd2, err := cluster.Join(ctx) + re.NoError(err) + re.NoError(pd2.Run()) + re.Equal("pd1", cluster.WaitLeader()) // waiting for synchronization to complete time.Sleep(3 * time.Second) - err = cluster.ResignLeader() - c.Assert(err, IsNil) - c.Assert(cluster.WaitLeader(), Equals, "pd2") + re.NoError(cluster.ResignLeader()) + re.Equal("pd2", cluster.WaitLeader()) loadRegions := pd2.GetServer().GetRaftCluster().GetRegions() - c.Assert(loadRegions, HasLen, regionLen) + re.Len(loadRegions, regionLen) } -func (s *regionSyncerTestSuite) TestPrepareChecker(c *C) { - c.Assert(failpoint.Enable("github.com/tikv/pd/server/cluster/changeCoordinatorTicker", `return(true)`), IsNil) - defer failpoint.Disable("github.com/tikv/pd/server/cluster/changeCoordinatorTicker") - cluster, err := tests.NewTestCluster(s.ctx, 1, func(conf *config.Config, serverName string) { conf.PDServerCfg.UseRegionStorage = true }) +func TestPrepareChecker(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + re.NoError(failpoint.Enable("github.com/tikv/pd/server/cluster/changeCoordinatorTicker", `return(true)`)) + cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { conf.PDServerCfg.UseRegionStorage = true }) defer cluster.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() leaderServer := cluster.GetServer(cluster.GetLeader()) - c.Assert(leaderServer.BootstrapCluster(), IsNil) + re.NoError(leaderServer.BootstrapCluster()) rc := leaderServer.GetServer().GetRaftCluster() - c.Assert(rc, NotNil) + re.NotNil(rc) regionLen := 110 regions := initRegions(regionLen) for _, region := range regions { err = rc.HandleRegionHeartbeat(region) - c.Assert(err, IsNil) + re.NoError(err) } // ensure 
flush to region storage time.Sleep(3 * time.Second) - c.Assert(leaderServer.GetRaftCluster().IsPrepared(), IsTrue) + re.True(leaderServer.GetRaftCluster().IsPrepared()) // join new PD - pd2, err := cluster.Join(s.ctx) - c.Assert(err, IsNil) + pd2, err := cluster.Join(ctx) + re.NoError(err) err = pd2.Run() - c.Assert(err, IsNil) + re.NoError(err) // waiting for synchronization to complete time.Sleep(3 * time.Second) err = cluster.ResignLeader() - c.Assert(err, IsNil) - c.Assert(cluster.WaitLeader(), Equals, "pd2") + re.NoError(err) + re.Equal("pd2", cluster.WaitLeader()) leaderServer = cluster.GetServer(cluster.GetLeader()) rc = leaderServer.GetServer().GetRaftCluster() for _, region := range regions { err = rc.HandleRegionHeartbeat(region) - c.Assert(err, IsNil) + re.NoError(err) } time.Sleep(time.Second) - c.Assert(rc.IsPrepared(), IsTrue) + re.True(rc.IsPrepared()) + re.NoError(failpoint.Disable("github.com/tikv/pd/server/cluster/changeCoordinatorTicker")) } func initRegions(regionLen int) []*core.RegionInfo { diff --git a/tests/server/server_test.go b/tests/server/server_test.go index 4369d03c966..85f1b2e4241 100644 --- a/tests/server/server_test.go +++ b/tests/server/server_test.go @@ -34,9 +34,9 @@ func TestMain(m *testing.M) { } func TestUpdateAdvertiseUrls(t *testing.T) { + re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - re := require.New(t) cluster, err := tests.NewTestCluster(ctx, 2) defer cluster.Destroy() re.NoError(err) @@ -60,10 +60,10 @@ func TestUpdateAdvertiseUrls(t *testing.T) { conf.AdvertisePeerURLs = conf.PeerURLs + "," + tempurl.Alloc() } for _, conf := range cluster.GetConfig().InitialServers { - serverConf, e := conf.Generate() - re.NoError(e) - s, e := tests.NewTestServer(ctx, serverConf) - re.NoError(e) + serverConf, err := conf.Generate() + re.NoError(err) + s, err := tests.NewTestServer(ctx, serverConf) + re.NoError(err) cluster.GetServers()[conf.Name] = s } err = cluster.RunInitialServers() @@ -75,9 +75,9 @@ func TestUpdateAdvertiseUrls(t *testing.T) { } func TestClusterID(t *testing.T) { + re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - re := require.New(t) cluster, err := tests.NewTestCluster(ctx, 3) defer cluster.Destroy() re.NoError(err) @@ -111,9 +111,9 @@ func TestClusterID(t *testing.T) { } func TestLeader(t *testing.T) { + re := require.New(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - re := require.New(t) cluster, err := tests.NewTestCluster(ctx, 3) defer cluster.Destroy() re.NoError(err) @@ -122,12 +122,11 @@ func TestLeader(t *testing.T) { re.NoError(err) leader1 := cluster.WaitLeader() - re.NotEqual("", leader1) + re.NotEmpty(leader1) err = cluster.GetServer(leader1).Stop() re.NoError(err) testutil.Eventually(re, func() bool { - leader := cluster.GetLeader() - return leader != leader1 + return cluster.GetLeader() != leader1 }) } diff --git a/tests/server/watch/leader_watch_test.go b/tests/server/watch/leader_watch_test.go index 88d1470d733..4cdbee9d868 100644 --- a/tests/server/watch/leader_watch_test.go +++ b/tests/server/watch/leader_watch_test.go @@ -19,94 +19,79 @@ import ( "testing" "time" - . 
"github.com/pingcap/check" "github.com/pingcap/failpoint" + "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/testutil" "github.com/tikv/pd/server/config" "github.com/tikv/pd/tests" "go.uber.org/goleak" ) -func Test(t *testing.T) { - TestingT(t) -} - func TestMain(m *testing.M) { goleak.VerifyTestMain(m, testutil.LeakOptions...) } -var _ = Suite(&watchTestSuite{}) - -type watchTestSuite struct { - ctx context.Context - cancel context.CancelFunc -} - -func (s *watchTestSuite) SetUpSuite(c *C) { - s.ctx, s.cancel = context.WithCancel(context.Background()) -} - -func (s *watchTestSuite) TearDownSuite(c *C) { - s.cancel() -} - -func (s *watchTestSuite) TestWatcher(c *C) { - cluster, err := tests.NewTestCluster(s.ctx, 1, func(conf *config.Config, serverName string) { conf.AutoCompactionRetention = "1s" }) +func TestWatcher(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { conf.AutoCompactionRetention = "1s" }) defer cluster.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() pd1 := cluster.GetServer(cluster.GetLeader()) - c.Assert(pd1, NotNil) + re.NotNil(pd1) - pd2, err := cluster.Join(s.ctx) - c.Assert(err, IsNil) + pd2, err := cluster.Join(ctx) + re.NoError(err) err = pd2.Run() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() time.Sleep(5 * time.Second) - pd3, err := cluster.Join(s.ctx) - c.Assert(err, IsNil) - c.Assert(failpoint.Enable("github.com/tikv/pd/server/delayWatcher", `pause`), IsNil) + pd3, err := cluster.Join(ctx) + re.NoError(err) + re.NoError(failpoint.Enable("github.com/tikv/pd/server/delayWatcher", `pause`)) err = pd3.Run() - c.Assert(err, IsNil) + re.NoError(err) time.Sleep(200 * time.Millisecond) - c.Assert(pd3.GetLeader().GetName(), Equals, pd1.GetConfig().Name) + re.Equal(pd1.GetConfig().Name, pd3.GetLeader().GetName()) err = pd1.Stop() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() - c.Assert(pd2.GetLeader().GetName(), Equals, pd2.GetConfig().Name) - failpoint.Disable("github.com/tikv/pd/server/delayWatcher") - testutil.WaitUntil(c, func() bool { - return c.Check(pd3.GetLeader().GetName(), Equals, pd2.GetConfig().Name) + re.Equal(pd2.GetConfig().Name, pd2.GetLeader().GetName()) + re.NoError(failpoint.Disable("github.com/tikv/pd/server/delayWatcher")) + testutil.Eventually(re, func() bool { + return pd3.GetLeader().GetName() == pd2.GetConfig().Name }) - c.Succeed() } -func (s *watchTestSuite) TestWatcherCompacted(c *C) { - cluster, err := tests.NewTestCluster(s.ctx, 1, func(conf *config.Config, serverName string) { conf.AutoCompactionRetention = "1s" }) +func TestWatcherCompacted(t *testing.T) { + re := require.New(t) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + cluster, err := tests.NewTestCluster(ctx, 1, func(conf *config.Config, serverName string) { conf.AutoCompactionRetention = "1s" }) defer cluster.Destroy() - c.Assert(err, IsNil) + re.NoError(err) err = cluster.RunInitialServers() - c.Assert(err, IsNil) + re.NoError(err) cluster.WaitLeader() pd1 := cluster.GetServer(cluster.GetLeader()) - c.Assert(pd1, NotNil) + re.NotNil(pd1) client := pd1.GetEtcdClient() _, err = client.Put(context.Background(), "test", "v") - c.Assert(err, IsNil) + re.NoError(err) // wait compaction time.Sleep(2 * time.Second) - pd2, err := cluster.Join(s.ctx) - c.Assert(err, 
IsNil) + pd2, err := cluster.Join(ctx) + re.NoError(err) err = pd2.Run() - c.Assert(err, IsNil) - testutil.WaitUntil(c, func() bool { - return c.Check(pd2.GetLeader().GetName(), Equals, pd1.GetConfig().Name) + re.NoError(err) + testutil.Eventually(re, func() bool { + return pd2.GetLeader().GetName() == pd1.GetConfig().Name }) - c.Succeed() }
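
A minimal sketch of the post-migration test shape that every hunk above follows, assuming only helpers already used in this patch (require.New, tests.NewTestCluster, testutil.Eventually); the test name below is hypothetical and the snippet is illustrative, not part of the diff:

package id_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
	"github.com/tikv/pd/pkg/testutil"
	"github.com/tikv/pd/tests"
)

// TestMigratedShape (hypothetical name) shows the converted structure:
// a per-test require.Assertions, a per-test context replacing the old
// SetUpSuite/TearDownSuite pair, and testutil.Eventually replacing
// testutil.WaitUntil for retried conditions.
func TestMigratedShape(t *testing.T) {
	re := require.New(t)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	cluster, err := tests.NewTestCluster(ctx, 1)
	re.NoError(err)
	defer cluster.Destroy()

	re.NoError(cluster.RunInitialServers())
	cluster.WaitLeader()

	// Retry the closure until it returns true or the wait times out;
	// failures are reported through the shared require.Assertions.
	testutil.Eventually(re, func() bool {
		return cluster.GetLeader() != ""
	})
}

Where a longer or slower retry is needed, the wait can be tuned with testutil.WithWaitFor and testutil.WithSleepInterval, as waitEtcdLeaderChange does in member_test.go above.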