diff --git a/cdc/entry/schema_test_helper.go b/cdc/entry/schema_test_helper.go index cf58c893719..ca07814a7a0 100644 --- a/cdc/entry/schema_test_helper.go +++ b/cdc/entry/schema_test_helper.go @@ -14,7 +14,8 @@ package entry import ( - "github.com/pingcap/check" + "testing" + ticonfig "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" @@ -22,33 +23,34 @@ import ( timodel "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/store/mockstore" - "github.com/pingcap/tidb/util/testkit" + "github.com/pingcap/tidb/testkit" + "github.com/stretchr/testify/require" "github.com/tikv/client-go/v2/oracle" ) // SchemaTestHelper is a test helper for schema which creates an internal tidb instance to generate DDL jobs with meta information type SchemaTestHelper struct { - c *check.C + t *testing.T tk *testkit.TestKit storage kv.Storage domain *domain.Domain } // NewSchemaTestHelper creates a SchemaTestHelper -func NewSchemaTestHelper(c *check.C) *SchemaTestHelper { +func NewSchemaTestHelper(t *testing.T) *SchemaTestHelper { store, err := mockstore.NewMockStore() - c.Assert(err, check.IsNil) + require.Nil(t, err) ticonfig.UpdateGlobal(func(conf *ticonfig.Config) { conf.AlterPrimaryKey = true }) session.SetSchemaLease(0) session.DisableStats4Test() domain, err := session.BootstrapSession(store) - c.Assert(err, check.IsNil) + require.Nil(t, err) domain.SetStatsUpdating(true) - tk := testkit.NewTestKit(c, store) + tk := testkit.NewTestKit(t, store) return &SchemaTestHelper{ - c: c, + t: t, tk: tk, storage: store, domain: domain, @@ -59,8 +61,8 @@ func NewSchemaTestHelper(c *check.C) *SchemaTestHelper { func (s *SchemaTestHelper) DDL2Job(ddl string) *timodel.Job { s.tk.MustExec(ddl) jobs, err := s.GetCurrentMeta().GetLastNHistoryDDLJobs(1) - s.c.Assert(err, check.IsNil) - s.c.Assert(jobs, check.HasLen, 1) + require.Nil(s.t, err) + require.Len(s.t, jobs, 1) return jobs[0] } @@ -72,7 +74,7 @@ func (s *SchemaTestHelper) Storage() kv.Storage { // GetCurrentMeta return the current meta snapshot func (s *SchemaTestHelper) GetCurrentMeta() *timeta.Meta { ver, err := s.storage.CurrentVersion(oracle.GlobalTxnScope) - s.c.Assert(err, check.IsNil) + require.Nil(s.t, err) return timeta.NewSnapshotMeta(s.storage.GetSnapshot(ver)) } diff --git a/cdc/owner/barrier_test.go b/cdc/owner/barrier_test.go index 5b09cd030ba..3524b80eb48 100644 --- a/cdc/owner/barrier_test.go +++ b/cdc/owner/barrier_test.go @@ -18,50 +18,41 @@ import ( "math/rand" "testing" - "github.com/pingcap/check" "github.com/pingcap/tiflow/cdc/model" - "github.com/pingcap/tiflow/pkg/util/testleak" + "github.com/stretchr/testify/require" ) -func Test(t *testing.T) { check.TestingT(t) } - -var _ = check.Suite(&barrierSuite{}) - -type barrierSuite struct{} - -func (s *barrierSuite) TestBarrier(c *check.C) { - defer testleak.AfterTest(c)() +func TestBarrier(t *testing.T) { b := newBarriers() b.Update(ddlJobBarrier, 2) b.Update(syncPointBarrier, 3) b.Update(finishBarrier, 1) tp, ts := b.Min() - c.Assert(tp, check.Equals, finishBarrier) - c.Assert(ts, check.Equals, uint64(1)) + require.Equal(t, tp, finishBarrier) + require.Equal(t, ts, uint64(1)) b.Update(finishBarrier, 4) tp, ts = b.Min() - c.Assert(tp, check.Equals, ddlJobBarrier) - c.Assert(ts, check.Equals, uint64(2)) + require.Equal(t, tp, ddlJobBarrier) + require.Equal(t, ts, uint64(2)) b.Remove(ddlJobBarrier) tp, ts = b.Min() - c.Assert(tp, check.Equals, syncPointBarrier) - c.Assert(ts, check.Equals, uint64(3)) + 
require.Equal(t, tp, syncPointBarrier) + require.Equal(t, ts, uint64(3)) b.Update(finishBarrier, 1) tp, ts = b.Min() - c.Assert(tp, check.Equals, finishBarrier) - c.Assert(ts, check.Equals, uint64(1)) + require.Equal(t, tp, finishBarrier) + require.Equal(t, ts, uint64(1)) b.Update(ddlJobBarrier, 5) tp, ts = b.Min() - c.Assert(tp, check.Equals, finishBarrier) - c.Assert(ts, check.Equals, uint64(1)) + require.Equal(t, tp, finishBarrier) + require.Equal(t, ts, uint64(1)) } -func (s *barrierSuite) TestBarrierRandom(c *check.C) { - defer testleak.AfterTest(c)() +func TestBarrierRandom(t *testing.T) { maxBarrierType := 50 maxBarrierTs := 1000000 b := newBarriers() @@ -90,7 +81,7 @@ func (s *barrierSuite) TestBarrierRandom(c *check.C) { } } tp, ts := b.Min() - c.Assert(ts, check.Equals, expectedMinTs) - c.Assert(expectedBarriers[tp], check.Equals, expectedMinTs) + require.Equal(t, ts, expectedMinTs) + require.Equal(t, expectedBarriers[tp], expectedMinTs) } } diff --git a/cdc/owner/changefeed_test.go b/cdc/owner/changefeed_test.go index 6aa6665612c..1d756ef8a8d 100644 --- a/cdc/owner/changefeed_test.go +++ b/cdc/owner/changefeed_test.go @@ -15,13 +15,14 @@ package owner import ( "context" + "io/ioutil" "os" "path/filepath" "sync" "sync/atomic" + "testing" "time" - "github.com/pingcap/check" "github.com/pingcap/errors" timodel "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tiflow/cdc/entry" @@ -31,8 +32,8 @@ import ( "github.com/pingcap/tiflow/pkg/orchestrator" "github.com/pingcap/tiflow/pkg/pdtime" "github.com/pingcap/tiflow/pkg/txnutil/gc" - "github.com/pingcap/tiflow/pkg/util/testleak" "github.com/pingcap/tiflow/pkg/version" + "github.com/stretchr/testify/require" "github.com/tikv/client-go/v2/oracle" ) @@ -112,11 +113,7 @@ func (m *mockDDLSink) Barrier(ctx context.Context) error { return nil } -var _ = check.Suite(&changefeedSuite{}) - -type changefeedSuite struct{} - -func createChangefeed4Test(ctx cdcContext.Context, c *check.C) (*changefeed, *orchestrator.ChangefeedReactorState, +func createChangefeed4Test(ctx cdcContext.Context, t *testing.T) (*changefeed, *orchestrator.ChangefeedReactorState, map[model.CaptureID]*model.CaptureInfo, *orchestrator.ReactorStateTester) { ctx.GlobalVars().PDClient = &gc.MockPDClient{ UpdateServiceGCSafePointFunc: func(ctx context.Context, serviceID string, ttl int64, safePoint uint64) (uint64, error) { @@ -130,9 +127,9 @@ func createChangefeed4Test(ctx cdcContext.Context, c *check.C) (*changefeed, *or return &mockDDLSink{} }) state := orchestrator.NewChangefeedReactorState(ctx.ChangefeedVars().ID) - tester := orchestrator.NewReactorStateTester(c, state, nil) + tester := orchestrator.NewReactorStateTester(t, state, nil) state.PatchInfo(func(info *model.ChangeFeedInfo) (*model.ChangeFeedInfo, bool, error) { - c.Assert(info, check.IsNil) + require.Nil(t, info) info = ctx.ChangefeedVars().Info return info, true, nil }) @@ -142,14 +139,13 @@ func createChangefeed4Test(ctx cdcContext.Context, c *check.C) (*changefeed, *or return cf, state, captures, tester } -func (s *changefeedSuite) TestPreCheck(c *check.C) { - defer testleak.AfterTest(c)() +func TestPreCheck(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(true) - cf, state, captures, tester := createChangefeed4Test(ctx, c) + cf, state, captures, tester := createChangefeed4Test(ctx, t) cf.Tick(ctx, state, captures) tester.MustApplyPatches() - c.Assert(state.Status, check.NotNil) - c.Assert(state.TaskStatuses, check.HasKey, ctx.GlobalVars().CaptureInfo.ID) + require.NotNil(t, state.Status) + 
require.Contains(t, state.TaskStatuses, ctx.GlobalVars().CaptureInfo.ID) // test clean the meta data of offline capture offlineCaputreID := "offline-capture" @@ -166,17 +162,16 @@ func (s *changefeedSuite) TestPreCheck(c *check.C) { cf.Tick(ctx, state, captures) tester.MustApplyPatches() - c.Assert(state.Status, check.NotNil) - c.Assert(state.TaskStatuses, check.HasKey, ctx.GlobalVars().CaptureInfo.ID) - c.Assert(state.TaskStatuses, check.Not(check.HasKey), offlineCaputreID) - c.Assert(state.TaskPositions, check.Not(check.HasKey), offlineCaputreID) - c.Assert(state.Workloads, check.Not(check.HasKey), offlineCaputreID) + require.NotNil(t, state.Status) + require.Contains(t, state.TaskStatuses, ctx.GlobalVars().CaptureInfo.ID) + require.NotContains(t, state.TaskStatuses, offlineCaputreID) + require.NotContains(t, state.TaskPositions, offlineCaputreID) + require.NotContains(t, state.Workloads, offlineCaputreID) } -func (s *changefeedSuite) TestInitialize(c *check.C) { - defer testleak.AfterTest(c)() +func TestInitialize(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(true) - cf, state, captures, tester := createChangefeed4Test(ctx, c) + cf, state, captures, tester := createChangefeed4Test(ctx, t) defer cf.Close(ctx) // pre check cf.Tick(ctx, state, captures) @@ -185,13 +180,12 @@ func (s *changefeedSuite) TestInitialize(c *check.C) { // initialize cf.Tick(ctx, state, captures) tester.MustApplyPatches() - c.Assert(state.Status.CheckpointTs, check.Equals, ctx.ChangefeedVars().Info.StartTs) + require.Equal(t, state.Status.CheckpointTs, ctx.ChangefeedVars().Info.StartTs) } -func (s *changefeedSuite) TestHandleError(c *check.C) { - defer testleak.AfterTest(c)() +func TestChangefeedHandleError(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(true) - cf, state, captures, tester := createChangefeed4Test(ctx, c) + cf, state, captures, tester := createChangefeed4Test(ctx, t) defer cf.Close(ctx) // pre check cf.Tick(ctx, state, captures) @@ -205,14 +199,12 @@ func (s *changefeedSuite) TestHandleError(c *check.C) { // handle error cf.Tick(ctx, state, captures) tester.MustApplyPatches() - c.Assert(state.Status.CheckpointTs, check.Equals, ctx.ChangefeedVars().Info.StartTs) - c.Assert(state.Info.Error.Message, check.Equals, "fake error") + require.Equal(t, state.Status.CheckpointTs, ctx.ChangefeedVars().Info.StartTs) + require.Equal(t, state.Info.Error.Message, "fake error") } -func (s *changefeedSuite) TestExecDDL(c *check.C) { - defer testleak.AfterTest(c)() - - helper := entry.NewSchemaTestHelper(c) +func TestExecDDL(t *testing.T) { + helper := entry.NewSchemaTestHelper(t) defer helper.Close() // Creates a table, which will be deleted at the start-ts of the changefeed. // It is expected that the changefeed DOES NOT replicate this table. 
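The hunks above and below apply the same mechanical translation everywhere in this patch: a gocheck suite method taking *check.C becomes a plain top-level test taking *testing.T, c.Assert calls become testify require calls, and the per-test defer testleak.AfterTest(c)() is dropped in favor of a package-level leak check (see the new cdc/owner/main_test.go later in this diff). A minimal sketch of the post-migration shape; fooSuite and TestFoo are hypothetical names, not code from this patch:

package owner

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// Before, as a gocheck suite method:
//
//	func (s *fooSuite) TestFoo(c *check.C) {
//		defer testleak.AfterTest(c)()
//		c.Assert(got, check.Equals, want)
//	}
//
// After: *testing.T plus require; leak checking moves to TestMain.
func TestFoo(t *testing.T) {
	got, want := 1+1, 2
	require.Equal(t, got, want) // was: c.Assert(got, check.Equals, want)
}

One caveat of the one-to-one translation: testify documents require.Equal as require.Equal(t, expected, actual), while these hunks keep the gocheck argument order (obtained value first). The assertions behave identically because the comparison is symmetric, but "expected" and "actual" are swapped in any failure message.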
@@ -237,7 +229,7 @@ func (s *changefeedSuite) TestExecDDL(c *check.C) { }, }) - cf, state, captures, tester := createChangefeed4Test(ctx, c) + cf, state, captures, tester := createChangefeed4Test(ctx, t) defer cf.Close(ctx) tickThreeTime := func() { cf.Tick(ctx, state, captures) @@ -249,13 +241,12 @@ func (s *changefeedSuite) TestExecDDL(c *check.C) { } // pre check and initialize tickThreeTime() - - c.Assert(cf.schema.AllPhysicalTables(), check.HasLen, 1) - c.Assert(state.TaskStatuses[ctx.GlobalVars().CaptureInfo.ID].Operation, check.HasLen, 0) - c.Assert(state.TaskStatuses[ctx.GlobalVars().CaptureInfo.ID].Tables, check.HasLen, 0) + require.Len(t, cf.schema.AllPhysicalTables(), 1) + require.Len(t, state.TaskStatuses[ctx.GlobalVars().CaptureInfo.ID].Operation, 0) + require.Len(t, state.TaskStatuses[ctx.GlobalVars().CaptureInfo.ID].Tables, 0) job = helper.DDL2Job("drop table test0.table0") - // ddl puller resolved ts grow uo + // ddl puller resolved ts grow up mockDDLPuller := cf.ddlPuller.(*mockDDLPuller) mockDDLPuller.resolvedTs = startTs mockDDLSink := cf.sink.(*mockDDLSink) @@ -263,15 +254,15 @@ func (s *changefeedSuite) TestExecDDL(c *check.C) { mockDDLPuller.ddlQueue = append(mockDDLPuller.ddlQueue, job) // three tick to make sure all barriers set in initialize is handled tickThreeTime() - c.Assert(state.Status.CheckpointTs, check.Equals, mockDDLPuller.resolvedTs) + require.Equal(t, state.Status.CheckpointTs, mockDDLPuller.resolvedTs) // The ephemeral table should have left no trace in the schema cache - c.Assert(cf.schema.AllPhysicalTables(), check.HasLen, 0) + require.Len(t, cf.schema.AllPhysicalTables(), 0) // executing the ddl finished mockDDLSink.ddlDone = true mockDDLPuller.resolvedTs += 1000 tickThreeTime() - c.Assert(state.Status.CheckpointTs, check.Equals, mockDDLPuller.resolvedTs) + require.Equal(t, state.Status.CheckpointTs, mockDDLPuller.resolvedTs) // handle create database job = helper.DDL2Job("create database test1") @@ -279,14 +270,14 @@ func (s *changefeedSuite) TestExecDDL(c *check.C) { job.BinlogInfo.FinishedTS = mockDDLPuller.resolvedTs mockDDLPuller.ddlQueue = append(mockDDLPuller.ddlQueue, job) tickThreeTime() - c.Assert(state.Status.CheckpointTs, check.Equals, mockDDLPuller.resolvedTs) - c.Assert(mockDDLSink.ddlExecuting.Query, check.Equals, "CREATE DATABASE `test1`") + require.Equal(t, state.Status.CheckpointTs, mockDDLPuller.resolvedTs) + require.Equal(t, mockDDLSink.ddlExecuting.Query, "CREATE DATABASE `test1`") // executing the ddl finished mockDDLSink.ddlDone = true mockDDLPuller.resolvedTs += 1000 tickThreeTime() - c.Assert(state.Status.CheckpointTs, check.Equals, mockDDLPuller.resolvedTs) + require.Equal(t, state.Status.CheckpointTs, mockDDLPuller.resolvedTs) // handle create table job = helper.DDL2Job("create table test1.test1(id int primary key)") @@ -294,22 +285,22 @@ func (s *changefeedSuite) TestExecDDL(c *check.C) { job.BinlogInfo.FinishedTS = mockDDLPuller.resolvedTs mockDDLPuller.ddlQueue = append(mockDDLPuller.ddlQueue, job) tickThreeTime() - c.Assert(state.Status.CheckpointTs, check.Equals, mockDDLPuller.resolvedTs) - c.Assert(mockDDLSink.ddlExecuting.Query, check.Equals, "CREATE TABLE `test1`.`test1` (`id` INT PRIMARY KEY)") + + require.Equal(t, state.Status.CheckpointTs, mockDDLPuller.resolvedTs) + require.Equal(t, mockDDLSink.ddlExecuting.Query, "CREATE TABLE `test1`.`test1` (`id` INT PRIMARY KEY)") // executing the ddl finished mockDDLSink.ddlDone = true mockDDLPuller.resolvedTs += 1000 tickThreeTime() - 
c.Assert(state.TaskStatuses[ctx.GlobalVars().CaptureInfo.ID].Tables, check.HasKey, job.TableID) + require.Contains(t, state.TaskStatuses[ctx.GlobalVars().CaptureInfo.ID].Tables, job.TableID) } -func (s *changefeedSuite) TestSyncPoint(c *check.C) { - defer testleak.AfterTest(c)() +func TestSyncPoint(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(true) ctx.ChangefeedVars().Info.SyncPointEnabled = true ctx.ChangefeedVars().Info.SyncPointInterval = 1 * time.Second - cf, state, captures, tester := createChangefeed4Test(ctx, c) + cf, state, captures, tester := createChangefeed4Test(ctx, t) defer cf.Close(ctx) // pre check @@ -331,16 +322,15 @@ func (s *changefeedSuite) TestSyncPoint(c *check.C) { } for i := 1; i < len(mockDDLSink.syncPointHis); i++ { // check the time interval between adjacent sync points is less or equal than one second - c.Assert(mockDDLSink.syncPointHis[i]-mockDDLSink.syncPointHis[i-1], check.LessEqual, uint64(1000<<18)) + require.LessOrEqual(t, mockDDLSink.syncPointHis[i]-mockDDLSink.syncPointHis[i-1], uint64(1000<<18)) } - c.Assert(len(mockDDLSink.syncPointHis), check.GreaterEqual, 5) + require.GreaterOrEqual(t, len(mockDDLSink.syncPointHis), 5) } -func (s *changefeedSuite) TestFinished(c *check.C) { - defer testleak.AfterTest(c)() +func TestFinished(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(true) ctx.ChangefeedVars().Info.TargetTs = ctx.ChangefeedVars().Info.StartTs + 1000 - cf, state, captures, tester := createChangefeed4Test(ctx, c) + cf, state, captures, tester := createChangefeed4Test(ctx, t) defer cf.Close(ctx) // pre check @@ -359,17 +349,17 @@ func (s *changefeedSuite) TestFinished(c *check.C) { tester.MustApplyPatches() } - c.Assert(state.Status.CheckpointTs, check.Equals, state.Info.TargetTs) - c.Assert(state.Info.State, check.Equals, model.StateFinished) + require.Equal(t, state.Status.CheckpointTs, state.Info.TargetTs) + require.Equal(t, state.Info.State, model.StateFinished) } -func (s *changefeedSuite) TestRemoveChangefeed(c *check.C) { - defer testleak.AfterTest(c)() - +func TestRemoveChangefeed(t *testing.T) { baseCtx, cancel := context.WithCancel(context.Background()) ctx := cdcContext.NewContext4Test(baseCtx, true) info := ctx.ChangefeedVars().Info - dir := c.MkDir() + dir, err := ioutil.TempDir("", "remove-changefeed-test") + require.NoError(t, err) + defer os.RemoveAll(dir) info.Config.Consistent = &config.ConsistentConfig{ Level: "eventual", Storage: filepath.Join("nfs://", dir), @@ -378,17 +368,17 @@ func (s *changefeedSuite) TestRemoveChangefeed(c *check.C) { ID: ctx.ChangefeedVars().ID, Info: info, }) - testChangefeedReleaseResource(c, ctx, cancel, dir, true /*expectedInitialized*/) + testChangefeedReleaseResource(t, ctx, cancel, dir, true /*expectedInitialized*/) } -func (s *changefeedSuite) TestRemovePausedChangefeed(c *check.C) { - defer testleak.AfterTest(c)() - +func TestRemovePausedChangefeed(t *testing.T) { baseCtx, cancel := context.WithCancel(context.Background()) ctx := cdcContext.NewContext4Test(baseCtx, true) info := ctx.ChangefeedVars().Info info.State = model.StateStopped - dir := c.MkDir() + dir, err := ioutil.TempDir("", "remove-paused-changefeed-test") + require.NoError(t, err) + defer os.RemoveAll(dir) info.Config.Consistent = &config.ConsistentConfig{ Level: "eventual", Storage: filepath.Join("nfs://", dir), @@ -397,17 +387,17 @@ func (s *changefeedSuite) TestRemovePausedChangefeed(c *check.C) { ID: ctx.ChangefeedVars().ID, Info: info, }) - testChangefeedReleaseResource(c, ctx, cancel, dir, false 
/*expectedInitialized*/) + testChangefeedReleaseResource(t, ctx, cancel, dir, false /*expectedInitialized*/) } func testChangefeedReleaseResource( - c *check.C, + t *testing.T, ctx cdcContext.Context, cancel context.CancelFunc, redoLogDir string, expectedInitialized bool, ) { - cf, state, captures, tester := createChangefeed4Test(ctx, c) + cf, state, captures, tester := createChangefeed4Test(ctx, t) // pre check cf.Tick(ctx, state, captures) @@ -416,7 +406,7 @@ func testChangefeedReleaseResource( // initialize cf.Tick(ctx, state, captures) tester.MustApplyPatches() - c.Assert(cf.initialized, check.Equals, expectedInitialized) + require.Equal(t, cf.initialized, expectedInitialized) // remove changefeed from state manager by admin job cf.feedStateManager.PushAdminJob(&model.AdminJob{ @@ -425,15 +415,14 @@ func testChangefeedReleaseResource( }) // changefeed tick will release resources err := cf.tick(ctx, state, captures) - c.Assert(err, check.IsNil) + require.Nil(t, err) cancel() // check redo log dir is deleted _, err = os.Stat(redoLogDir) - c.Assert(os.IsNotExist(err), check.IsTrue) + require.True(t, os.IsNotExist(err)) } -func (s *changefeedSuite) TestAddSpecialComment(c *check.C) { - defer testleak.AfterTest(c)() +func TestAddSpecialComment(t *testing.T) { testCase := []struct { input string result string @@ -565,10 +554,10 @@ func (s *changefeedSuite) TestAddSpecialComment(c *check.C) { } for _, ca := range testCase { re, err := addSpecialComment(ca.input) - c.Check(err, check.IsNil) - c.Check(re, check.Equals, ca.result) + require.Nil(t, err) + require.Equal(t, re, ca.result) } - c.Assert(func() { + require.Panics(t, func() { _, _ = addSpecialComment("alter table t force, auto_increment = 12;alter table t force, auto_increment = 12;") - }, check.Panics, "invalid ddlQuery statement size") + }, "invalid ddlQuery statement size") } diff --git a/cdc/owner/ddl_puller_test.go b/cdc/owner/ddl_puller_test.go index 654fa371060..08f5a7bbd26 100644 --- a/cdc/owner/ddl_puller_test.go +++ b/cdc/owner/ddl_puller_test.go @@ -18,10 +18,10 @@ import ( "encoding/json" "sync" "sync/atomic" + "testing" "time" "github.com/benbjohnson/clock" - "github.com/pingcap/check" "github.com/pingcap/errors" "github.com/pingcap/log" timodel "github.com/pingcap/tidb/parser/model" @@ -29,25 +29,21 @@ import ( "github.com/pingcap/tiflow/cdc/model" cdcContext "github.com/pingcap/tiflow/pkg/context" "github.com/pingcap/tiflow/pkg/retry" - "github.com/pingcap/tiflow/pkg/util/testleak" + "github.com/stretchr/testify/require" "go.uber.org/zap" "go.uber.org/zap/zaptest/observer" ) -var _ = check.Suite(&ddlPullerSuite{}) - -type ddlPullerSuite struct{} - type mockPuller struct { - c *check.C + t *testing.T inCh chan *model.RawKVEntry outCh chan *model.RawKVEntry resolvedTs model.Ts } -func newMockPuller(c *check.C, startTs model.Ts) *mockPuller { +func newMockPuller(t *testing.T, startTs model.Ts) *mockPuller { return &mockPuller{ - c: c, + t: t, inCh: make(chan *model.RawKVEntry), outCh: make(chan *model.RawKVEntry), resolvedTs: startTs - 1, @@ -84,7 +80,7 @@ func (m *mockPuller) append(e *model.RawKVEntry) { func (m *mockPuller) appendDDL(job *timodel.Job) { b, err := json.Marshal(job) - m.c.Assert(err, check.IsNil) + require.Nil(m.t, err) ek := []byte("m") ek = codec.EncodeBytes(ek, []byte("DDLJobList")) ek = codec.EncodeUint(ek, uint64('l')) @@ -106,13 +102,12 @@ func (m *mockPuller) appendResolvedTs(ts model.Ts) { }) } -func (s *ddlPullerSuite) TestPuller(c *check.C) { - defer testleak.AfterTest(c)() +func TestPuller(t 
*testing.T) { startTs := uint64(10) - mockPuller := newMockPuller(c, startTs) + mockPuller := newMockPuller(t, startTs) ctx := cdcContext.NewBackendContext4Test(true) p, err := newDDLPuller(ctx, startTs) - c.Assert(err, check.IsNil) + require.Nil(t, err) p.(*ddlPullerImpl).puller = mockPuller var wg sync.WaitGroup wg.Add(1) @@ -122,22 +117,22 @@ func (s *ddlPullerSuite) TestPuller(c *check.C) { if errors.Cause(err) == context.Canceled { err = nil } - c.Assert(err, check.IsNil) + require.Nil(t, err) }() defer wg.Wait() defer p.Close() // test initialize state resolvedTs, ddl := p.FrontDDL() - c.Assert(resolvedTs, check.Equals, startTs) - c.Assert(ddl, check.IsNil) + require.Equal(t, resolvedTs, startTs) + require.Nil(t, ddl) resolvedTs, ddl = p.PopFrontDDL() - c.Assert(resolvedTs, check.Equals, startTs) - c.Assert(ddl, check.IsNil) + require.Equal(t, resolvedTs, startTs) + require.Nil(t, ddl) // test send resolvedTs mockPuller.appendResolvedTs(15) - waitResolvedTsGrowing(c, p, 15) + waitResolvedTsGrowing(t, p, 15) // test send ddl job out of order mockPuller.appendDDL(&timodel.Job{ @@ -155,23 +150,23 @@ func (s *ddlPullerSuite) TestPuller(c *check.C) { BinlogInfo: &timodel.HistoryInfo{FinishedTS: 16}, }) resolvedTs, ddl = p.FrontDDL() - c.Assert(resolvedTs, check.Equals, uint64(15)) - c.Assert(ddl, check.IsNil) + require.Equal(t, resolvedTs, uint64(15)) + require.Nil(t, ddl) mockPuller.appendResolvedTs(20) - waitResolvedTsGrowing(c, p, 16) + waitResolvedTsGrowing(t, p, 16) resolvedTs, ddl = p.FrontDDL() - c.Assert(resolvedTs, check.Equals, uint64(16)) - c.Assert(ddl.ID, check.Equals, int64(1)) + require.Equal(t, resolvedTs, uint64(16)) + require.Equal(t, ddl.ID, int64(1)) resolvedTs, ddl = p.PopFrontDDL() - c.Assert(resolvedTs, check.Equals, uint64(16)) - c.Assert(ddl.ID, check.Equals, int64(1)) + require.Equal(t, resolvedTs, uint64(16)) + require.Equal(t, ddl.ID, int64(1)) // DDL could be processed with a delay, wait here for a pending DDL job is added - waitResolvedTsGrowing(c, p, 18) + waitResolvedTsGrowing(t, p, 18) resolvedTs, ddl = p.PopFrontDDL() - c.Assert(resolvedTs, check.Equals, uint64(18)) - c.Assert(ddl.ID, check.Equals, int64(2)) + require.Equal(t, resolvedTs, uint64(18)) + require.Equal(t, ddl.ID, int64(2)) // test add ddl job repeated mockPuller.appendDDL(&timodel.Job{ @@ -189,18 +184,18 @@ func (s *ddlPullerSuite) TestPuller(c *check.C) { BinlogInfo: &timodel.HistoryInfo{FinishedTS: 25}, }) mockPuller.appendResolvedTs(30) - waitResolvedTsGrowing(c, p, 25) + waitResolvedTsGrowing(t, p, 25) resolvedTs, ddl = p.PopFrontDDL() - c.Assert(resolvedTs, check.Equals, uint64(25)) - c.Assert(ddl.ID, check.Equals, int64(3)) + require.Equal(t, resolvedTs, uint64(25)) + require.Equal(t, ddl.ID, int64(3)) _, ddl = p.PopFrontDDL() - c.Assert(ddl, check.IsNil) + require.Nil(t, ddl) - waitResolvedTsGrowing(c, p, 30) + waitResolvedTsGrowing(t, p, 30) resolvedTs, ddl = p.PopFrontDDL() - c.Assert(resolvedTs, check.Equals, uint64(30)) - c.Assert(ddl, check.IsNil) + require.Equal(t, resolvedTs, uint64(30)) + require.Nil(t, ddl) // test add invalid ddl job mockPuller.appendDDL(&timodel.Job{ @@ -218,15 +213,14 @@ func (s *ddlPullerSuite) TestPuller(c *check.C) { BinlogInfo: &timodel.HistoryInfo{FinishedTS: 36}, }) mockPuller.appendResolvedTs(40) - waitResolvedTsGrowing(c, p, 40) + waitResolvedTsGrowing(t, p, 40) resolvedTs, ddl = p.PopFrontDDL() // no ddl should be received - c.Assert(resolvedTs, check.Equals, uint64(40)) - c.Assert(ddl, check.IsNil) + require.Equal(t, resolvedTs, uint64(40)) + 
require.Nil(t, ddl) } -func (*ddlPullerSuite) TestResolvedTsStuck(c *check.C) { - defer testleak.AfterTest(c)() +func TestResolvedTsStuck(t *testing.T) { // For observing the logs zapcore, logs := observer.New(zap.WarnLevel) conf := &log.Config{Level: "warn", File: log.FileLogConfig{}} @@ -236,10 +230,10 @@ func (*ddlPullerSuite) TestResolvedTsStuck(c *check.C) { defer restoreFn() startTs := uint64(10) - mockPuller := newMockPuller(c, startTs) + mockPuller := newMockPuller(t, startTs) ctx := cdcContext.NewBackendContext4Test(true) p, err := newDDLPuller(ctx, startTs) - c.Assert(err, check.IsNil) + require.Nil(t, err) mockClock := clock.NewMock() p.(*ddlPullerImpl).clock = mockClock @@ -253,22 +247,22 @@ func (*ddlPullerSuite) TestResolvedTsStuck(c *check.C) { if errors.Cause(err) == context.Canceled { err = nil } - c.Assert(err, check.IsNil) + require.Nil(t, err) }() defer wg.Wait() defer p.Close() // test initialize state resolvedTs, ddl := p.FrontDDL() - c.Assert(resolvedTs, check.Equals, startTs) - c.Assert(ddl, check.IsNil) + require.Equal(t, resolvedTs, startTs) + require.Nil(t, ddl) resolvedTs, ddl = p.PopFrontDDL() - c.Assert(resolvedTs, check.Equals, startTs) - c.Assert(ddl, check.IsNil) + require.Equal(t, resolvedTs, startTs) + require.Nil(t, ddl) mockPuller.appendResolvedTs(30) - waitResolvedTsGrowing(c, p, 30) - c.Assert(logs.Len(), check.Equals, 0) + waitResolvedTsGrowing(t, p, 30) + require.Equal(t, logs.Len(), 0) mockClock.Add(2 * ownerDDLPullerStuckWarnTimeout) for i := 0; i < 20; i++ { @@ -278,17 +272,17 @@ func (*ddlPullerSuite) TestResolvedTsStuck(c *check.C) { } time.Sleep(10 * time.Millisecond) if i == 19 { - c.Fatal("warning log not printed") + t.Fatal("warning log not printed") } } mockPuller.appendResolvedTs(40) - waitResolvedTsGrowing(c, p, 40) + waitResolvedTsGrowing(t, p, 40) } // waitResolvedTsGrowing can wait the first DDL reaches targetTs or if no pending // DDL, DDL resolved ts reaches targetTs. 
-func waitResolvedTsGrowing(c *check.C, p DDLPuller, targetTs model.Ts) { +func waitResolvedTsGrowing(t *testing.T, p DDLPuller, targetTs model.Ts) { err := retry.Do(context.Background(), func() error { resolvedTs, _ := p.FrontDDL() if resolvedTs < targetTs { @@ -296,5 +290,5 @@ func waitResolvedTsGrowing(c *check.C, p DDLPuller, targetTs model.Ts) { } return nil }, retry.WithBackoffBaseDelay(20), retry.WithMaxTries(100)) - c.Assert(err, check.IsNil) + require.Nil(t, err) } diff --git a/cdc/owner/ddl_sink_test.go b/cdc/owner/ddl_sink_test.go index 94dff72381b..3a6d2137b66 100644 --- a/cdc/owner/ddl_sink_test.go +++ b/cdc/owner/ddl_sink_test.go @@ -17,22 +17,18 @@ import ( "context" "sync" "sync/atomic" + "testing" "time" - "github.com/pingcap/check" "github.com/pingcap/errors" "github.com/pingcap/tiflow/cdc/model" "github.com/pingcap/tiflow/cdc/sink" cdcContext "github.com/pingcap/tiflow/pkg/context" cerror "github.com/pingcap/tiflow/pkg/errors" "github.com/pingcap/tiflow/pkg/retry" - "github.com/pingcap/tiflow/pkg/util/testleak" + "github.com/stretchr/testify/require" ) -var _ = check.Suite(&ddlSinkSuite{}) - -type ddlSinkSuite struct{} - type mockSink struct { sink.Sink checkpointTs model.Ts @@ -78,8 +74,7 @@ func newDDLSink4Test() (DDLSink, *mockSink) { return ddlSink, mockSink } -func (s *ddlSinkSuite) TestCheckpoint(c *check.C) { - defer testleak.AfterTest(c)() +func TestCheckpoint(t *testing.T) { ddlSink, mSink := newDDLSink4Test() ctx := cdcContext.NewBackendContext4Test(true) ctx, cancel := cdcContext.WithCancel(ctx) @@ -98,13 +93,12 @@ func (s *ddlSinkSuite) TestCheckpoint(c *check.C) { }, retry.WithBackoffBaseDelay(100), retry.WithMaxTries(30)) } ddlSink.emitCheckpointTs(ctx, 1) - c.Assert(waitCheckpointGrowingUp(mSink, 1), check.IsNil) + require.Nil(t, waitCheckpointGrowingUp(mSink, 1)) ddlSink.emitCheckpointTs(ctx, 10) - c.Assert(waitCheckpointGrowingUp(mSink, 10), check.IsNil) + require.Nil(t, waitCheckpointGrowingUp(mSink, 10)) } -func (s *ddlSinkSuite) TestExecDDL(c *check.C) { - defer testleak.AfterTest(c)() +func TestExecDDLEvents(t *testing.T) { ddlSink, mSink := newDDLSink4Test() ctx := cdcContext.NewBackendContext4Test(true) ctx, cancel := cdcContext.WithCancel(ctx) @@ -123,17 +117,16 @@ func (s *ddlSinkSuite) TestExecDDL(c *check.C) { for _, event := range ddlEvents { for { done, err := ddlSink.emitDDLEvent(ctx, event) - c.Assert(err, check.IsNil) + require.Nil(t, err) if done { - c.Assert(mSink.GetDDL(), check.DeepEquals, event) + require.Equal(t, mSink.GetDDL(), event) break } } } } -func (s *ddlSinkSuite) TestExecDDLError(c *check.C) { - defer testleak.AfterTest(c)() +func TestExecDDLError(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(true) var ( @@ -165,24 +158,24 @@ func (s *ddlSinkSuite) TestExecDDLError(c *check.C) { ddl1 := &model.DDLEvent{CommitTs: 1} for { done, err := ddlSink.emitDDLEvent(ctx, ddl1) - c.Assert(err, check.IsNil) + require.Nil(t, err) if done { - c.Assert(mSink.GetDDL(), check.DeepEquals, ddl1) + require.Equal(t, mSink.GetDDL(), ddl1) break } } - c.Assert(resultErr, check.IsNil) + require.Nil(t, resultErr) mSink.ddlError = cerror.ErrExecDDLFailed.GenWithStackByArgs() ddl2 := &model.DDLEvent{CommitTs: 2} for { done, err := ddlSink.emitDDLEvent(ctx, ddl2) - c.Assert(err, check.IsNil) + require.Nil(t, err) if done || readResultErr() != nil { - c.Assert(mSink.GetDDL(), check.DeepEquals, ddl2) + require.Equal(t, mSink.GetDDL(), ddl2) break } } - c.Assert(cerror.ErrExecDDLFailed.Equal(readResultErr()), check.IsTrue) + require.True(t, 
cerror.ErrExecDDLFailed.Equal(readResultErr())) } diff --git a/cdc/owner/feed_state_manager_test.go b/cdc/owner/feed_state_manager_test.go index cccb24b9833..3eb931722e4 100644 --- a/cdc/owner/feed_state_manager_test.go +++ b/cdc/owner/feed_state_manager_test.go @@ -14,38 +14,33 @@ package owner import ( + "testing" "time" - "github.com/pingcap/check" "github.com/pingcap/tiflow/cdc/model" "github.com/pingcap/tiflow/pkg/config" cdcContext "github.com/pingcap/tiflow/pkg/context" "github.com/pingcap/tiflow/pkg/orchestrator" - "github.com/pingcap/tiflow/pkg/util/testleak" + "github.com/stretchr/testify/require" ) -var _ = check.Suite(&feedStateManagerSuite{}) - -type feedStateManagerSuite struct{} - -func (s *feedStateManagerSuite) TestHandleJob(c *check.C) { - defer testleak.AfterTest(c)() +func TestHandleJob(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(true) manager := newFeedStateManager4Test() state := orchestrator.NewChangefeedReactorState(ctx.ChangefeedVars().ID) - tester := orchestrator.NewReactorStateTester(c, state, nil) + tester := orchestrator.NewReactorStateTester(t, state, nil) state.PatchInfo(func(info *model.ChangeFeedInfo) (*model.ChangeFeedInfo, bool, error) { - c.Assert(info, check.IsNil) + require.Nil(t, info) return &model.ChangeFeedInfo{SinkURI: "123", Config: &config.ReplicaConfig{}}, true, nil }) state.PatchStatus(func(status *model.ChangeFeedStatus) (*model.ChangeFeedStatus, bool, error) { - c.Assert(status, check.IsNil) + require.Nil(t, status) return &model.ChangeFeedStatus{}, true, nil }) tester.MustApplyPatches() manager.Tick(state) tester.MustApplyPatches() - c.Assert(manager.ShouldRunning(), check.IsTrue) + require.True(t, manager.ShouldRunning()) // an admin job which of changefeed is not match manager.PushAdminJob(&model.AdminJob{ @@ -54,7 +49,7 @@ func (s *feedStateManagerSuite) TestHandleJob(c *check.C) { }) manager.Tick(state) tester.MustApplyPatches() - c.Assert(manager.ShouldRunning(), check.IsTrue) + require.True(t, manager.ShouldRunning()) // a running can not be resume manager.PushAdminJob(&model.AdminJob{ @@ -63,7 +58,7 @@ func (s *feedStateManagerSuite) TestHandleJob(c *check.C) { }) manager.Tick(state) tester.MustApplyPatches() - c.Assert(manager.ShouldRunning(), check.IsTrue) + require.True(t, manager.ShouldRunning()) // stop a changefeed manager.PushAdminJob(&model.AdminJob{ @@ -72,11 +67,12 @@ func (s *feedStateManagerSuite) TestHandleJob(c *check.C) { }) manager.Tick(state) tester.MustApplyPatches() - c.Assert(manager.ShouldRunning(), check.IsFalse) - c.Assert(manager.ShouldRemoved(), check.IsFalse) - c.Assert(state.Info.State, check.Equals, model.StateStopped) - c.Assert(state.Info.AdminJobType, check.Equals, model.AdminStop) - c.Assert(state.Status.AdminJobType, check.Equals, model.AdminStop) + + require.False(t, manager.ShouldRunning()) + require.False(t, manager.ShouldRemoved()) + require.Equal(t, state.Info.State, model.StateStopped) + require.Equal(t, state.Info.AdminJobType, model.AdminStop) + require.Equal(t, state.Status.AdminJobType, model.AdminStop) // resume a changefeed manager.PushAdminJob(&model.AdminJob{ @@ -85,11 +81,11 @@ func (s *feedStateManagerSuite) TestHandleJob(c *check.C) { }) manager.Tick(state) tester.MustApplyPatches() - c.Assert(manager.ShouldRunning(), check.IsTrue) - c.Assert(manager.ShouldRemoved(), check.IsFalse) - c.Assert(state.Info.State, check.Equals, model.StateNormal) - c.Assert(state.Info.AdminJobType, check.Equals, model.AdminNone) - c.Assert(state.Status.AdminJobType, check.Equals, 
model.AdminNone) + require.True(t, manager.ShouldRunning()) + require.False(t, manager.ShouldRemoved()) + require.Equal(t, state.Info.State, model.StateNormal) + require.Equal(t, state.Info.AdminJobType, model.AdminNone) + require.Equal(t, state.Status.AdminJobType, model.AdminNone) // remove a changefeed manager.PushAdminJob(&model.AdminJob{ @@ -98,51 +94,51 @@ func (s *feedStateManagerSuite) TestHandleJob(c *check.C) { }) manager.Tick(state) tester.MustApplyPatches() - c.Assert(manager.ShouldRunning(), check.IsFalse) - c.Assert(manager.ShouldRemoved(), check.IsTrue) - c.Assert(state.Exist(), check.IsFalse) + + require.False(t, manager.ShouldRunning()) + require.True(t, manager.ShouldRemoved()) + require.False(t, state.Exist()) } -func (s *feedStateManagerSuite) TestMarkFinished(c *check.C) { - defer testleak.AfterTest(c)() +func TestMarkFinished(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(true) manager := newFeedStateManager4Test() state := orchestrator.NewChangefeedReactorState(ctx.ChangefeedVars().ID) - tester := orchestrator.NewReactorStateTester(c, state, nil) + tester := orchestrator.NewReactorStateTester(t, state, nil) state.PatchInfo(func(info *model.ChangeFeedInfo) (*model.ChangeFeedInfo, bool, error) { - c.Assert(info, check.IsNil) + require.Nil(t, info) return &model.ChangeFeedInfo{SinkURI: "123", Config: &config.ReplicaConfig{}}, true, nil }) state.PatchStatus(func(status *model.ChangeFeedStatus) (*model.ChangeFeedStatus, bool, error) { - c.Assert(status, check.IsNil) + require.Nil(t, status) return &model.ChangeFeedStatus{}, true, nil }) tester.MustApplyPatches() manager.Tick(state) tester.MustApplyPatches() - c.Assert(manager.ShouldRunning(), check.IsTrue) + require.True(t, manager.ShouldRunning()) manager.MarkFinished() manager.Tick(state) tester.MustApplyPatches() - c.Assert(manager.ShouldRunning(), check.IsFalse) - c.Assert(state.Info.State, check.Equals, model.StateFinished) - c.Assert(state.Info.AdminJobType, check.Equals, model.AdminFinish) - c.Assert(state.Status.AdminJobType, check.Equals, model.AdminFinish) + + require.False(t, manager.ShouldRunning()) + require.Equal(t, state.Info.State, model.StateFinished) + require.Equal(t, state.Info.AdminJobType, model.AdminFinish) + require.Equal(t, state.Status.AdminJobType, model.AdminFinish) } -func (s *feedStateManagerSuite) TestCleanUpInfos(c *check.C) { - defer testleak.AfterTest(c)() +func TestCleanUpInfos(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(true) manager := newFeedStateManager4Test() state := orchestrator.NewChangefeedReactorState(ctx.ChangefeedVars().ID) - tester := orchestrator.NewReactorStateTester(c, state, nil) + tester := orchestrator.NewReactorStateTester(t, state, nil) state.PatchInfo(func(info *model.ChangeFeedInfo) (*model.ChangeFeedInfo, bool, error) { - c.Assert(info, check.IsNil) + require.Nil(t, info) return &model.ChangeFeedInfo{SinkURI: "123", Config: &config.ReplicaConfig{}}, true, nil }) state.PatchStatus(func(status *model.ChangeFeedStatus) (*model.ChangeFeedStatus, bool, error) { - c.Assert(status, check.IsNil) + require.Nil(t, status) return &model.ChangeFeedStatus{}, true, nil }) state.PatchTaskStatus(ctx.GlobalVars().CaptureInfo.ID, func(status *model.TaskStatus) (*model.TaskStatus, bool, error) { @@ -155,37 +151,36 @@ func (s *feedStateManagerSuite) TestCleanUpInfos(c *check.C) { return model.TaskWorkload{}, true, nil }) tester.MustApplyPatches() - c.Assert(state.TaskStatuses, check.HasKey, ctx.GlobalVars().CaptureInfo.ID) - c.Assert(state.TaskPositions, 
check.HasKey, ctx.GlobalVars().CaptureInfo.ID) - c.Assert(state.Workloads, check.HasKey, ctx.GlobalVars().CaptureInfo.ID) + require.Contains(t, state.TaskStatuses, ctx.GlobalVars().CaptureInfo.ID) + require.Contains(t, state.TaskPositions, ctx.GlobalVars().CaptureInfo.ID) + require.Contains(t, state.Workloads, ctx.GlobalVars().CaptureInfo.ID) manager.Tick(state) tester.MustApplyPatches() - c.Assert(manager.ShouldRunning(), check.IsTrue) + require.True(t, manager.ShouldRunning()) manager.MarkFinished() manager.Tick(state) tester.MustApplyPatches() - c.Assert(manager.ShouldRunning(), check.IsFalse) - c.Assert(state.Info.State, check.Equals, model.StateFinished) - c.Assert(state.Info.AdminJobType, check.Equals, model.AdminFinish) - c.Assert(state.Status.AdminJobType, check.Equals, model.AdminFinish) - c.Assert(state.TaskStatuses, check.Not(check.HasKey), ctx.GlobalVars().CaptureInfo.ID) - c.Assert(state.TaskPositions, check.Not(check.HasKey), ctx.GlobalVars().CaptureInfo.ID) - c.Assert(state.Workloads, check.Not(check.HasKey), ctx.GlobalVars().CaptureInfo.ID) + require.False(t, manager.ShouldRunning()) + require.Equal(t, state.Info.State, model.StateFinished) + require.Equal(t, state.Info.AdminJobType, model.AdminFinish) + require.Equal(t, state.Status.AdminJobType, model.AdminFinish) + require.NotContains(t, state.TaskStatuses, ctx.GlobalVars().CaptureInfo.ID) + require.NotContains(t, state.TaskPositions, ctx.GlobalVars().CaptureInfo.ID) + require.NotContains(t, state.Workloads, ctx.GlobalVars().CaptureInfo.ID) } -func (s *feedStateManagerSuite) TestHandleError(c *check.C) { - defer testleak.AfterTest(c)() +func TestHandleError(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(true) manager := newFeedStateManager4Test() state := orchestrator.NewChangefeedReactorState(ctx.ChangefeedVars().ID) - tester := orchestrator.NewReactorStateTester(c, state, nil) + tester := orchestrator.NewReactorStateTester(t, state, nil) state.PatchInfo(func(info *model.ChangeFeedInfo) (*model.ChangeFeedInfo, bool, error) { - c.Assert(info, check.IsNil) + require.Nil(t, info) return &model.ChangeFeedInfo{SinkURI: "123", Config: &config.ReplicaConfig{}}, true, nil }) state.PatchStatus(func(status *model.ChangeFeedStatus) (*model.ChangeFeedStatus, bool, error) { - c.Assert(status, check.IsNil) + require.Nil(t, status) return &model.ChangeFeedStatus{}, true, nil }) state.PatchTaskStatus(ctx.GlobalVars().CaptureInfo.ID, func(status *model.TaskStatus) (*model.TaskStatus, bool, error) { @@ -206,7 +201,7 @@ func (s *feedStateManagerSuite) TestHandleError(c *check.C) { } for _, d := range intervals { - c.Assert(manager.ShouldRunning(), check.IsTrue) + require.True(t, manager.ShouldRunning()) state.PatchTaskPosition(ctx.GlobalVars().CaptureInfo.ID, func(position *model.TaskPosition) (*model.TaskPosition, bool, error) { return &model.TaskPosition{Error: &model.RunningError{ Addr: ctx.GlobalVars().CaptureInfo.AdvertiseAddr, @@ -217,17 +212,17 @@ func (s *feedStateManagerSuite) TestHandleError(c *check.C) { tester.MustApplyPatches() manager.Tick(state) tester.MustApplyPatches() - c.Assert(manager.ShouldRunning(), check.IsFalse) + require.False(t, manager.ShouldRunning()) time.Sleep(d) manager.Tick(state) tester.MustApplyPatches() } - c.Assert(manager.ShouldRunning(), check.IsFalse) - c.Assert(manager.ShouldRemoved(), check.IsFalse) - c.Assert(state.Info.State, check.Equals, model.StateFailed) - c.Assert(state.Info.AdminJobType, check.Equals, model.AdminStop) - c.Assert(state.Status.AdminJobType, check.Equals, 
model.AdminStop) + require.False(t, manager.ShouldRunning()) + require.False(t, manager.ShouldRemoved()) + require.Equal(t, state.Info.State, model.StateFailed) + require.Equal(t, state.Info.AdminJobType, model.AdminStop) + require.Equal(t, state.Status.AdminJobType, model.AdminStop) // admin resume must retry changefeed immediately. manager.PushAdminJob(&model.AdminJob{ @@ -237,26 +232,56 @@ func (s *feedStateManagerSuite) TestHandleError(c *check.C) { }) manager.Tick(state) tester.MustApplyPatches() - c.Assert(manager.ShouldRunning(), check.IsTrue) - c.Assert(manager.ShouldRemoved(), check.IsFalse) - c.Assert(state.Info.State, check.Equals, model.StateNormal) - c.Assert(state.Info.AdminJobType, check.Equals, model.AdminNone) - c.Assert(state.Status.AdminJobType, check.Equals, model.AdminNone) + require.True(t, manager.ShouldRunning()) + require.False(t, manager.ShouldRemoved()) + require.Equal(t, state.Info.State, model.StateNormal) + require.Equal(t, state.Info.AdminJobType, model.AdminNone) + require.Equal(t, state.Status.AdminJobType, model.AdminNone) +} + +func TestHandleFastFailError(t *testing.T) { + ctx := cdcContext.NewBackendContext4Test(true) + manager := new(feedStateManager) + state := orchestrator.NewChangefeedReactorState(ctx.ChangefeedVars().ID) + tester := orchestrator.NewReactorStateTester(t, state, nil) + state.PatchInfo(func(info *model.ChangeFeedInfo) (*model.ChangeFeedInfo, bool, error) { + require.Nil(t, info) + return &model.ChangeFeedInfo{SinkURI: "123", Config: &config.ReplicaConfig{}}, true, nil + }) + state.PatchTaskPosition(ctx.GlobalVars().CaptureInfo.ID, func(position *model.TaskPosition) (*model.TaskPosition, bool, error) { + return &model.TaskPosition{Error: &model.RunningError{ + Addr: ctx.GlobalVars().CaptureInfo.AdvertiseAddr, + Code: "CDC:ErrGCTTLExceeded", + Message: "fake error for test", + }}, true, nil + }) + tester.MustApplyPatches() + manager.Tick(state) + // test handling fast failed error with non-nil ChangeFeedInfo + tester.MustApplyPatches() + // test handling fast failed error with nil ChangeFeedInfo + // set info to nil when this patch is applied + state.PatchInfo(func(info *model.ChangeFeedInfo) (*model.ChangeFeedInfo, bool, error) { + return nil, true, nil + }) + manager.Tick(state) + // When the patches are applied, the callback function of PatchInfo in feedStateManager.HandleError will be called. + // At that time, the nil pointer will be checked instead of throwing a panic. See issue #3128 for more detail. 
+ tester.MustApplyPatches() } -func (s *feedStateManagerSuite) TestChangefeedStatusNotExist(c *check.C) { - defer testleak.AfterTest(c)() +func TestChangefeedStatusNotExist(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(true) manager := newFeedStateManager4Test() state := orchestrator.NewChangefeedReactorState(ctx.ChangefeedVars().ID) - tester := orchestrator.NewReactorStateTester(c, state, map[string]string{ + tester := orchestrator.NewReactorStateTester(t, state, map[string]string{ "/tidb/cdc/capture/d563bfc0-f406-4f34-bc7d-6dc2e35a44e5": `{"id":"d563bfc0-f406-4f34-bc7d-6dc2e35a44e5","address":"172.16.6.147:8300","version":"v5.0.0-master-dirty"}`, "/tidb/cdc/changefeed/info/" + ctx.ChangefeedVars().ID: `{"sink-uri":"blackhole:///","opts":{},"create-time":"2021-06-05T00:44:15.065939487+08:00","start-ts":425381670108266496,"target-ts":0,"admin-job-type":1,"sort-engine":"unified","config":{"case-sensitive":true,"enable-old-value":true,"force-replicate":false,"check-gc-safe-point":true,"filter":{"rules":["*.*"],"ignore-txn-start-ts":null},"mounter":{"worker-num":16},"sink":{"dispatchers":null,"protocol":"open-protocol"},"cyclic-replication":{"enable":false,"replica-id":0,"filter-replica-ids":null,"id-buckets":0,"sync-ddl":false},"scheduler":{"type":"table-number","polling-time":-1}},"state":"failed","history":[],"error":{"addr":"172.16.6.147:8300","code":"CDC:ErrSnapshotLostByGC","message":"[CDC:ErrSnapshotLostByGC]fail to create or maintain changefeed due to snapshot loss caused by GC. checkpoint-ts 425381670108266496 is earlier than GC safepoint at 0"},"sync-point-enabled":false,"sync-point-interval":600000000000,"creator-version":"v5.0.0-master-dirty"}`, "/tidb/cdc/owner/156579d017f84a68": "d563bfc0-f406-4f34-bc7d-6dc2e35a44e5", }) manager.Tick(state) - c.Assert(manager.ShouldRunning(), check.IsFalse) - c.Assert(manager.ShouldRemoved(), check.IsFalse) + require.False(t, manager.ShouldRunning()) + require.False(t, manager.ShouldRemoved()) tester.MustApplyPatches() manager.PushAdminJob(&model.AdminJob{ @@ -265,9 +290,9 @@ func (s *feedStateManagerSuite) TestChangefeedStatusNotExist(c *check.C) { Opts: &model.AdminJobOption{ForceRemove: true}, }) manager.Tick(state) - c.Assert(manager.ShouldRunning(), check.IsFalse) - c.Assert(manager.ShouldRemoved(), check.IsTrue) + require.False(t, manager.ShouldRunning()) + require.True(t, manager.ShouldRemoved()) tester.MustApplyPatches() - c.Assert(state.Info, check.IsNil) - c.Assert(state.Exist(), check.IsFalse) + require.Nil(t, state.Info) + require.False(t, state.Exist()) } diff --git a/cdc/owner/main_test.go b/cdc/owner/main_test.go new file mode 100644 index 00000000000..2019d2834ec --- /dev/null +++ b/cdc/owner/main_test.go @@ -0,0 +1,24 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package owner + +import ( + "testing" + + "github.com/pingcap/tiflow/pkg/leakutil" +) + +func TestMain(m *testing.M) { + leakutil.SetUpLeakTest(m) +} diff --git a/cdc/owner/owner_test.go b/cdc/owner/owner_test.go index b836d174e32..bcca13f5dad 100644 --- a/cdc/owner/owner_test.go +++ b/cdc/owner/owner_test.go @@ -18,9 +18,9 @@ import ( "context" "fmt" "math" + "testing" "time" - "github.com/pingcap/check" "github.com/pingcap/tiflow/cdc/model" "github.com/pingcap/tiflow/pkg/config" cdcContext "github.com/pingcap/tiflow/pkg/context" @@ -28,14 +28,10 @@ import ( "github.com/pingcap/tiflow/pkg/etcd" "github.com/pingcap/tiflow/pkg/orchestrator" "github.com/pingcap/tiflow/pkg/txnutil/gc" - "github.com/pingcap/tiflow/pkg/util/testleak" + "github.com/stretchr/testify/require" "github.com/tikv/client-go/v2/oracle" ) -var _ = check.Suite(&ownerSuite{}) - -type ownerSuite struct{} - type mockManager struct { gc.Manager } @@ -48,7 +44,7 @@ func (m *mockManager) CheckStaleCheckpointTs( var _ gc.Manager = (*mockManager)(nil) -func createOwner4Test(ctx cdcContext.Context, c *check.C) (*Owner, *orchestrator.GlobalReactorState, *orchestrator.ReactorStateTester) { +func createOwner4Test(ctx cdcContext.Context, t *testing.T) (*Owner, *orchestrator.GlobalReactorState, *orchestrator.ReactorStateTester) { ctx.GlobalVars().PDClient = &gc.MockPDClient{ UpdateServiceGCSafePointFunc: func(ctx context.Context, serviceID string, ttl int64, safePoint uint64) (uint64, error) { return safePoint, nil @@ -62,7 +58,7 @@ func createOwner4Test(ctx cdcContext.Context, c *check.C) (*Owner, *orchestrator ctx.GlobalVars().PDClient, ) state := orchestrator.NewGlobalState() - tester := orchestrator.NewReactorStateTester(c, state, nil) + tester := orchestrator.NewReactorStateTester(t, state, nil) // set captures cdcKey := etcd.CDCKey{ @@ -70,18 +66,17 @@ func createOwner4Test(ctx cdcContext.Context, c *check.C) (*Owner, *orchestrator CaptureID: ctx.GlobalVars().CaptureInfo.ID, } captureBytes, err := ctx.GlobalVars().CaptureInfo.Marshal() - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustUpdate(cdcKey.String(), captureBytes) return owner, state, tester } -func (s *ownerSuite) TestCreateRemoveChangefeed(c *check.C) { - defer testleak.AfterTest(c)() +func TestCreateRemoveChangefeed(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(false) ctx, cancel := cdcContext.WithCancel(ctx) defer cancel() - owner, state, tester := createOwner4Test(ctx, c) + owner, state, tester := createOwner4Test(ctx, t) changefeedID := "test-changefeed" changefeedInfo := &model.ChangeFeedInfo{ @@ -89,7 +84,7 @@ func (s *ownerSuite) TestCreateRemoveChangefeed(c *check.C) { Config: config.GetDefaultReplicaConfig(), } changefeedStr, err := changefeedInfo.Marshal() - c.Assert(err, check.IsNil) + require.Nil(t, err) cdcKey := etcd.CDCKey{ Tp: etcd.CDCKeyTypeChangefeedInfo, ChangefeedID: changefeedID, @@ -97,29 +92,28 @@ func (s *ownerSuite) TestCreateRemoveChangefeed(c *check.C) { tester.MustUpdate(cdcKey.String(), []byte(changefeedStr)) _, err = owner.Tick(ctx, state) tester.MustApplyPatches() - c.Assert(err, check.IsNil) - c.Assert(owner.changefeeds, check.HasKey, changefeedID) + require.Nil(t, err) + require.Contains(t, owner.changefeeds, changefeedID) // delete changefeed info key to remove changefeed tester.MustUpdate(cdcKey.String(), nil) // this tick to clean the leak info fo the removed changefeed _, err = owner.Tick(ctx, state) - c.Assert(err, check.IsNil) + require.Nil(t, err) // this tick to remove the changefeed state in memory 
tester.MustApplyPatches() _, err = owner.Tick(ctx, state) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() - c.Assert(err, check.IsNil) - c.Assert(owner.changefeeds, check.Not(check.HasKey), changefeedID) - c.Assert(state.Changefeeds, check.Not(check.HasKey), changefeedID) + + require.NotContains(t, owner.changefeeds, changefeedID) + require.NotContains(t, state.Changefeeds, changefeedID) tester.MustUpdate(cdcKey.String(), []byte(changefeedStr)) _, err = owner.Tick(ctx, state) tester.MustApplyPatches() - c.Assert(err, check.IsNil) - c.Assert(owner.changefeeds, check.HasKey, changefeedID) - + require.Nil(t, err) + require.Contains(t, owner.changefeeds, changefeedID) removeJob := model.AdminJob{ CfID: changefeedID, Type: model.AdminRemove, @@ -131,24 +125,23 @@ func (s *ownerSuite) TestCreateRemoveChangefeed(c *check.C) { mockedManager := &mockManager{Manager: owner.gcManager} owner.gcManager = mockedManager err = owner.gcManager.CheckStaleCheckpointTs(ctx, changefeedID, 0) - c.Assert(err, check.NotNil) + require.NotNil(t, err) // this tick create remove changefeed patches owner.EnqueueJob(removeJob) _, err = owner.Tick(ctx, state) - c.Assert(err, check.IsNil) + require.Nil(t, err) // apply patches and update owner's in memory changefeed states tester.MustApplyPatches() _, err = owner.Tick(ctx, state) - c.Assert(err, check.IsNil) - c.Assert(owner.changefeeds, check.Not(check.HasKey), changefeedID) + require.Nil(t, err) + require.NotContains(t, owner.changefeeds, changefeedID) } -func (s *ownerSuite) TestStopChangefeed(c *check.C) { - defer testleak.AfterTest(c)() +func TestStopChangefeed(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(false) - owner, state, tester := createOwner4Test(ctx, c) + owner, state, tester := createOwner4Test(ctx, t) ctx, cancel := cdcContext.WithCancel(ctx) defer cancel() @@ -158,7 +151,7 @@ func (s *ownerSuite) TestStopChangefeed(c *check.C) { Config: config.GetDefaultReplicaConfig(), } changefeedStr, err := changefeedInfo.Marshal() - c.Assert(err, check.IsNil) + require.Nil(t, err) cdcKey := etcd.CDCKey{ Tp: etcd.CDCKeyTypeChangefeedInfo, ChangefeedID: changefeedID, @@ -166,9 +159,8 @@ func (s *ownerSuite) TestStopChangefeed(c *check.C) { tester.MustUpdate(cdcKey.String(), []byte(changefeedStr)) _, err = owner.Tick(ctx, state) tester.MustApplyPatches() - c.Assert(err, check.IsNil) - c.Assert(owner.changefeeds, check.HasKey, changefeedID) - + require.Nil(t, err) + require.Contains(t, owner.changefeeds, changefeedID) // remove changefeed forcibly owner.EnqueueJob(model.AdminJob{ CfID: changefeedID, @@ -180,23 +172,20 @@ func (s *ownerSuite) TestStopChangefeed(c *check.C) { // this tick to clean the leak info fo the removed changefeed _, err = owner.Tick(ctx, state) - c.Assert(err, check.IsNil) - c.Assert(err, check.IsNil) + require.Nil(t, err) // this tick to remove the changefeed state in memory tester.MustApplyPatches() _, err = owner.Tick(ctx, state) - c.Assert(err, check.IsNil) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() - c.Assert(err, check.IsNil) - c.Assert(owner.changefeeds, check.Not(check.HasKey), changefeedID) - c.Assert(state.Changefeeds, check.Not(check.HasKey), changefeedID) + require.Nil(t, err) + require.NotContains(t, owner.changefeeds, changefeedID) + require.NotContains(t, state.Changefeeds, changefeedID) } -func (s *ownerSuite) TestFixChangefeedState(c *check.C) { - defer testleak.AfterTest(c)() +func TestFixChangefeedState(t *testing.T) { ctx := 
cdcContext.NewBackendContext4Test(false) - owner, state, tester := createOwner4Test(ctx, c) + owner, state, tester := createOwner4Test(ctx, t) // We need to do bootstrap. owner.bootstrapped = false changefeedID := "test-changefeed" @@ -208,7 +197,7 @@ func (s *ownerSuite) TestFixChangefeedState(c *check.C) { Config: config.GetDefaultReplicaConfig(), } changefeedStr, err := changefeedInfo.Marshal() - c.Assert(err, check.IsNil) + require.Nil(t, err) cdcKey := etcd.CDCKey{ Tp: etcd.CDCKeyTypeChangefeedInfo, ChangefeedID: changefeedID, @@ -217,23 +206,21 @@ func (s *ownerSuite) TestFixChangefeedState(c *check.C) { // For the first tick, we do a bootstrap, and it tries to fix the meta information. _, err = owner.Tick(ctx, state) tester.MustApplyPatches() - c.Assert(err, check.IsNil) - c.Assert(owner.bootstrapped, check.IsTrue) - c.Assert(owner.changefeeds, check.Not(check.HasKey), changefeedID) - + require.Nil(t, err) + require.True(t, owner.bootstrapped) + require.NotContains(t, owner.changefeeds, changefeedID) // Start tick normally. _, err = owner.Tick(ctx, state) tester.MustApplyPatches() - c.Assert(err, check.IsNil) - c.Assert(owner.changefeeds, check.HasKey, changefeedID) + require.Nil(t, err) + require.Contains(t, owner.changefeeds, changefeedID) // The meta information is fixed correctly. - c.Assert(owner.changefeeds[changefeedID].state.Info.State, check.Equals, model.StateStopped) + require.Equal(t, owner.changefeeds[changefeedID].state.Info.State, model.StateStopped) } -func (s *ownerSuite) TestFixChangefeedSinkProtocol(c *check.C) { - defer testleak.AfterTest(c)() +func TestFixChangefeedSinkProtocol(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(false) - owner, state, tester := createOwner4Test(ctx, c) + owner, state, tester := createOwner4Test(ctx, t) // We need to do bootstrap. owner.bootstrapped = false changefeedID := "test-changefeed" @@ -249,7 +236,7 @@ func (s *ownerSuite) TestFixChangefeedSinkProtocol(c *check.C) { }, } changefeedStr, err := changefeedInfo.Marshal() - c.Assert(err, check.IsNil) + require.Nil(t, err) cdcKey := etcd.CDCKey{ Tp: etcd.CDCKeyTypeChangefeedInfo, ChangefeedID: changefeedID, @@ -258,25 +245,23 @@ func (s *ownerSuite) TestFixChangefeedSinkProtocol(c *check.C) { // For the first tick, we do a bootstrap, and it tries to fix the meta information. _, err = owner.Tick(ctx, state) tester.MustApplyPatches() - c.Assert(err, check.IsNil) - c.Assert(owner.bootstrapped, check.IsTrue) - c.Assert(owner.changefeeds, check.Not(check.HasKey), changefeedID) + require.Nil(t, err) + require.True(t, owner.bootstrapped) + require.NotContains(t, owner.changefeeds, changefeedID) // Start tick normally. _, err = owner.Tick(ctx, state) tester.MustApplyPatches() - c.Assert(err, check.IsNil) - c.Assert(owner.changefeeds, check.HasKey, changefeedID) + require.Nil(t, err) + require.Contains(t, owner.changefeeds, changefeedID) // The meta information is fixed correctly. 
- c.Assert(owner.changefeeds[changefeedID].state.Info.SinkURI, - check.Equals, + require.Equal(t, owner.changefeeds[changefeedID].state.Info.SinkURI, "kafka://127.0.0.1:9092/ticdc-test2?protocol=open-protocol") } -func (s *ownerSuite) TestCheckClusterVersion(c *check.C) { - defer testleak.AfterTest(c)() +func TestCheckClusterVersion(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(false) - owner, state, tester := createOwner4Test(ctx, c) + owner, state, tester := createOwner4Test(ctx, t) ctx, cancel := cdcContext.WithCancel(ctx) defer cancel() @@ -288,7 +273,7 @@ func (s *ownerSuite) TestCheckClusterVersion(c *check.C) { Config: config.GetDefaultReplicaConfig(), } changefeedStr, err := changefeedInfo.Marshal() - c.Assert(err, check.IsNil) + require.Nil(t, err) cdcKey := etcd.CDCKey{ Tp: etcd.CDCKeyTypeChangefeedInfo, ChangefeedID: changefeedID, @@ -298,8 +283,8 @@ func (s *ownerSuite) TestCheckClusterVersion(c *check.C) { // check the tick is skipped and the changefeed will not be handled _, err = owner.Tick(ctx, state) tester.MustApplyPatches() - c.Assert(err, check.IsNil) - c.Assert(owner.changefeeds, check.Not(check.HasKey), changefeedID) + require.Nil(t, err) + require.NotContains(t, owner.changefeeds, changefeedID) tester.MustUpdate("/tidb/cdc/capture/6bbc01c8-0605-4f86-a0f9-b3119109b225", []byte(`{"id":"6bbc01c8-0605-4f86-a0f9-b3119109b225","address":"127.0.0.1:8300","version":"`+ctx.GlobalVars().CaptureInfo.Version+`"}`)) @@ -307,17 +292,16 @@ func (s *ownerSuite) TestCheckClusterVersion(c *check.C) { // check the tick is not skipped and the changefeed will be handled normally _, err = owner.Tick(ctx, state) tester.MustApplyPatches() - c.Assert(err, check.IsNil) - c.Assert(owner.changefeeds, check.HasKey, changefeedID) + require.Nil(t, err) + require.Contains(t, owner.changefeeds, changefeedID) } -func (s *ownerSuite) TestAdminJob(c *check.C) { - defer testleak.AfterTest(c)() +func TestAdminJob(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(false) ctx, cancel := cdcContext.WithCancel(ctx) defer cancel() - owner, _, _ := createOwner4Test(ctx, c) + owner, _, _ := createOwner4Test(ctx, t) owner.EnqueueJob(model.AdminJob{ CfID: "test-changefeed1", Type: model.AdminResume, @@ -330,11 +314,11 @@ func (s *ownerSuite) TestAdminJob(c *check.C) { // remove job.done, it's hard to check deep equals jobs := owner.takeOwnerJobs() for _, job := range jobs { - c.Assert(job.done, check.NotNil) + require.NotNil(t, job.done) close(job.done) job.done = nil } - c.Assert(jobs, check.DeepEquals, []*ownerJob{ + require.Equal(t, jobs, []*ownerJob{ { tp: ownerJobTypeAdminJob, adminJob: &model.AdminJob{ @@ -355,11 +339,10 @@ func (s *ownerSuite) TestAdminJob(c *check.C) { debugInfoWriter: &buf, }, }) - c.Assert(owner.takeOwnerJobs(), check.HasLen, 0) + require.Len(t, owner.takeOwnerJobs(), 0) } -func (s *ownerSuite) TestUpdateGCSafePoint(c *check.C) { - defer testleak.AfterTest(c)() +func TestUpdateGCSafePoint(t *testing.T) { mockPDClient := &gc.MockPDClient{} o := NewOwner(mockPDClient) o.gcManager = gc.NewManager(mockPDClient) @@ -367,23 +350,23 @@ func (s *ownerSuite) TestUpdateGCSafePoint(c *check.C) { ctx, cancel := cdcContext.WithCancel(ctx) defer cancel() state := orchestrator.NewGlobalState() - tester := orchestrator.NewReactorStateTester(c, state, nil) + tester := orchestrator.NewReactorStateTester(t, state, nil) // no changefeed, the gc safe point should be max uint64 mockPDClient.UpdateServiceGCSafePointFunc = func(ctx context.Context, serviceID string, ttl int64, safePoint 
uint64) (uint64, error) { // Owner will do a snapshot read at (checkpointTs - 1) from TiKV, // set GC safepoint to (checkpointTs - 1) - c.Assert(safePoint, check.Equals, uint64(math.MaxUint64-1)) + require.Equal(t, safePoint, uint64(math.MaxUint64-1)) return 0, nil } err := o.updateGCSafepoint(ctx, state) - c.Assert(err, check.IsNil) + require.Nil(t, err) // add a failed changefeed, it must not trigger update GC safepoint. mockPDClient.UpdateServiceGCSafePointFunc = func(ctx context.Context, serviceID string, ttl int64, safePoint uint64) (uint64, error) { - c.Fatal("must not update") + t.Fatal("must not update") return 0, nil } changefeedID1 := "changefeed-test1" @@ -397,7 +380,7 @@ func (s *ownerSuite) TestUpdateGCSafePoint(c *check.C) { }) tester.MustApplyPatches() err = o.updateGCSafepoint(ctx, state) - c.Assert(err, check.IsNil) + require.Nil(t, err) // switch the state of changefeed to normal, it must update GC safepoint to // 1 (checkpoint Ts of changefeed-test1). @@ -406,8 +389,8 @@ func (s *ownerSuite) TestUpdateGCSafePoint(c *check.C) { func(ctx context.Context, serviceID string, ttl int64, safePoint uint64) (uint64, error) { // Owner will do a snapshot read at (checkpointTs - 1) from TiKV, // set GC safepoint to (checkpointTs - 1) - c.Assert(safePoint, check.Equals, uint64(1)) - c.Assert(serviceID, check.Equals, gc.CDCServiceSafePointID) + require.Equal(t, safePoint, uint64(1)) + require.Equal(t, serviceID, gc.CDCServiceSafePointID) ch <- struct{}{} return 0, nil } @@ -418,10 +401,10 @@ func (s *ownerSuite) TestUpdateGCSafePoint(c *check.C) { }) tester.MustApplyPatches() err = o.updateGCSafepoint(ctx, state) - c.Assert(err, check.IsNil) + require.Nil(t, err) select { case <-time.After(5 * time.Second): - c.Fatal("timeout") + t.Fatal("timeout") case <-ch: } @@ -444,28 +427,27 @@ func (s *ownerSuite) TestUpdateGCSafePoint(c *check.C) { func(ctx context.Context, serviceID string, ttl int64, safePoint uint64) (uint64, error) { // Owner will do a snapshot read at (checkpointTs - 1) from TiKV, // set GC safepoint to (checkpointTs - 1) - c.Assert(safePoint, check.Equals, uint64(19)) - c.Assert(serviceID, check.Equals, gc.CDCServiceSafePointID) + require.Equal(t, safePoint, uint64(19)) + require.Equal(t, serviceID, gc.CDCServiceSafePointID) ch <- struct{}{} return 0, nil } err = o.updateGCSafepoint(ctx, state) - c.Assert(err, check.IsNil) + require.Nil(t, err) select { case <-time.After(5 * time.Second): - c.Fatal("timeout") + t.Fatal("timeout") case <-ch: } } // make sure handleJobs works well even if there is two different // version of captures in the cluster -func (s *ownerSuite) TestHandleJobsDontBlock(c *check.C) { - defer testleak.AfterTest(c)() +func TestHandleJobsDontBlock(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(false) ctx, cancel := cdcContext.WithCancel(ctx) defer cancel() - owner, state, tester := createOwner4Test(ctx, c) + owner, state, tester := createOwner4Test(ctx, t) statusProvider := owner.StatusProvider() // work well @@ -476,7 +458,7 @@ func (s *ownerSuite) TestHandleJobsDontBlock(c *check.C) { State: model.StateNormal, } changefeedStr, err := cfInfo1.Marshal() - c.Assert(err, check.IsNil) + require.Nil(t, err) cdcKey := etcd.CDCKey{ Tp: etcd.CDCKeyTypeChangefeedInfo, ChangefeedID: cf1, @@ -484,9 +466,9 @@ func (s *ownerSuite) TestHandleJobsDontBlock(c *check.C) { tester.MustUpdate(cdcKey.String(), []byte(changefeedStr)) _, err = owner.Tick(ctx, state) tester.MustApplyPatches() - c.Assert(err, check.IsNil) + require.Nil(t, err) - 
c.Assert(owner.changefeeds, check.HasKey, cf1) + require.Contains(t, owner.changefeeds, cf1) // add an non-consistent version capture captureInfo := &model.CaptureInfo{ @@ -499,7 +481,7 @@ func (s *ownerSuite) TestHandleJobsDontBlock(c *check.C) { CaptureID: captureInfo.ID, } v, err := captureInfo.Marshal() - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustUpdate(cdcKey.String(), v) // try to add another changefeed @@ -510,7 +492,7 @@ func (s *ownerSuite) TestHandleJobsDontBlock(c *check.C) { State: model.StateNormal, } changefeedStr1, err := cfInfo2.Marshal() - c.Assert(err, check.IsNil) + require.Nil(t, err) cdcKey = etcd.CDCKey{ Tp: etcd.CDCKeyTypeChangefeedInfo, ChangefeedID: cf2, @@ -518,10 +500,10 @@ func (s *ownerSuite) TestHandleJobsDontBlock(c *check.C) { tester.MustUpdate(cdcKey.String(), []byte(changefeedStr1)) _, err = owner.Tick(ctx, state) tester.MustApplyPatches() - c.Assert(err, check.IsNil) + require.Nil(t, err) // make sure this changefeed add failed, which means that owner are return // in clusterVersionConsistent check - c.Assert(owner.changefeeds[cf2], check.IsNil) + require.Nil(t, owner.changefeeds[cf2]) // make sure statusProvider works well ctx1, cancel := context.WithTimeout(context.Background(), time.Second*5) @@ -543,13 +525,13 @@ WorkLoop: case <-done: break WorkLoop case <-ctx1.Done(): - c.Fatal(ctx1.Err()) + t.Fatal(ctx1.Err()) case <-ticker.C: _, err = owner.Tick(ctx, state) - c.Assert(err, check.IsNil) + require.Nil(t, err) } } - c.Assert(errIn, check.IsNil) - c.Assert(infos[cf1], check.NotNil) - c.Assert(infos[cf2], check.IsNil) + require.Nil(t, errIn) + require.NotNil(t, infos[cf1]) + require.Nil(t, infos[cf2]) } diff --git a/cdc/owner/scheduler_v1_test.go b/cdc/owner/scheduler_v1_test.go index eb2950b7190..289c951b3db 100644 --- a/cdc/owner/scheduler_v1_test.go +++ b/cdc/owner/scheduler_v1_test.go @@ -16,17 +16,15 @@ package owner import ( "fmt" "math/rand" + "testing" - "github.com/pingcap/check" "github.com/pingcap/tiflow/cdc/model" "github.com/pingcap/tiflow/pkg/etcd" "github.com/pingcap/tiflow/pkg/orchestrator" - "github.com/pingcap/tiflow/pkg/util/testleak" + "github.com/stretchr/testify/require" ) -var _ = check.Suite(&schedulerSuite{}) - -type schedulerSuite struct { +type schedulerTester struct { changefeedID model.ChangeFeedID state *orchestrator.ChangefeedReactorState tester *orchestrator.ReactorStateTester @@ -34,10 +32,10 @@ type schedulerSuite struct { scheduler *oldScheduler } -func (s *schedulerSuite) reset(c *check.C) { +func (s *schedulerTester) reset(t *testing.T) { s.changefeedID = fmt.Sprintf("test-changefeed-%x", rand.Uint32()) s.state = orchestrator.NewChangefeedReactorState("test-changefeed") - s.tester = orchestrator.NewReactorStateTester(c, s.state, nil) + s.tester = orchestrator.NewReactorStateTester(t, s.state, nil) s.scheduler = newSchedulerV1().(*schedulerV1CompatWrapper).inner s.captures = make(map[model.CaptureID]*model.CaptureInfo) s.state.PatchStatus(func(status *model.ChangeFeedStatus) (*model.ChangeFeedStatus, bool, error) { @@ -46,7 +44,7 @@ func (s *schedulerSuite) reset(c *check.C) { s.tester.MustApplyPatches() } -func (s *schedulerSuite) addCapture(captureID model.CaptureID) { +func (s *schedulerTester) addCapture(captureID model.CaptureID) { captureInfo := &model.CaptureInfo{ ID: captureID, } @@ -57,7 +55,7 @@ func (s *schedulerSuite) addCapture(captureID model.CaptureID) { s.tester.MustApplyPatches() } -func (s *schedulerSuite) finishTableOperation(captureID model.CaptureID, tableIDs ...model.TableID) { 
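With the gocheck suite registration gone, per-test setup moves from a Suite hook into a plain helper struct that every test constructs and resets itself, as schedulerTester.reset does in this file. A hedged sketch of that pattern in isolation (all names below are invented):

```go
package example

import "testing"

// fixture plays the role schedulerTester plays above: a plain struct,
// no suite registration, rebuilt explicitly by every test.
type fixture struct {
	tables map[int64]struct{}
}

// reset replaces gocheck's SetUpTest hook; because each test owns its
// fixture, state cannot leak between tests or across parallel runs.
func (f *fixture) reset(t *testing.T) {
	t.Helper()
	f.tables = make(map[int64]struct{})
}

func TestFixtureIsolation(t *testing.T) {
	f := &fixture{}
	f.reset(t)
	f.tables[1] = struct{}{}
	if len(f.tables) != 1 {
		t.Fatalf("expected 1 table, got %d", len(f.tables))
	}
}
```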
+func (s *schedulerTester) finishTableOperation(captureID model.CaptureID, tableIDs ...model.TableID) { s.state.PatchTaskStatus(captureID, func(status *model.TaskStatus) (*model.TaskStatus, bool, error) { for _, tableID := range tableIDs { status.Operation[tableID].Status = model.OperFinished @@ -82,10 +80,9 @@ func (s *schedulerSuite) finishTableOperation(captureID model.CaptureID, tableID s.tester.MustApplyPatches() } -func (s *schedulerSuite) TestScheduleOneCapture(c *check.C) { - defer testleak.AfterTest(c)() - - s.reset(c) +func TestScheduleOneCapture(t *testing.T) { + s := &schedulerTester{} + s.reset(t) captureID := "test-capture-0" s.addCapture(captureID) @@ -100,27 +97,27 @@ func (s *schedulerSuite) TestScheduleOneCapture(c *check.C) { s.tester.MustUpdate(key.String(), nil) s.tester.MustApplyPatches() - s.reset(c) + s.reset(t) captureID = "test-capture-1" s.addCapture(captureID) // add three tables shouldUpdateState, err := s.scheduler.Tick(s.state, []model.TableID{1, 2, 3, 4}, s.captures) - c.Assert(err, check.IsNil) - c.Assert(shouldUpdateState, check.IsFalse) + require.Nil(t, err) + require.False(t, shouldUpdateState) s.tester.MustApplyPatches() - c.Assert(s.state.TaskStatuses[captureID].Tables, check.DeepEquals, map[model.TableID]*model.TableReplicaInfo{ + require.Equal(t, s.state.TaskStatuses[captureID].Tables, map[model.TableID]*model.TableReplicaInfo{ 1: {StartTs: 0}, 2: {StartTs: 0}, 3: {StartTs: 0}, 4: {StartTs: 0}, }) - c.Assert(s.state.TaskStatuses[captureID].Operation, check.DeepEquals, map[model.TableID]*model.TableOperation{ + require.Equal(t, s.state.TaskStatuses[captureID].Operation, map[model.TableID]*model.TableOperation{ 1: {Delete: false, BoundaryTs: 0, Status: model.OperDispatched}, 2: {Delete: false, BoundaryTs: 0, Status: model.OperDispatched}, 3: {Delete: false, BoundaryTs: 0, Status: model.OperDispatched}, 4: {Delete: false, BoundaryTs: 0, Status: model.OperDispatched}, }) shouldUpdateState, err = s.scheduler.Tick(s.state, []model.TableID{1, 2, 3, 4}, s.captures) - c.Assert(err, check.IsNil) - c.Assert(shouldUpdateState, check.IsTrue) + require.Nil(t, err) + require.True(t, shouldUpdateState) s.tester.MustApplyPatches() // two tables finish adding operation @@ -128,13 +125,13 @@ func (s *schedulerSuite) TestScheduleOneCapture(c *check.C) { // remove table 1,2 and add table 4,5 shouldUpdateState, err = s.scheduler.Tick(s.state, []model.TableID{3, 4, 5}, s.captures) - c.Assert(err, check.IsNil) - c.Assert(shouldUpdateState, check.IsFalse) + require.Nil(t, err) + require.False(t, shouldUpdateState) s.tester.MustApplyPatches() - c.Assert(s.state.TaskStatuses[captureID].Tables, check.DeepEquals, map[model.TableID]*model.TableReplicaInfo{ + require.Equal(t, s.state.TaskStatuses[captureID].Tables, map[model.TableID]*model.TableReplicaInfo{ 3: {StartTs: 0}, 4: {StartTs: 0}, 5: {StartTs: 0}, }) - c.Assert(s.state.TaskStatuses[captureID].Operation, check.DeepEquals, map[model.TableID]*model.TableOperation{ + require.Equal(t, s.state.TaskStatuses[captureID].Operation, map[model.TableID]*model.TableOperation{ 1: {Delete: true, BoundaryTs: 0, Status: model.OperDispatched}, 2: {Delete: true, BoundaryTs: 0, Status: model.OperDispatched}, 4: {Delete: false, BoundaryTs: 0, Status: model.OperDispatched}, @@ -147,13 +144,13 @@ func (s *schedulerSuite) TestScheduleOneCapture(c *check.C) { s.scheduler.MoveTable(3, "fake-capture") s.scheduler.MoveTable(4, "fake-capture") shouldUpdateState, err = s.scheduler.Tick(s.state, []model.TableID{3, 4, 5}, s.captures) - c.Assert(err, 
check.IsNil) - c.Assert(shouldUpdateState, check.IsFalse) + require.Nil(t, err) + require.False(t, shouldUpdateState) s.tester.MustApplyPatches() - c.Assert(s.state.TaskStatuses[captureID].Tables, check.DeepEquals, map[model.TableID]*model.TableReplicaInfo{ + require.Equal(t, s.state.TaskStatuses[captureID].Tables, map[model.TableID]*model.TableReplicaInfo{ 4: {StartTs: 0}, 5: {StartTs: 0}, }) - c.Assert(s.state.TaskStatuses[captureID].Operation, check.DeepEquals, map[model.TableID]*model.TableOperation{ + require.Equal(t, s.state.TaskStatuses[captureID].Operation, map[model.TableID]*model.TableOperation{ 1: {Delete: true, BoundaryTs: 0, Status: model.OperDispatched}, 2: {Delete: true, BoundaryTs: 0, Status: model.OperDispatched}, 3: {Delete: true, BoundaryTs: 0, Status: model.OperDispatched}, @@ -165,77 +162,77 @@ func (s *schedulerSuite) TestScheduleOneCapture(c *check.C) { s.finishTableOperation(captureID, 1, 2, 3, 4, 5) shouldUpdateState, err = s.scheduler.Tick(s.state, []model.TableID{3, 4, 5}, s.captures) - c.Assert(err, check.IsNil) - c.Assert(shouldUpdateState, check.IsTrue) + require.Nil(t, err) + require.True(t, shouldUpdateState) s.tester.MustApplyPatches() - c.Assert(s.state.TaskStatuses[captureID].Tables, check.DeepEquals, map[model.TableID]*model.TableReplicaInfo{ + require.Equal(t, s.state.TaskStatuses[captureID].Tables, map[model.TableID]*model.TableReplicaInfo{ 4: {StartTs: 0}, 5: {StartTs: 0}, }) - c.Assert(s.state.TaskStatuses[captureID].Operation, check.DeepEquals, map[model.TableID]*model.TableOperation{}) + require.Equal(t, s.state.TaskStatuses[captureID].Operation, map[model.TableID]*model.TableOperation{}) // table 3 is missing by expected, because the table was trying to move to a invalid capture // and the move will failed, the table 3 will be add in next tick shouldUpdateState, err = s.scheduler.Tick(s.state, []model.TableID{3, 4, 5}, s.captures) - c.Assert(err, check.IsNil) - c.Assert(shouldUpdateState, check.IsFalse) + require.Nil(t, err) + require.False(t, shouldUpdateState) s.tester.MustApplyPatches() - c.Assert(s.state.TaskStatuses[captureID].Tables, check.DeepEquals, map[model.TableID]*model.TableReplicaInfo{ + require.Equal(t, s.state.TaskStatuses[captureID].Tables, map[model.TableID]*model.TableReplicaInfo{ 4: {StartTs: 0}, 5: {StartTs: 0}, }) - c.Assert(s.state.TaskStatuses[captureID].Operation, check.DeepEquals, map[model.TableID]*model.TableOperation{}) + require.Equal(t, s.state.TaskStatuses[captureID].Operation, map[model.TableID]*model.TableOperation{}) shouldUpdateState, err = s.scheduler.Tick(s.state, []model.TableID{3, 4, 5}, s.captures) - c.Assert(err, check.IsNil) - c.Assert(shouldUpdateState, check.IsFalse) + require.Nil(t, err) + require.False(t, shouldUpdateState) s.tester.MustApplyPatches() - c.Assert(s.state.TaskStatuses[captureID].Tables, check.DeepEquals, map[model.TableID]*model.TableReplicaInfo{ + require.Equal(t, s.state.TaskStatuses[captureID].Tables, map[model.TableID]*model.TableReplicaInfo{ 3: {StartTs: 0}, 4: {StartTs: 0}, 5: {StartTs: 0}, }) - c.Assert(s.state.TaskStatuses[captureID].Operation, check.DeepEquals, map[model.TableID]*model.TableOperation{ + require.Equal(t, s.state.TaskStatuses[captureID].Operation, map[model.TableID]*model.TableOperation{ 3: {Delete: false, BoundaryTs: 0, Status: model.OperDispatched}, }) } -func (s *schedulerSuite) TestScheduleMoveTable(c *check.C) { - defer testleak.AfterTest(c)() - s.reset(c) +func TestScheduleMoveTable(t *testing.T) { + s := &schedulerTester{} + s.reset(t) captureID1 := 
"test-capture-1" captureID2 := "test-capture-2" s.addCapture(captureID1) // add a table shouldUpdateState, err := s.scheduler.Tick(s.state, []model.TableID{1}, s.captures) - c.Assert(err, check.IsNil) - c.Assert(shouldUpdateState, check.IsFalse) + require.Nil(t, err) + require.False(t, shouldUpdateState) s.tester.MustApplyPatches() - c.Assert(s.state.TaskStatuses[captureID1].Tables, check.DeepEquals, map[model.TableID]*model.TableReplicaInfo{ + require.Equal(t, s.state.TaskStatuses[captureID1].Tables, map[model.TableID]*model.TableReplicaInfo{ 1: {StartTs: 0}, }) - c.Assert(s.state.TaskStatuses[captureID1].Operation, check.DeepEquals, map[model.TableID]*model.TableOperation{ + require.Equal(t, s.state.TaskStatuses[captureID1].Operation, map[model.TableID]*model.TableOperation{ 1: {Delete: false, BoundaryTs: 0, Status: model.OperDispatched}, }) s.finishTableOperation(captureID1, 1) shouldUpdateState, err = s.scheduler.Tick(s.state, []model.TableID{1}, s.captures) - c.Assert(err, check.IsNil) - c.Assert(shouldUpdateState, check.IsTrue) + require.Nil(t, err) + require.True(t, shouldUpdateState) s.tester.MustApplyPatches() s.addCapture(captureID2) // add a table shouldUpdateState, err = s.scheduler.Tick(s.state, []model.TableID{1, 2}, s.captures) - c.Assert(err, check.IsNil) - c.Assert(shouldUpdateState, check.IsFalse) + require.Nil(t, err) + require.False(t, shouldUpdateState) s.tester.MustApplyPatches() - c.Assert(s.state.TaskStatuses[captureID1].Tables, check.DeepEquals, map[model.TableID]*model.TableReplicaInfo{ + require.Equal(t, s.state.TaskStatuses[captureID1].Tables, map[model.TableID]*model.TableReplicaInfo{ 1: {StartTs: 0}, }) - c.Assert(s.state.TaskStatuses[captureID1].Operation, check.DeepEquals, map[model.TableID]*model.TableOperation{}) - c.Assert(s.state.TaskStatuses[captureID2].Tables, check.DeepEquals, map[model.TableID]*model.TableReplicaInfo{ + require.Equal(t, s.state.TaskStatuses[captureID1].Operation, map[model.TableID]*model.TableOperation{}) + require.Equal(t, s.state.TaskStatuses[captureID2].Tables, map[model.TableID]*model.TableReplicaInfo{ 2: {StartTs: 0}, }) - c.Assert(s.state.TaskStatuses[captureID2].Operation, check.DeepEquals, map[model.TableID]*model.TableOperation{ + require.Equal(t, s.state.TaskStatuses[captureID2].Operation, map[model.TableID]*model.TableOperation{ 2: {Delete: false, BoundaryTs: 0, Status: model.OperDispatched}, }) @@ -243,48 +240,48 @@ func (s *schedulerSuite) TestScheduleMoveTable(c *check.C) { s.scheduler.MoveTable(2, captureID1) shouldUpdateState, err = s.scheduler.Tick(s.state, []model.TableID{1, 2}, s.captures) - c.Assert(err, check.IsNil) - c.Assert(shouldUpdateState, check.IsFalse) + require.Nil(t, err) + require.False(t, shouldUpdateState) s.tester.MustApplyPatches() - c.Assert(s.state.TaskStatuses[captureID1].Tables, check.DeepEquals, map[model.TableID]*model.TableReplicaInfo{ + require.Equal(t, s.state.TaskStatuses[captureID1].Tables, map[model.TableID]*model.TableReplicaInfo{ 1: {StartTs: 0}, }) - c.Assert(s.state.TaskStatuses[captureID1].Operation, check.DeepEquals, map[model.TableID]*model.TableOperation{}) - c.Assert(s.state.TaskStatuses[captureID2].Tables, check.DeepEquals, map[model.TableID]*model.TableReplicaInfo{}) - c.Assert(s.state.TaskStatuses[captureID2].Operation, check.DeepEquals, map[model.TableID]*model.TableOperation{ + require.Equal(t, s.state.TaskStatuses[captureID1].Operation, map[model.TableID]*model.TableOperation{}) + require.Equal(t, s.state.TaskStatuses[captureID2].Tables, 
map[model.TableID]*model.TableReplicaInfo{}) + require.Equal(t, s.state.TaskStatuses[captureID2].Operation, map[model.TableID]*model.TableOperation{ 2: {Delete: true, BoundaryTs: 0, Status: model.OperDispatched}, }) s.finishTableOperation(captureID2, 2) shouldUpdateState, err = s.scheduler.Tick(s.state, []model.TableID{1, 2}, s.captures) - c.Assert(err, check.IsNil) - c.Assert(shouldUpdateState, check.IsTrue) + require.Nil(t, err) + require.True(t, shouldUpdateState) s.tester.MustApplyPatches() - c.Assert(s.state.TaskStatuses[captureID1].Tables, check.DeepEquals, map[model.TableID]*model.TableReplicaInfo{ + require.Equal(t, s.state.TaskStatuses[captureID1].Tables, map[model.TableID]*model.TableReplicaInfo{ 1: {StartTs: 0}, }) - c.Assert(s.state.TaskStatuses[captureID1].Operation, check.DeepEquals, map[model.TableID]*model.TableOperation{}) - c.Assert(s.state.TaskStatuses[captureID2].Tables, check.DeepEquals, map[model.TableID]*model.TableReplicaInfo{}) - c.Assert(s.state.TaskStatuses[captureID2].Operation, check.DeepEquals, map[model.TableID]*model.TableOperation{}) + require.Equal(t, s.state.TaskStatuses[captureID1].Operation, map[model.TableID]*model.TableOperation{}) + require.Equal(t, s.state.TaskStatuses[captureID2].Tables, map[model.TableID]*model.TableReplicaInfo{}) + require.Equal(t, s.state.TaskStatuses[captureID2].Operation, map[model.TableID]*model.TableOperation{}) shouldUpdateState, err = s.scheduler.Tick(s.state, []model.TableID{1, 2}, s.captures) - c.Assert(err, check.IsNil) - c.Assert(shouldUpdateState, check.IsFalse) + require.Nil(t, err) + require.False(t, shouldUpdateState) s.tester.MustApplyPatches() - c.Assert(s.state.TaskStatuses[captureID1].Tables, check.DeepEquals, map[model.TableID]*model.TableReplicaInfo{ + require.Equal(t, s.state.TaskStatuses[captureID1].Tables, map[model.TableID]*model.TableReplicaInfo{ 1: {StartTs: 0}, 2: {StartTs: 0}, }) - c.Assert(s.state.TaskStatuses[captureID1].Operation, check.DeepEquals, map[model.TableID]*model.TableOperation{ + require.Equal(t, s.state.TaskStatuses[captureID1].Operation, map[model.TableID]*model.TableOperation{ 2: {Delete: false, BoundaryTs: 0, Status: model.OperDispatched}, }) - c.Assert(s.state.TaskStatuses[captureID2].Tables, check.DeepEquals, map[model.TableID]*model.TableReplicaInfo{}) - c.Assert(s.state.TaskStatuses[captureID2].Operation, check.DeepEquals, map[model.TableID]*model.TableOperation{}) + require.Equal(t, s.state.TaskStatuses[captureID2].Tables, map[model.TableID]*model.TableReplicaInfo{}) + require.Equal(t, s.state.TaskStatuses[captureID2].Operation, map[model.TableID]*model.TableOperation{}) } -func (s *schedulerSuite) TestScheduleRebalance(c *check.C) { - defer testleak.AfterTest(c)() - s.reset(c) +func TestScheduleRebalance(t *testing.T) { + s := &schedulerTester{} + s.reset(t) captureID1 := "test-capture-1" captureID2 := "test-capture-2" captureID3 := "test-capture-3" @@ -306,13 +303,13 @@ func (s *schedulerSuite) TestScheduleRebalance(c *check.C) { // rebalance table shouldUpdateState, err := s.scheduler.Tick(s.state, []model.TableID{1, 2, 3, 4, 5, 6}, s.captures) - c.Assert(err, check.IsNil) - c.Assert(shouldUpdateState, check.IsFalse) + require.Nil(t, err) + require.False(t, shouldUpdateState) s.tester.MustApplyPatches() // 4 tables remove in capture 1, this 4 tables will be added to another capture in next tick - c.Assert(s.state.TaskStatuses[captureID1].Tables, check.HasLen, 2) - c.Assert(s.state.TaskStatuses[captureID2].Tables, check.HasLen, 0) - 
c.Assert(s.state.TaskStatuses[captureID3].Tables, check.HasLen, 0) + require.Len(t, s.state.TaskStatuses[captureID1].Tables, 2) + require.Len(t, s.state.TaskStatuses[captureID2].Tables, 0) + require.Len(t, s.state.TaskStatuses[captureID3].Tables, 0) s.state.PatchTaskStatus(captureID1, func(status *model.TaskStatus) (*model.TaskStatus, bool, error) { for _, opt := range status.Operation { @@ -321,7 +318,7 @@ func (s *schedulerSuite) TestScheduleRebalance(c *check.C) { return status, true, nil }) s.state.PatchTaskWorkload(captureID1, func(workload model.TaskWorkload) (model.TaskWorkload, bool, error) { - c.Assert(workload, check.IsNil) + require.Nil(t, workload) workload = make(model.TaskWorkload) for tableID := range s.state.TaskStatuses[captureID1].Tables { workload[tableID] = model.WorkloadInfo{Workload: 1} @@ -332,26 +329,26 @@ func (s *schedulerSuite) TestScheduleRebalance(c *check.C) { // clean finished operation shouldUpdateState, err = s.scheduler.Tick(s.state, []model.TableID{1, 2, 3, 4, 5, 6}, s.captures) - c.Assert(err, check.IsNil) - c.Assert(shouldUpdateState, check.IsTrue) + require.Nil(t, err) + require.True(t, shouldUpdateState) s.tester.MustApplyPatches() // 4 tables add to another capture in this tick - c.Assert(s.state.TaskStatuses[captureID1].Operation, check.HasLen, 0) + require.Len(t, s.state.TaskStatuses[captureID1].Operation, 0) // rebalance table shouldUpdateState, err = s.scheduler.Tick(s.state, []model.TableID{1, 2, 3, 4, 5, 6}, s.captures) - c.Assert(err, check.IsNil) - c.Assert(shouldUpdateState, check.IsFalse) + require.Nil(t, err) + require.False(t, shouldUpdateState) s.tester.MustApplyPatches() // 4 tables add to another capture in this tick - c.Assert(s.state.TaskStatuses[captureID1].Tables, check.HasLen, 2) - c.Assert(s.state.TaskStatuses[captureID2].Tables, check.HasLen, 2) - c.Assert(s.state.TaskStatuses[captureID3].Tables, check.HasLen, 2) + require.Len(t, s.state.TaskStatuses[captureID1].Tables, 2) + require.Len(t, s.state.TaskStatuses[captureID2].Tables, 2) + require.Len(t, s.state.TaskStatuses[captureID3].Tables, 2) tableIDs := make(map[model.TableID]struct{}) for _, status := range s.state.TaskStatuses { for tableID := range status.Tables { tableIDs[tableID] = struct{}{} } } - c.Assert(tableIDs, check.DeepEquals, map[model.TableID]struct{}{1: {}, 2: {}, 3: {}, 4: {}, 5: {}, 6: {}}) + require.Equal(t, tableIDs, map[model.TableID]struct{}{1: {}, 2: {}, 3: {}, 4: {}, 5: {}, 6: {}}) } diff --git a/cdc/owner/schema_test.go b/cdc/owner/schema_test.go index 9ed7e8ec14f..9c8abb71316 100644 --- a/cdc/owner/schema_test.go +++ b/cdc/owner/schema_test.go @@ -15,38 +15,33 @@ package owner import ( "sort" + "testing" - "github.com/pingcap/check" timodel "github.com/pingcap/tidb/parser/model" "github.com/pingcap/tidb/parser/mysql" "github.com/pingcap/tiflow/cdc/entry" "github.com/pingcap/tiflow/cdc/model" "github.com/pingcap/tiflow/pkg/config" - "github.com/pingcap/tiflow/pkg/util/testleak" + "github.com/stretchr/testify/require" "github.com/tikv/client-go/v2/oracle" ) -var _ = check.Suite(&schemaSuite{}) - -type schemaSuite struct{} - -func (s *schemaSuite) TestAllPhysicalTables(c *check.C) { - defer testleak.AfterTest(c)() - helper := entry.NewSchemaTestHelper(c) +func TestAllPhysicalTables(t *testing.T) { + helper := entry.NewSchemaTestHelper(t) defer helper.Close() ver, err := helper.Storage().CurrentVersion(oracle.GlobalTxnScope) - c.Assert(err, check.IsNil) + require.Nil(t, err) schema, err := newSchemaWrap4Owner(helper.Storage(), ver.Ver, 
config.GetDefaultReplicaConfig()) - c.Assert(err, check.IsNil) - c.Assert(schema.AllPhysicalTables(), check.HasLen, 0) + require.Nil(t, err) + require.Len(t, schema.AllPhysicalTables(), 0) // add normal table job := helper.DDL2Job("create table test.t1(id int primary key)") tableIDT1 := job.BinlogInfo.TableInfo.ID - c.Assert(schema.HandleDDL(job), check.IsNil) - c.Assert(schema.AllPhysicalTables(), check.DeepEquals, []model.TableID{tableIDT1}) + require.Nil(t, schema.HandleDDL(job)) + require.Equal(t, schema.AllPhysicalTables(), []model.TableID{tableIDT1}) // add ineligible table - c.Assert(schema.HandleDDL(helper.DDL2Job("create table test.t2(id int)")), check.IsNil) - c.Assert(schema.AllPhysicalTables(), check.DeepEquals, []model.TableID{tableIDT1}) + require.Nil(t, schema.HandleDDL(helper.DDL2Job("create table test.t2(id int)"))) + require.Equal(t, schema.AllPhysicalTables(), []model.TableID{tableIDT1}) // add partition table job = helper.DDL2Job(`CREATE TABLE test.employees ( id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, @@ -62,7 +57,7 @@ func (s *schemaSuite) TestAllPhysicalTables(c *check.C) { PARTITION p2 VALUES LESS THAN (15), PARTITION p3 VALUES LESS THAN (20) )`) - c.Assert(schema.HandleDDL(job), check.IsNil) + require.Nil(t, schema.HandleDDL(job)) expectedTableIDs := []model.TableID{tableIDT1} for _, p := range job.BinlogInfo.TableInfo.GetPartitionInfo().Definitions { expectedTableIDs = append(expectedTableIDs, p.ID) @@ -74,42 +69,41 @@ func (s *schemaSuite) TestAllPhysicalTables(c *check.C) { } sortTableIDs(expectedTableIDs) sortTableIDs(schema.AllPhysicalTables()) - c.Assert(schema.AllPhysicalTables(), check.DeepEquals, expectedTableIDs) + require.Equal(t, schema.AllPhysicalTables(), expectedTableIDs) } -func (s *schemaSuite) TestIsIneligibleTableID(c *check.C) { - defer testleak.AfterTest(c)() - helper := entry.NewSchemaTestHelper(c) +func TestIsIneligibleTableID(t *testing.T) { + helper := entry.NewSchemaTestHelper(t) defer helper.Close() ver, err := helper.Storage().CurrentVersion(oracle.GlobalTxnScope) - c.Assert(err, check.IsNil) + require.Nil(t, err) schema, err := newSchemaWrap4Owner(helper.Storage(), ver.Ver, config.GetDefaultReplicaConfig()) - c.Assert(err, check.IsNil) + require.Nil(t, err) // add normal table job := helper.DDL2Job("create table test.t1(id int primary key)") tableIDT1 := job.BinlogInfo.TableInfo.ID - c.Assert(schema.HandleDDL(job), check.IsNil) + require.Nil(t, schema.HandleDDL(job)) // add ineligible table job = helper.DDL2Job("create table test.t2(id int)") tableIDT2 := job.BinlogInfo.TableInfo.ID - c.Assert(schema.HandleDDL(job), check.IsNil) - c.Assert(schema.IsIneligibleTableID(tableIDT1), check.IsFalse) - c.Assert(schema.IsIneligibleTableID(tableIDT2), check.IsTrue) + + require.Nil(t, schema.HandleDDL(job)) + require.False(t, schema.IsIneligibleTableID(tableIDT1)) + require.True(t, schema.IsIneligibleTableID(tableIDT2)) } -func (s *schemaSuite) TestBuildDDLEvent(c *check.C) { - defer testleak.AfterTest(c)() - helper := entry.NewSchemaTestHelper(c) +func TestBuildDDLEvent(t *testing.T) { + helper := entry.NewSchemaTestHelper(t) defer helper.Close() ver, err := helper.Storage().CurrentVersion(oracle.GlobalTxnScope) - c.Assert(err, check.IsNil) + require.Nil(t, err) schema, err := newSchemaWrap4Owner(helper.Storage(), ver.Ver, config.GetDefaultReplicaConfig()) - c.Assert(err, check.IsNil) + require.Nil(t, err) // add normal table job := helper.DDL2Job("create table test.t1(id int primary key)") event, err := schema.BuildDDLEvent(job) - c.Assert(err, 
check.IsNil) - c.Assert(event, check.DeepEquals, &model.DDLEvent{ + require.Nil(t, err) + require.Equal(t, event, &model.DDLEvent{ StartTs: job.StartTS, CommitTs: job.BinlogInfo.FinishedTS, Query: "create table test.t1(id int primary key)", @@ -122,11 +116,11 @@ func (s *schemaSuite) TestBuildDDLEvent(c *check.C) { }, PreTableInfo: nil, }) - c.Assert(schema.HandleDDL(job), check.IsNil) + require.Nil(t, schema.HandleDDL(job)) job = helper.DDL2Job("ALTER TABLE test.t1 ADD COLUMN c1 CHAR(16) NOT NULL") event, err = schema.BuildDDLEvent(job) - c.Assert(err, check.IsNil) - c.Assert(event, check.DeepEquals, &model.DDLEvent{ + require.Nil(t, err) + require.Equal(t, event, &model.DDLEvent{ StartTs: job.StartTS, CommitTs: job.BinlogInfo.FinishedTS, Query: "ALTER TABLE test.t1 ADD COLUMN c1 CHAR(16) NOT NULL", @@ -146,22 +140,21 @@ func (s *schemaSuite) TestBuildDDLEvent(c *check.C) { }) } -func (s *schemaSuite) TestSinkTableInfos(c *check.C) { - defer testleak.AfterTest(c)() - helper := entry.NewSchemaTestHelper(c) +func TestSinkTableInfos(t *testing.T) { + helper := entry.NewSchemaTestHelper(t) defer helper.Close() ver, err := helper.Storage().CurrentVersion(oracle.GlobalTxnScope) - c.Assert(err, check.IsNil) + require.Nil(t, err) schema, err := newSchemaWrap4Owner(helper.Storage(), ver.Ver, config.GetDefaultReplicaConfig()) - c.Assert(err, check.IsNil) + require.Nil(t, err) // add normal table job := helper.DDL2Job("create table test.t1(id int primary key)") tableIDT1 := job.BinlogInfo.TableInfo.ID - c.Assert(schema.HandleDDL(job), check.IsNil) + require.Nil(t, schema.HandleDDL(job)) // add ineligible table job = helper.DDL2Job("create table test.t2(id int)") - c.Assert(schema.HandleDDL(job), check.IsNil) - c.Assert(schema.SinkTableInfos(), check.DeepEquals, []*model.SimpleTableInfo{ + require.Nil(t, schema.HandleDDL(job)) + require.Equal(t, schema.SinkTableInfos(), []*model.SimpleTableInfo{ { Schema: "test", Table: "t1", diff --git a/cdc/processor/main_test.go b/cdc/processor/main_test.go new file mode 100644 index 00000000000..5d7ddb7ad5e --- /dev/null +++ b/cdc/processor/main_test.go @@ -0,0 +1,24 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
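This new main_test.go replaces the per-test `defer testleak.AfterTest(c)()` calls removed throughout the diff with a single package-level leak check. Assuming pkg/leakutil is a thin wrapper over goleak (an assumption; only the call site is visible in this patch), a minimal equivalent would be:

```go
package example

import (
	"testing"

	"go.uber.org/goleak"
)

// TestMain runs the package's tests, then fails the run if any
// goroutines are still alive afterwards, which is what the removed
// per-test testleak defers used to verify one test at a time.
func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m)
}
```

Running the check once in TestMain keeps the individual tests free of boilerplate while still failing the package if any test leaks a goroutine.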
+ +package processor + +import ( + "testing" + + "github.com/pingcap/tiflow/pkg/leakutil" +) + +func TestMain(m *testing.M) { + leakutil.SetUpLeakTest(m) +} diff --git a/cdc/processor/manager_test.go b/cdc/processor/manager_test.go index 075fbca5fc3..8415af305aa 100644 --- a/cdc/processor/manager_test.go +++ b/cdc/processor/manager_test.go @@ -17,9 +17,9 @@ import ( "bytes" "fmt" "math" + "testing" "time" - "github.com/pingcap/check" "github.com/pingcap/errors" "github.com/pingcap/tiflow/cdc/model" tablepipeline "github.com/pingcap/tiflow/cdc/processor/pipeline" @@ -27,31 +27,29 @@ import ( cdcContext "github.com/pingcap/tiflow/pkg/context" cerrors "github.com/pingcap/tiflow/pkg/errors" "github.com/pingcap/tiflow/pkg/orchestrator" - "github.com/pingcap/tiflow/pkg/util/testleak" + "github.com/stretchr/testify/require" ) -type managerSuite struct { +type managerTester struct { manager *Manager state *orchestrator.GlobalReactorState tester *orchestrator.ReactorStateTester } -var _ = check.Suite(&managerSuite{}) - // NewManager4Test creates a new processor manager for test func NewManager4Test( - c *check.C, + t *testing.T, createTablePipeline func(ctx cdcContext.Context, tableID model.TableID, replicaInfo *model.TableReplicaInfo) (tablepipeline.TablePipeline, error), ) *Manager { m := NewManager() m.newProcessor = func(ctx cdcContext.Context) *processor { - return newProcessor4Test(ctx, c, createTablePipeline) + return newProcessor4Test(ctx, t, createTablePipeline) } return m } -func (s *managerSuite) resetSuit(ctx cdcContext.Context, c *check.C) { - s.manager = NewManager4Test(c, func(ctx cdcContext.Context, tableID model.TableID, replicaInfo *model.TableReplicaInfo) (tablepipeline.TablePipeline, error) { +func (s *managerTester) resetSuit(ctx cdcContext.Context, t *testing.T) { + s.manager = NewManager4Test(t, func(ctx cdcContext.Context, tableID model.TableID, replicaInfo *model.TableReplicaInfo) (tablepipeline.TablePipeline, error) { return &mockTablePipeline{ tableID: tableID, name: fmt.Sprintf("`test`.`table%d`", tableID), @@ -62,28 +60,28 @@ func (s *managerSuite) resetSuit(ctx cdcContext.Context, c *check.C) { }) s.state = orchestrator.NewGlobalState() captureInfoBytes, err := ctx.GlobalVars().CaptureInfo.Marshal() - c.Assert(err, check.IsNil) - s.tester = orchestrator.NewReactorStateTester(c, s.state, map[string]string{ + require.Nil(t, err) + s.tester = orchestrator.NewReactorStateTester(t, s.state, map[string]string{ fmt.Sprintf("/tidb/cdc/capture/%s", ctx.GlobalVars().CaptureInfo.ID): string(captureInfoBytes), }) } -func (s *managerSuite) TestChangefeed(c *check.C) { - defer testleak.AfterTest(c)() +func TestChangefeed(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(false) - s.resetSuit(ctx, c) + s := &managerTester{} + s.resetSuit(ctx, t) var err error // no changefeed _, err = s.manager.Tick(ctx, s.state) - c.Assert(err, check.IsNil) + require.Nil(t, err) // an inactive changefeed s.state.Changefeeds["test-changefeed"] = orchestrator.NewChangefeedReactorState("test-changefeed") _, err = s.manager.Tick(ctx, s.state) s.tester.MustApplyPatches() - c.Assert(err, check.IsNil) - c.Assert(s.manager.processors, check.HasLen, 0) + require.Nil(t, err) + require.Len(t, s.manager.processors, 0) // an active changefeed s.state.Changefeeds["test-changefeed"].PatchInfo(func(info *model.ChangeFeedInfo) (*model.ChangeFeedInfo, bool, error) { @@ -106,8 +104,8 @@ func (s *managerSuite) TestChangefeed(c *check.C) { s.tester.MustApplyPatches() _, err = s.manager.Tick(ctx, s.state) 
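NewManager4Test above swaps the Manager's processor constructor for a test double instead of standing up real etcd or TiKV dependencies. A sketch of that constructor-injection seam in isolation (types and names below are invented, not from the tiflow codebase):

```go
package example

import "testing"

type worker interface {
	Run() error
}

type realWorker struct{}

func (realWorker) Run() error { return nil }

// manager exposes its worker constructor as a field, the same test
// seam Manager.newProcessor provides above.
type manager struct {
	newWorker func() worker
}

func newManager() *manager {
	return &manager{newWorker: func() worker { return realWorker{} }}
}

type fakeWorker struct{ ran bool }

func (f *fakeWorker) Run() error { f.ran = true; return nil }

func TestManagerUsesInjectedWorker(t *testing.T) {
	m := newManager()
	fake := &fakeWorker{}
	m.newWorker = func() worker { return fake } // what NewManager4Test does

	if err := m.newWorker().Run(); err != nil {
		t.Fatal(err)
	}
	if !fake.ran {
		t.Fatal("fake worker was not used")
	}
}
```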
s.tester.MustApplyPatches() - c.Assert(err, check.IsNil) - c.Assert(s.manager.processors, check.HasLen, 1) + require.Nil(t, err) + require.Len(t, s.manager.processors, 1) // processor return errors s.state.Changefeeds["test-changefeed"].PatchStatus(func(status *model.ChangeFeedStatus) (*model.ChangeFeedStatus, bool, error) { @@ -121,19 +119,19 @@ func (s *managerSuite) TestChangefeed(c *check.C) { s.tester.MustApplyPatches() _, err = s.manager.Tick(ctx, s.state) s.tester.MustApplyPatches() - c.Assert(err, check.IsNil) - c.Assert(s.manager.processors, check.HasLen, 0) + require.Nil(t, err) + require.Len(t, s.manager.processors, 0) } -func (s *managerSuite) TestDebugInfo(c *check.C) { - defer testleak.AfterTest(c)() +func TestDebugInfo(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(false) - s.resetSuit(ctx, c) + s := &managerTester{} + s.resetSuit(ctx, t) var err error // no changefeed _, err = s.manager.Tick(ctx, s.state) - c.Assert(err, check.IsNil) + require.Nil(t, err) // an active changefeed s.state.Changefeeds["test-changefeed"] = orchestrator.NewChangefeedReactorState("test-changefeed") @@ -156,38 +154,38 @@ func (s *managerSuite) TestDebugInfo(c *check.C) { }) s.tester.MustApplyPatches() _, err = s.manager.Tick(ctx, s.state) - c.Assert(err, check.IsNil) + require.Nil(t, err) s.tester.MustApplyPatches() - c.Assert(s.manager.processors, check.HasLen, 1) + require.Len(t, s.manager.processors, 1) done := make(chan struct{}) go func() { defer close(done) for { _, err = s.manager.Tick(ctx, s.state) if err != nil { - c.Assert(cerrors.ErrReactorFinished.Equal(errors.Cause(err)), check.IsTrue) + require.True(t, cerrors.ErrReactorFinished.Equal(errors.Cause(err))) return } - c.Assert(err, check.IsNil) + require.Nil(t, err) s.tester.MustApplyPatches() } }() buf := bytes.NewBufferString("") s.manager.WriteDebugInfo(buf) - c.Assert(len(buf.String()), check.Greater, 0) + require.Greater(t, len(buf.String()), 0) s.manager.AsyncClose() <-done } -func (s *managerSuite) TestClose(c *check.C) { - defer testleak.AfterTest(c)() +func TestClose(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(false) - s.resetSuit(ctx, c) + s := &managerTester{} + s.resetSuit(ctx, t) var err error // no changefeed _, err = s.manager.Tick(ctx, s.state) - c.Assert(err, check.IsNil) + require.Nil(t, err) // an active changefeed s.state.Changefeeds["test-changefeed"] = orchestrator.NewChangefeedReactorState("test-changefeed") @@ -210,13 +208,13 @@ func (s *managerSuite) TestClose(c *check.C) { }) s.tester.MustApplyPatches() _, err = s.manager.Tick(ctx, s.state) - c.Assert(err, check.IsNil) + require.Nil(t, err) s.tester.MustApplyPatches() - c.Assert(s.manager.processors, check.HasLen, 1) + require.Len(t, s.manager.processors, 1) s.manager.AsyncClose() _, err = s.manager.Tick(ctx, s.state) - c.Assert(cerrors.ErrReactorFinished.Equal(errors.Cause(err)), check.IsTrue) + require.True(t, cerrors.ErrReactorFinished.Equal(errors.Cause(err))) s.tester.MustApplyPatches() - c.Assert(s.manager.processors, check.HasLen, 0) + require.Len(t, s.manager.processors, 0) } diff --git a/cdc/processor/processor_test.go b/cdc/processor/processor_test.go index b171473d167..611befc491c 100644 --- a/cdc/processor/processor_test.go +++ b/cdc/processor/processor_test.go @@ -21,7 +21,6 @@ import ( "sync/atomic" "testing" - "github.com/pingcap/check" "github.com/pingcap/errors" "github.com/pingcap/log" "github.com/pingcap/tiflow/cdc/entry" @@ -34,21 +33,15 @@ import ( cerror "github.com/pingcap/tiflow/pkg/errors" 
"github.com/pingcap/tiflow/pkg/etcd" "github.com/pingcap/tiflow/pkg/orchestrator" - "github.com/pingcap/tiflow/pkg/util/testleak" + "github.com/stretchr/testify/require" ) -func Test(t *testing.T) { check.TestingT(t) } - -type processorSuite struct{} - -var _ = check.Suite(&processorSuite{}) - // processor needs to implement TableExecutor. var _ scheduler.TableExecutor = (*processor)(nil) func newProcessor4Test( ctx cdcContext.Context, - c *check.C, + t *testing.T, createTablePipeline func(ctx cdcContext.Context, tableID model.TableID, replicaInfo *model.TableReplicaInfo) (tablepipeline.TablePipeline, error), ) *processor { p := newProcessor(ctx) @@ -59,12 +52,12 @@ func newProcessor4Test( p.sinkManager = &sink.Manager{} p.redoManager = redo.NewDisabledManager() p.createTablePipeline = createTablePipeline - p.schemaStorage = &mockSchemaStorage{c: c, resolvedTs: math.MaxUint64} + p.schemaStorage = &mockSchemaStorage{t: t, resolvedTs: math.MaxUint64} return p } -func initProcessor4Test(ctx cdcContext.Context, c *check.C) (*processor, *orchestrator.ReactorStateTester) { - p := newProcessor4Test(ctx, c, func(ctx cdcContext.Context, tableID model.TableID, replicaInfo *model.TableReplicaInfo) (tablepipeline.TablePipeline, error) { +func initProcessor4Test(ctx cdcContext.Context, t *testing.T) (*processor, *orchestrator.ReactorStateTester) { + p := newProcessor4Test(ctx, t, func(ctx cdcContext.Context, tableID model.TableID, replicaInfo *model.TableReplicaInfo) (tablepipeline.TablePipeline, error) { return &mockTablePipeline{ tableID: tableID, name: fmt.Sprintf("`test`.`table%d`", tableID), @@ -74,7 +67,7 @@ func initProcessor4Test(ctx cdcContext.Context, c *check.C) (*processor, *orches }, nil }) p.changefeed = orchestrator.NewChangefeedReactorState(ctx.ChangefeedVars().ID) - return p, orchestrator.NewReactorStateTester(c, p.changefeed, map[string]string{ + return p, orchestrator.NewReactorStateTester(t, p.changefeed, map[string]string{ "/tidb/cdc/capture/" + ctx.GlobalVars().CaptureInfo.ID: `{"id":"` + ctx.GlobalVars().CaptureInfo.ID + `","address":"127.0.0.1:8300"}`, "/tidb/cdc/changefeed/info/" + ctx.ChangefeedVars().ID: `{"sink-uri":"blackhole://","opts":{},"create-time":"2020-02-02T00:00:00.000000+00:00","start-ts":0,"target-ts":0,"admin-job-type":0,"sort-engine":"memory","sort-dir":".","config":{"case-sensitive":true,"enable-old-value":false,"force-replicate":false,"check-gc-safe-point":true,"filter":{"rules":["*.*"],"ignore-txn-start-ts":null,"ddl-allow-list":null},"mounter":{"worker-num":16},"sink":{"dispatchers":null,"protocol":"open-protocol"},"cyclic-replication":{"enable":false,"replica-id":0,"filter-replica-ids":null,"id-buckets":0,"sync-ddl":false},"scheduler":{"type":"table-number","polling-time":-1}},"state":"normal","history":null,"error":null,"sync-point-enabled":false,"sync-point-interval":600000000000}`, "/tidb/cdc/job/" + ctx.ChangefeedVars().ID: `{"resolved-ts":0,"checkpoint-ts":0,"admin-job-type":0}`, @@ -142,7 +135,7 @@ type mockSchemaStorage struct { // as we only need ResolvedTs() and DoGC() in unit tests. 
entry.SchemaStorage - c *check.C + t *testing.T lastGcTs uint64 resolvedTs uint64 } @@ -152,7 +145,7 @@ func (s *mockSchemaStorage) ResolvedTs() uint64 { } func (s *mockSchemaStorage) DoGC(ts uint64) uint64 { - s.c.Assert(s.lastGcTs, check.LessEqual, ts) + require.LessOrEqual(s.t, s.lastGcTs, ts) atomic.StoreUint64(&s.lastGcTs, ts) return ts } @@ -183,15 +176,14 @@ func (a *mockAgent) Close() error { return nil } -func (s *processorSuite) TestCheckTablesNum(c *check.C) { - defer testleak.AfterTest(c)() +func TestCheckTablesNum(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(true) - p, tester := initProcessor4Test(ctx, c) + p, tester := initProcessor4Test(ctx, t) var err error _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() - c.Assert(p.changefeed.TaskPositions[p.captureInfo.ID], check.DeepEquals, + require.Equal(t, p.changefeed.TaskPositions[p.captureInfo.ID], &model.TaskPosition{ CheckPointTs: 0, ResolvedTs: 0, @@ -199,13 +191,13 @@ func (s *processorSuite) TestCheckTablesNum(c *check.C) { Error: nil, }) - p, tester = initProcessor4Test(ctx, c) + p, tester = initProcessor4Test(ctx, t) p.changefeed.Info.StartTs = 66 p.changefeed.Status.CheckpointTs = 88 _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() - c.Assert(p.changefeed.TaskPositions[p.captureInfo.ID], check.DeepEquals, + require.Equal(t, p.changefeed.TaskPositions[p.captureInfo.ID], &model.TaskPosition{ CheckPointTs: 88, ResolvedTs: 88, @@ -214,14 +206,13 @@ func (s *processorSuite) TestCheckTablesNum(c *check.C) { }) } -func (s *processorSuite) TestHandleTableOperation4SingleTable(c *check.C) { - defer testleak.AfterTest(c)() +func TestHandleTableOperation4SingleTable(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(true) - p, tester := initProcessor4Test(ctx, c) + p, tester := initProcessor4Test(ctx, t) var err error // init tick _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() p.changefeed.PatchStatus(func(status *model.ChangeFeedStatus) (*model.ChangeFeedStatus, bool, error) { status.CheckpointTs = 90 @@ -236,7 +227,7 @@ func (s *processorSuite) TestHandleTableOperation4SingleTable(c *check.C) { // no operation _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() // add table, in processing @@ -247,9 +238,9 @@ func (s *processorSuite) TestHandleTableOperation4SingleTable(c *check.C) { }) tester.MustApplyPatches() _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() - c.Assert(p.changefeed.TaskStatuses[p.captureInfo.ID], check.DeepEquals, &model.TaskStatus{ + require.Equal(t, p.changefeed.TaskStatuses[p.captureInfo.ID], &model.TaskStatus{ Tables: map[int64]*model.TableReplicaInfo{ 66: {StartTs: 60}, }, @@ -260,9 +251,9 @@ func (s *processorSuite) TestHandleTableOperation4SingleTable(c *check.C) { // add table, not finished _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() - c.Assert(p.changefeed.TaskStatuses[p.captureInfo.ID], check.DeepEquals, &model.TaskStatus{ + require.Equal(t, p.changefeed.TaskStatuses[p.captureInfo.ID], &model.TaskStatus{ Tables: map[int64]*model.TableReplicaInfo{ 66: {StartTs: 60}, }, @@ -275,9 +266,9 @@ func (s *processorSuite) TestHandleTableOperation4SingleTable(c *check.C) { table66 := p.tables[66].(*mockTablePipeline) 
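mockSchemaStorage above shows the partial-mock idiom this file relies on: embed the interface so the struct satisfies it wholesale, implement only the methods the tests exercise, and carry the *testing.T so the mock can assert its own invariants, as DoGC does with require.LessOrEqual. A compact sketch of the same idiom (interface and names invented for illustration):

```go
package example

import (
	"sync/atomic"
	"testing"

	"github.com/stretchr/testify/require"
)

// Storage stands in for a wide interface like entry.SchemaStorage.
type Storage interface {
	ResolvedTs() uint64
	DoGC(ts uint64) uint64
}

type mockStorage struct {
	Storage // embedded nil interface: unimplemented methods panic loudly

	t        *testing.T
	lastGcTs uint64
}

// DoGC implements only what the tests call, asserting the monotonic-GC
// invariant from inside the mock via the stored *testing.T.
func (s *mockStorage) DoGC(ts uint64) uint64 {
	require.LessOrEqual(s.t, atomic.LoadUint64(&s.lastGcTs), ts)
	atomic.StoreUint64(&s.lastGcTs, ts)
	return ts
}

func TestMockStorageGC(t *testing.T) {
	s := &mockStorage{t: t}
	require.Equal(t, uint64(5), s.DoGC(5))
	require.Equal(t, uint64(9), s.DoGC(9))
}
```

Calling any method left to the nil embedded interface panics with a nil-pointer dereference, which is acceptable in tests because it fails loudly at the exact unimplemented call.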
table66.resolvedTs = 101 _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() - c.Assert(p.changefeed.TaskStatuses[p.captureInfo.ID], check.DeepEquals, &model.TaskStatus{ + require.Equal(t, p.changefeed.TaskStatuses[p.captureInfo.ID], &model.TaskStatus{ Tables: map[int64]*model.TableReplicaInfo{ 66: {StartTs: 60}, }, @@ -285,13 +276,13 @@ func (s *processorSuite) TestHandleTableOperation4SingleTable(c *check.C) { 66: {Delete: false, BoundaryTs: 60, Status: model.OperProcessed}, }, }) - c.Assert(p.changefeed.TaskPositions[p.captureInfo.ID].ResolvedTs, check.Equals, uint64(101)) + require.Equal(t, p.changefeed.TaskPositions[p.captureInfo.ID].ResolvedTs, uint64(101)) // finish the operation _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() - c.Assert(p.changefeed.TaskStatuses[p.captureInfo.ID], check.DeepEquals, &model.TaskStatus{ + require.Equal(t, p.changefeed.TaskStatuses[p.captureInfo.ID], &model.TaskStatus{ Tables: map[int64]*model.TableReplicaInfo{ 66: {StartTs: 60}, }, @@ -310,21 +301,21 @@ func (s *processorSuite) TestHandleTableOperation4SingleTable(c *check.C) { }) tester.MustApplyPatches() _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() - c.Assert(p.changefeed.TaskStatuses[p.captureInfo.ID], check.DeepEquals, &model.TaskStatus{ + require.Equal(t, p.changefeed.TaskStatuses[p.captureInfo.ID], &model.TaskStatus{ Tables: map[int64]*model.TableReplicaInfo{}, Operation: map[int64]*model.TableOperation{ 66: {Delete: true, BoundaryTs: 120, Status: model.OperProcessed}, }, }) - c.Assert(table66.stopTs, check.Equals, uint64(120)) + require.Equal(t, table66.stopTs, uint64(120)) // remove table, not finished _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() - c.Assert(p.changefeed.TaskStatuses[p.captureInfo.ID], check.DeepEquals, &model.TaskStatus{ + require.Equal(t, p.changefeed.TaskStatuses[p.captureInfo.ID], &model.TaskStatus{ Tables: map[int64]*model.TableReplicaInfo{}, Operation: map[int64]*model.TableOperation{ 66: {Delete: true, BoundaryTs: 120, Status: model.OperProcessed}, @@ -335,26 +326,25 @@ func (s *processorSuite) TestHandleTableOperation4SingleTable(c *check.C) { table66.status = tablepipeline.TableStatusStopped table66.checkpointTs = 121 _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() - c.Assert(p.changefeed.TaskStatuses[p.captureInfo.ID], check.DeepEquals, &model.TaskStatus{ + require.Equal(t, p.changefeed.TaskStatuses[p.captureInfo.ID], &model.TaskStatus{ Tables: map[int64]*model.TableReplicaInfo{}, Operation: map[int64]*model.TableOperation{ 66: {Delete: true, BoundaryTs: 121, Status: model.OperFinished}, }, }) - c.Assert(table66.canceled, check.IsTrue) - c.Assert(p.tables[66], check.IsNil) + require.True(t, table66.canceled) + require.Nil(t, p.tables[66]) } -func (s *processorSuite) TestHandleTableOperation4MultiTable(c *check.C) { - defer testleak.AfterTest(c)() +func TestHandleTableOperation4MultiTable(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(true) - p, tester := initProcessor4Test(ctx, c) + p, tester := initProcessor4Test(ctx, t) var err error // init tick _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() p.changefeed.PatchStatus(func(status *model.ChangeFeedStatus) 
(*model.ChangeFeedStatus, bool, error) { status.CheckpointTs = 20 @@ -370,7 +360,7 @@ func (s *processorSuite) TestHandleTableOperation4MultiTable(c *check.C) { // no operation _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() // add table, in processing @@ -384,9 +374,9 @@ func (s *processorSuite) TestHandleTableOperation4MultiTable(c *check.C) { }) tester.MustApplyPatches() _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() - c.Assert(p.changefeed.TaskStatuses[p.captureInfo.ID], check.DeepEquals, &model.TaskStatus{ + require.Equal(t, p.changefeed.TaskStatuses[p.captureInfo.ID], &model.TaskStatus{ Tables: map[int64]*model.TableReplicaInfo{ 1: {StartTs: 60}, 2: {StartTs: 50}, @@ -399,9 +389,9 @@ func (s *processorSuite) TestHandleTableOperation4MultiTable(c *check.C) { 3: {Delete: false, BoundaryTs: 40, Status: model.OperProcessed}, }, }) - c.Assert(p.tables, check.HasLen, 4) - c.Assert(p.changefeed.TaskPositions[p.captureInfo.ID].CheckPointTs, check.Equals, uint64(30)) - c.Assert(p.changefeed.TaskPositions[p.captureInfo.ID].ResolvedTs, check.Equals, uint64(30)) + require.Len(t, p.tables, 4) + require.Equal(t, p.changefeed.TaskPositions[p.captureInfo.ID].CheckPointTs, uint64(30)) + require.Equal(t, p.changefeed.TaskPositions[p.captureInfo.ID].ResolvedTs, uint64(30)) // add table, push the resolvedTs, finished add table table1 := p.tables[1].(*mockTablePipeline) @@ -419,9 +409,9 @@ func (s *processorSuite) TestHandleTableOperation4MultiTable(c *check.C) { }) tester.MustApplyPatches() _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() - c.Assert(p.changefeed.TaskStatuses[p.captureInfo.ID], check.DeepEquals, &model.TaskStatus{ + require.Equal(t, p.changefeed.TaskStatuses[p.captureInfo.ID], &model.TaskStatus{ Tables: map[int64]*model.TableReplicaInfo{ 1: {StartTs: 60}, 2: {StartTs: 50}, @@ -433,18 +423,18 @@ func (s *processorSuite) TestHandleTableOperation4MultiTable(c *check.C) { 3: {Delete: true, BoundaryTs: 60, Status: model.OperProcessed}, }, }) - c.Assert(p.tables, check.HasLen, 4) - c.Assert(table3.canceled, check.IsFalse) - c.Assert(table3.stopTs, check.Equals, uint64(60)) - c.Assert(p.changefeed.TaskPositions[p.captureInfo.ID].ResolvedTs, check.Equals, uint64(101)) + require.Len(t, p.tables, 4) + require.False(t, table3.canceled) + require.Equal(t, table3.stopTs, uint64(60)) + require.Equal(t, p.changefeed.TaskPositions[p.captureInfo.ID].ResolvedTs, uint64(101)) // finish remove operations table3.status = tablepipeline.TableStatusStopped table3.checkpointTs = 65 _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() - c.Assert(p.changefeed.TaskStatuses[p.captureInfo.ID], check.DeepEquals, &model.TaskStatus{ + require.Equal(t, p.changefeed.TaskStatuses[p.captureInfo.ID], &model.TaskStatus{ Tables: map[int64]*model.TableReplicaInfo{ 1: {StartTs: 60}, 2: {StartTs: 50}, @@ -456,8 +446,8 @@ func (s *processorSuite) TestHandleTableOperation4MultiTable(c *check.C) { 3: {Delete: true, BoundaryTs: 65, Status: model.OperFinished}, }, }) - c.Assert(p.tables, check.HasLen, 3) - c.Assert(table3.canceled, check.IsTrue) + require.Len(t, p.tables, 3) + require.True(t, table3.canceled) // clear finished operations cleanUpFinishedOpOperation(p.changefeed, p.captureInfo.ID, tester) @@ -471,25 +461,25 @@ func (s *processorSuite) TestHandleTableOperation4MultiTable(c 
*check.C) { }) tester.MustApplyPatches() _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() - c.Assert(p.changefeed.TaskStatuses[p.captureInfo.ID], check.DeepEquals, &model.TaskStatus{ + require.Equal(t, p.changefeed.TaskStatuses[p.captureInfo.ID], &model.TaskStatus{ Tables: map[int64]*model.TableReplicaInfo{}, Operation: map[int64]*model.TableOperation{ 1: {Delete: true, BoundaryTs: 120, Status: model.OperProcessed}, 4: {Delete: true, BoundaryTs: 120, Status: model.OperProcessed}, }, }) - c.Assert(table1.stopTs, check.Equals, uint64(120)) - c.Assert(table4.stopTs, check.Equals, uint64(120)) - c.Assert(table2.canceled, check.IsTrue) - c.Assert(p.tables, check.HasLen, 2) + require.Equal(t, table1.stopTs, uint64(120)) + require.Equal(t, table4.stopTs, uint64(120)) + require.True(t, table2.canceled) + require.Len(t, p.tables, 2) // remove table, not finished _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() - c.Assert(p.changefeed.TaskStatuses[p.captureInfo.ID], check.DeepEquals, &model.TaskStatus{ + require.Equal(t, p.changefeed.TaskStatuses[p.captureInfo.ID], &model.TaskStatus{ Tables: map[int64]*model.TableReplicaInfo{}, Operation: map[int64]*model.TableOperation{ 1: {Delete: true, BoundaryTs: 120, Status: model.OperProcessed}, @@ -503,24 +493,23 @@ func (s *processorSuite) TestHandleTableOperation4MultiTable(c *check.C) { table4.status = tablepipeline.TableStatusStopped table4.checkpointTs = 122 _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() - c.Assert(p.changefeed.TaskStatuses[p.captureInfo.ID], check.DeepEquals, &model.TaskStatus{ + require.Equal(t, p.changefeed.TaskStatuses[p.captureInfo.ID], &model.TaskStatus{ Tables: map[int64]*model.TableReplicaInfo{}, Operation: map[int64]*model.TableOperation{ 1: {Delete: true, BoundaryTs: 121, Status: model.OperFinished}, 4: {Delete: true, BoundaryTs: 122, Status: model.OperFinished}, }, }) - c.Assert(table1.canceled, check.IsTrue) - c.Assert(table4.canceled, check.IsTrue) - c.Assert(p.tables, check.HasLen, 0) + require.True(t, table1.canceled) + require.True(t, table4.canceled) + require.Len(t, p.tables, 0) } -func (s *processorSuite) TestTableExecutor(c *check.C) { - defer testleak.AfterTest(c)() +func TestTableExecutor(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(true) - p, tester := initProcessor4Test(ctx, c) + p, tester := initProcessor4Test(ctx, t) p.newSchedulerEnabled = true p.lazyInit = func(ctx cdcContext.Context) error { p.agent = &mockAgent{executor: p} @@ -530,7 +519,7 @@ func (s *processorSuite) TestTableExecutor(c *check.C) { var err error // init tick _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() p.changefeed.PatchStatus(func(status *model.ChangeFeedStatus) (*model.ChangeFeedStatus, bool, error) { status.CheckpointTs = 20 @@ -546,40 +535,38 @@ func (s *processorSuite) TestTableExecutor(c *check.C) { // no operation _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() ok, err := p.AddTable(ctx, 1) - c.Check(err, check.IsNil) - c.Check(ok, check.IsTrue) + require.Nil(t, err) + require.True(t, ok) ok, err = p.AddTable(ctx, 2) - c.Check(err, check.IsNil) - c.Check(ok, check.IsTrue) + require.Nil(t, err) + require.True(t, ok) ok, err = p.AddTable(ctx, 3) - c.Check(err, check.IsNil) - c.Check(ok, check.IsTrue) + 
require.Nil(t, err) + require.True(t, ok) ok, err = p.AddTable(ctx, 4) - c.Check(err, check.IsNil) - c.Check(ok, check.IsTrue) - - c.Assert(p.tables, check.HasLen, 4) + require.Nil(t, err) + require.True(t, ok) + require.Len(t, p.tables, 4) checkpointTs := p.agent.GetLastSentCheckpointTs() - c.Assert(checkpointTs, check.Equals, uint64(0)) + require.Equal(t, checkpointTs, uint64(0)) done := p.IsAddTableFinished(ctx, 1) - c.Check(done, check.IsFalse) + require.False(t, done) done = p.IsAddTableFinished(ctx, 2) - c.Check(done, check.IsFalse) + require.False(t, done) done = p.IsAddTableFinished(ctx, 3) - c.Check(done, check.IsFalse) + require.False(t, done) done = p.IsAddTableFinished(ctx, 4) - c.Check(done, check.IsFalse) - - c.Assert(p.tables, check.HasLen, 4) + require.False(t, done) + require.Len(t, p.tables, 4) _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() // add table, push the resolvedTs, finished add table @@ -598,16 +585,16 @@ func (s *processorSuite) TestTableExecutor(c *check.C) { table4.checkpointTs = 30 done = p.IsAddTableFinished(ctx, 1) - c.Check(done, check.IsTrue) + require.True(t, done) done = p.IsAddTableFinished(ctx, 2) - c.Check(done, check.IsTrue) + require.True(t, done) done = p.IsAddTableFinished(ctx, 3) - c.Check(done, check.IsTrue) + require.True(t, done) done = p.IsAddTableFinished(ctx, 4) - c.Check(done, check.IsTrue) + require.True(t, done) _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() table1.checkpointTs = 75 @@ -616,77 +603,78 @@ func (s *processorSuite) TestTableExecutor(c *check.C) { table4.checkpointTs = 75 _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() checkpointTs = p.agent.GetLastSentCheckpointTs() - c.Assert(checkpointTs, check.Equals, uint64(60)) + require.Equal(t, checkpointTs, uint64(60)) - updateChangeFeedPosition(c, tester, ctx.ChangefeedVars().ID, 103, 60) + updateChangeFeedPosition(t, tester, ctx.ChangefeedVars().ID, 103, 60) _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() ok, err = p.RemoveTable(ctx, 3) - c.Check(err, check.IsNil) - c.Check(ok, check.IsTrue) + require.Nil(t, err) + require.True(t, ok) _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) + tester.MustApplyPatches() - c.Assert(p.tables, check.HasLen, 4) - c.Assert(table3.canceled, check.IsFalse) - c.Assert(table3.stopTs, check.Equals, uint64(60)) + require.Len(t, p.tables, 4) + require.False(t, table3.canceled) + require.Equal(t, table3.stopTs, uint64(60)) done = p.IsRemoveTableFinished(ctx, 3) - c.Assert(done, check.IsFalse) + require.False(t, done) _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() checkpointTs = p.agent.GetLastSentCheckpointTs() - c.Assert(checkpointTs, check.Equals, uint64(60)) + require.Equal(t, checkpointTs, uint64(60)) // finish remove operations table3.status = tablepipeline.TableStatusStopped table3.checkpointTs = 65 _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) + tester.MustApplyPatches() - c.Assert(p.tables, check.HasLen, 4) - c.Assert(table3.canceled, check.IsFalse) + require.Len(t, p.tables, 4) + require.False(t, table3.canceled) done = p.IsRemoveTableFinished(ctx, 3) - c.Assert(done, check.IsTrue) + require.True(t, done) - c.Assert(p.tables, check.HasLen, 3) - 
c.Assert(table3.canceled, check.IsTrue) + require.Len(t, p.tables, 3) + require.True(t, table3.canceled) _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() checkpointTs = p.agent.GetLastSentCheckpointTs() - c.Assert(checkpointTs, check.Equals, uint64(75)) + require.Equal(t, checkpointTs, uint64(75)) err = p.Close() - c.Assert(err, check.IsNil) - c.Assert(p.agent, check.IsNil) + require.Nil(t, err) + require.Nil(t, p.agent) } -func (s *processorSuite) TestInitTable(c *check.C) { - defer testleak.AfterTest(c)() +func TestInitTable(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(true) - p, tester := initProcessor4Test(ctx, c) + p, tester := initProcessor4Test(ctx, t) var err error // init tick _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() p.changefeed.PatchTaskStatus(p.captureInfo.ID, func(status *model.TaskStatus) (*model.TaskStatus, bool, error) { @@ -696,28 +684,27 @@ func (s *processorSuite) TestInitTable(c *check.C) { }) tester.MustApplyPatches() _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() - c.Assert(p.tables[1], check.Not(check.IsNil)) - c.Assert(p.tables[2], check.Not(check.IsNil)) + require.NotNil(t, p.tables[1]) + require.NotNil(t, p.tables[2]) } -func (s *processorSuite) TestProcessorError(c *check.C) { - defer testleak.AfterTest(c)() +func TestProcessorError(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(true) - p, tester := initProcessor4Test(ctx, c) + p, tester := initProcessor4Test(ctx, t) var err error // init tick _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() // send an abnormal error p.sendError(cerror.ErrSinkURIInvalid) _, err = p.Tick(ctx, p.changefeed) tester.MustApplyPatches() - c.Assert(cerror.ErrReactorFinished.Equal(errors.Cause(err)), check.IsTrue) - c.Assert(p.changefeed.TaskPositions[p.captureInfo.ID], check.DeepEquals, &model.TaskPosition{ + require.True(t, cerror.ErrReactorFinished.Equal(errors.Cause(err))) + require.Equal(t, p.changefeed.TaskPositions[p.captureInfo.ID], &model.TaskPosition{ Error: &model.RunningError{ Addr: "127.0.0.1:0000", Code: "CDC:ErrSinkURIInvalid", @@ -725,30 +712,29 @@ func (s *processorSuite) TestProcessorError(c *check.C) { }, }) - p, tester = initProcessor4Test(ctx, c) + p, tester = initProcessor4Test(ctx, t) // init tick _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() // send a normal error p.sendError(context.Canceled) _, err = p.Tick(ctx, p.changefeed) tester.MustApplyPatches() - c.Assert(cerror.ErrReactorFinished.Equal(errors.Cause(err)), check.IsTrue) - c.Assert(p.changefeed.TaskPositions[p.captureInfo.ID], check.DeepEquals, &model.TaskPosition{ + require.True(t, cerror.ErrReactorFinished.Equal(errors.Cause(err))) + require.Equal(t, p.changefeed.TaskPositions[p.captureInfo.ID], &model.TaskPosition{ Error: nil, }) } -func (s *processorSuite) TestProcessorExit(c *check.C) { - defer testleak.AfterTest(c)() +func TestProcessorExit(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(true) - p, tester := initProcessor4Test(ctx, c) + p, tester := initProcessor4Test(ctx, t) var err error // init tick _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() // stop the changefeed @@ -762,21 +748,20 @@ func (s *processorSuite) 
TestProcessorExit(c *check.C) { }) tester.MustApplyPatches() _, err = p.Tick(ctx, p.changefeed) - c.Assert(cerror.ErrReactorFinished.Equal(errors.Cause(err)), check.IsTrue) + require.True(t, cerror.ErrReactorFinished.Equal(errors.Cause(err))) tester.MustApplyPatches() - c.Assert(p.changefeed.TaskPositions[p.captureInfo.ID], check.DeepEquals, &model.TaskPosition{ + require.Equal(t, p.changefeed.TaskPositions[p.captureInfo.ID], &model.TaskPosition{ Error: nil, }) } -func (s *processorSuite) TestProcessorClose(c *check.C) { - defer testleak.AfterTest(c)() +func TestProcessorClose(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(true) - p, tester := initProcessor4Test(ctx, c) + p, tester := initProcessor4Test(ctx, t) var err error // init tick _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() // add tables @@ -787,7 +772,7 @@ func (s *processorSuite) TestProcessorClose(c *check.C) { }) tester.MustApplyPatches() _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() // push the resolvedTs and checkpointTs @@ -801,27 +786,27 @@ func (s *processorSuite) TestProcessorClose(c *check.C) { p.tables[1].(*mockTablePipeline).checkpointTs = 90 p.tables[2].(*mockTablePipeline).checkpointTs = 95 _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() - c.Assert(p.changefeed.TaskPositions[p.captureInfo.ID], check.DeepEquals, &model.TaskPosition{ + require.Equal(t, p.changefeed.TaskPositions[p.captureInfo.ID], &model.TaskPosition{ CheckPointTs: 90, ResolvedTs: 90, Error: nil, }) - c.Assert(p.changefeed.TaskStatuses[p.captureInfo.ID], check.DeepEquals, &model.TaskStatus{ + require.Equal(t, p.changefeed.TaskStatuses[p.captureInfo.ID], &model.TaskStatus{ Tables: map[int64]*model.TableReplicaInfo{1: {StartTs: 20}, 2: {StartTs: 30}}, }) - c.Assert(p.changefeed.Workloads[p.captureInfo.ID], check.DeepEquals, model.TaskWorkload{1: {Workload: 1}, 2: {Workload: 1}}) + require.Equal(t, p.changefeed.Workloads[p.captureInfo.ID], model.TaskWorkload{1: {Workload: 1}, 2: {Workload: 1}}) - c.Assert(p.Close(), check.IsNil) + require.Nil(t, p.Close()) tester.MustApplyPatches() - c.Assert(p.tables[1].(*mockTablePipeline).canceled, check.IsTrue) - c.Assert(p.tables[2].(*mockTablePipeline).canceled, check.IsTrue) + require.True(t, p.tables[1].(*mockTablePipeline).canceled) + require.True(t, p.tables[2].(*mockTablePipeline).canceled) - p, tester = initProcessor4Test(ctx, c) + p, tester = initProcessor4Test(ctx, t) // init tick _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() // add tables @@ -832,30 +817,29 @@ func (s *processorSuite) TestProcessorClose(c *check.C) { }) tester.MustApplyPatches() _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() // send error p.sendError(cerror.ErrSinkURIInvalid) _, err = p.Tick(ctx, p.changefeed) - c.Assert(cerror.ErrReactorFinished.Equal(errors.Cause(err)), check.IsTrue) + require.True(t, cerror.ErrReactorFinished.Equal(errors.Cause(err))) tester.MustApplyPatches() - c.Assert(p.Close(), check.IsNil) + require.Nil(t, p.Close()) tester.MustApplyPatches() - c.Assert(p.changefeed.TaskPositions[p.captureInfo.ID].Error, check.DeepEquals, &model.RunningError{ + require.Equal(t, p.changefeed.TaskPositions[p.captureInfo.ID].Error, &model.RunningError{ Addr: "127.0.0.1:0000", Code: 
"CDC:ErrSinkURIInvalid", Message: "[CDC:ErrSinkURIInvalid]sink uri invalid", }) - c.Assert(p.tables[1].(*mockTablePipeline).canceled, check.IsTrue) - c.Assert(p.tables[2].(*mockTablePipeline).canceled, check.IsTrue) + require.True(t, p.tables[1].(*mockTablePipeline).canceled) + require.True(t, p.tables[2].(*mockTablePipeline).canceled) } -func (s *processorSuite) TestPositionDeleted(c *check.C) { - defer testleak.AfterTest(c)() +func TestPositionDeleted(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(true) - p, tester := initProcessor4Test(ctx, c) + p, tester := initProcessor4Test(ctx, t) p.changefeed.PatchTaskStatus(p.captureInfo.ID, func(status *model.TaskStatus) (*model.TaskStatus, bool, error) { status.Tables[1] = &model.TableReplicaInfo{StartTs: 30} status.Tables[2] = &model.TableReplicaInfo{StartTs: 40} @@ -864,14 +848,14 @@ func (s *processorSuite) TestPositionDeleted(c *check.C) { var err error // init tick _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() // cal position _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() - c.Assert(p.changefeed.TaskPositions[p.captureInfo.ID], check.DeepEquals, &model.TaskPosition{ + require.Equal(t, p.changefeed.TaskPositions[p.captureInfo.ID], &model.TaskPosition{ CheckPointTs: 30, ResolvedTs: 30, }) @@ -883,27 +867,26 @@ func (s *processorSuite) TestPositionDeleted(c *check.C) { tester.MustApplyPatches() // position created again _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() - c.Assert(p.changefeed.TaskPositions[p.captureInfo.ID], check.DeepEquals, &model.TaskPosition{ + require.Equal(t, p.changefeed.TaskPositions[p.captureInfo.ID], &model.TaskPosition{ CheckPointTs: 0, ResolvedTs: 0, }) // cal position _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() - c.Assert(p.changefeed.TaskPositions[p.captureInfo.ID], check.DeepEquals, &model.TaskPosition{ + require.Equal(t, p.changefeed.TaskPositions[p.captureInfo.ID], &model.TaskPosition{ CheckPointTs: 30, ResolvedTs: 30, }) } -func (s *processorSuite) TestSchemaGC(c *check.C) { - defer testleak.AfterTest(c)() +func TestSchemaGC(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(true) - p, tester := initProcessor4Test(ctx, c) + p, tester := initProcessor4Test(ctx, t) p.changefeed.PatchTaskStatus(p.captureInfo.ID, func(status *model.TaskStatus) (*model.TaskStatus, bool, error) { status.Tables[1] = &model.TableReplicaInfo{StartTs: 30} status.Tables[2] = &model.TableReplicaInfo{StartTs: 40} @@ -913,17 +896,17 @@ func (s *processorSuite) TestSchemaGC(c *check.C) { var err error // init tick _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() - updateChangeFeedPosition(c, tester, "changefeed-id-test", 50, 50) + updateChangeFeedPosition(t, tester, "changefeed-id-test", 50, 50) _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() // GC Ts should be (checkpoint - 1). 
- c.Assert(p.schemaStorage.(*mockSchemaStorage).lastGcTs, check.Equals, uint64(49)) - c.Assert(p.lastSchemaTs, check.Equals, uint64(49)) + require.Equal(t, p.schemaStorage.(*mockSchemaStorage).lastGcTs, uint64(49)) + require.Equal(t, p.lastSchemaTs, uint64(49)) } func cleanUpFinishedOpOperation(state *orchestrator.ChangefeedReactorState, captureID model.CaptureID, tester *orchestrator.ReactorStateTester) { @@ -941,7 +924,7 @@ func cleanUpFinishedOpOperation(state *orchestrator.ChangefeedReactorState, capt tester.MustApplyPatches() } -func updateChangeFeedPosition(c *check.C, tester *orchestrator.ReactorStateTester, cfID model.ChangeFeedID, resolvedTs, checkpointTs model.Ts) { +func updateChangeFeedPosition(t *testing.T, tester *orchestrator.ReactorStateTester, cfID model.ChangeFeedID, resolvedTs, checkpointTs model.Ts) { key := etcd.CDCKey{ Tp: etcd.CDCKeyTypeChangeFeedStatus, ChangefeedID: cfID, @@ -953,14 +936,12 @@ func updateChangeFeedPosition(c *check.C, tester *orchestrator.ReactorStateTeste CheckpointTs: checkpointTs, } valueBytes, err := json.Marshal(cfStatus) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustUpdate(keyStr, valueBytes) } -func (s *processorSuite) TestIgnorableError(c *check.C) { - defer testleak.AfterTest(c)() - +func TestIgnorableError(t *testing.T) { testCases := []struct { err error ignorable bool @@ -974,14 +955,13 @@ func (s *processorSuite) TestIgnorableError(c *check.C) { {errors.New("test error"), false}, } for _, tc := range testCases { - c.Assert(isProcessorIgnorableError(tc.err), check.Equals, tc.ignorable) + require.Equal(t, isProcessorIgnorableError(tc.err), tc.ignorable) } } -func (s *processorSuite) TestUpdateBarrierTs(c *check.C) { - defer testleak.AfterTest(c)() +func TestUpdateBarrierTs(t *testing.T) { ctx := cdcContext.NewBackendContext4Test(true) - p, tester := initProcessor4Test(ctx, c) + p, tester := initProcessor4Test(ctx, t) p.changefeed.PatchStatus(func(status *model.ChangeFeedStatus) (*model.ChangeFeedStatus, bool, error) { status.CheckpointTs = 5 status.ResolvedTs = 10 @@ -995,11 +975,11 @@ func (s *processorSuite) TestUpdateBarrierTs(c *check.C) { // init tick, add table OperDispatched. _, err := p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() // tick again, add table OperProcessed. _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() // Global resolved ts has advanced while schema storage stalls. @@ -1008,16 +988,16 @@ func (s *processorSuite) TestUpdateBarrierTs(c *check.C) { return status, true, nil }) _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() tb := p.tables[model.TableID(1)].(*mockTablePipeline) - c.Assert(tb.barrierTs, check.Equals, uint64(10)) + require.Equal(t, tb.barrierTs, uint64(10)) // Schema storage has advanced too. 
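TestIgnorableError above shows the table-driven shape these migrated tests settle into: with the suite receiver gone, the whole pattern reduces to a plain test function looping over a slice of cases. A self-contained sketch of the same structure (the predicate and package clause are stand-ins, not the real isProcessorIgnorableError):

package processor

import (
	"context"
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

// ignorable is a stand-in predicate for isProcessorIgnorableError.
func ignorable(err error) bool {
	return err == nil || errors.Is(err, context.Canceled)
}

func TestIgnorableSketch(t *testing.T) {
	testCases := []struct {
		err       error
		ignorable bool
	}{
		{nil, true},
		{context.Canceled, true},
		{errors.New("test error"), false},
	}
	for _, tc := range testCases {
		require.Equal(t, tc.ignorable, ignorable(tc.err))
	}
}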
p.schemaStorage.(*mockSchemaStorage).resolvedTs = 15 _, err = p.Tick(ctx, p.changefeed) - c.Assert(err, check.IsNil) + require.Nil(t, err) tester.MustApplyPatches() tb = p.tables[model.TableID(1)].(*mockTablePipeline) - c.Assert(tb.barrierTs, check.Equals, uint64(15)) + require.Equal(t, tb.barrierTs, uint64(15)) } diff --git a/pkg/orchestrator/etcd_worker_bank_test.go b/pkg/orchestrator/etcd_worker_bank_test.go index fbbcec988c5..f79f3c18edd 100644 --- a/pkg/orchestrator/etcd_worker_bank_test.go +++ b/pkg/orchestrator/etcd_worker_bank_test.go @@ -20,19 +20,19 @@ import ( "strconv" "strings" "sync" + "testing" "time" - "github.com/pingcap/check" "github.com/pingcap/failpoint" "github.com/pingcap/log" cerror "github.com/pingcap/tiflow/pkg/errors" "github.com/pingcap/tiflow/pkg/orchestrator/util" - "github.com/pingcap/tiflow/pkg/util/testleak" + "github.com/stretchr/testify/require" "go.uber.org/zap" ) type bankReactorState struct { - c *check.C + t *testing.T account []int pendingPatch [][]DataPatch index int @@ -42,7 +42,7 @@ type bankReactorState struct { const bankTestPrefix = "/ticdc/test/bank/" func (b *bankReactorState) Update(key util.EtcdKey, value []byte, isInit bool) error { - b.c.Assert(strings.HasPrefix(key.String(), bankTestPrefix), check.IsTrue) + require.True(b.t, strings.HasPrefix(key.String(), bankTestPrefix)) indexStr := key.String()[len(bankTestPrefix):] b.account[b.atoi(indexStr)] = b.atoi(string(value)) return nil @@ -62,12 +62,12 @@ func (b *bankReactorState) Check() { if sum != 0 { log.Info("show account", zap.Int("index", b.index), zap.Int("sum", sum), zap.Ints("account", b.account)) } - b.c.Assert(sum, check.Equals, 0, check.Commentf("not ft:%t", b.notFirstTick)) + require.Equal(b.t, sum, 0, fmt.Sprintf("not ft:%t", b.notFirstTick)) } func (b *bankReactorState) atoi(value string) int { i, err := strconv.Atoi(value) - b.c.Assert(err, check.IsNil) + require.Nil(b.t, err) return i } @@ -120,9 +120,7 @@ func (b *bankReactor) Tick(ctx context.Context, state ReactorState) (nextState R return state, err } -func (s *etcdWorkerSuite) TestEtcdBank(c *check.C) { - defer testleak.AfterTest(c)() - +func TestEtcdBank(t *testing.T) { _ = failpoint.Enable("github.com/pingcap/tiflow/pkg/orchestrator/InjectProgressRequestAfterCommit", "10%return(true)") defer func() { _ = failpoint.Disable("github.com/pingcap/tiflow/pkg/orchestrator/InjectProgressRequestAfterCommit") @@ -134,7 +132,7 @@ func (s *etcdWorkerSuite) TestEtcdBank(c *check.C) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) defer cancel() - newClient, closer := setUpTest(c) + newClient, closer := setUpTest(t) defer closer() cli := newClient() @@ -144,7 +142,7 @@ func (s *etcdWorkerSuite) TestEtcdBank(c *check.C) { for i := 0; i < totalAccountNumber; i++ { _, err := cli.Put(ctx, fmt.Sprintf("%s%d", bankTestPrefix, i), "0") - c.Assert(err, check.IsNil) + require.Nil(t, err) } for i := 0; i < workerNumber; i++ { @@ -155,13 +153,13 @@ func (s *etcdWorkerSuite) TestEtcdBank(c *check.C) { for { worker, err := NewEtcdWorker(cli, bankTestPrefix, &bankReactor{ accountNumber: totalAccountNumber, - }, &bankReactorState{c: c, index: i, account: make([]int, totalAccountNumber)}) - c.Assert(err, check.IsNil) + }, &bankReactorState{t: t, index: i, account: make([]int, totalAccountNumber)}) + require.Nil(t, err) err = worker.Run(ctx, nil, 100*time.Millisecond, "127.0.0.1", "") if err == nil || err.Error() == "etcdserver: request timed out" { continue } - c.Assert(err, check.ErrorMatches, ".*context deadline 
exceeded.*") + require.Contains(t, err.Error(), "context deadline exceeded") return } }() diff --git a/pkg/orchestrator/etcd_worker_test.go b/pkg/orchestrator/etcd_worker_test.go index d5b00f2f3c7..942298dffb5 100644 --- a/pkg/orchestrator/etcd_worker_test.go +++ b/pkg/orchestrator/etcd_worker_test.go @@ -16,6 +16,8 @@ package orchestrator import ( "context" "encoding/json" + "io/ioutil" + "os" "regexp" "strconv" "strings" @@ -23,14 +25,13 @@ import ( "testing" "time" - "github.com/pingcap/check" "github.com/pingcap/errors" "github.com/pingcap/log" cerrors "github.com/pingcap/tiflow/pkg/errors" "github.com/pingcap/tiflow/pkg/etcd" "github.com/pingcap/tiflow/pkg/orchestrator/util" - "github.com/pingcap/tiflow/pkg/util/testleak" "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" "go.etcd.io/etcd/clientv3" "go.uber.org/zap" "golang.org/x/sync/errgroup" @@ -43,12 +44,6 @@ const ( totalTicksPerReactor = 1000 ) -func Test(t *testing.T) { check.TestingT(t) } - -var _ = check.Suite(&etcdWorkerSuite{}) - -type etcdWorkerSuite struct{} - type simpleReactor struct { state *simpleReactorState tickCount int @@ -198,26 +193,27 @@ func (s *simpleReactorState) GetPatches() [][]DataPatch { return [][]DataPatch{ret} } -func setUpTest(c *check.C) (func() *etcd.Client, func()) { - dir := c.MkDir() +func setUpTest(t *testing.T) (func() *etcd.Client, func()) { + dir, err := ioutil.TempDir("", "etcd-test") + require.Nil(t, err) url, server, err := etcd.SetupEmbedEtcd(dir) - c.Assert(err, check.IsNil) + require.Nil(t, err) endpoints := []string{url.String()} return func() *etcd.Client { rawCli, err := clientv3.NewFromURLs(endpoints) - c.Check(err, check.IsNil) + require.Nil(t, err) return etcd.Wrap(rawCli, map[string]prometheus.Counter{}) }, func() { server.Close() + os.RemoveAll(dir) } } -func (s *etcdWorkerSuite) TestEtcdSum(c *check.C) { - defer testleak.AfterTest(c)() +func TestEtcdSum(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5) defer cancel() - newClient, closer := setUpTest(c) + newClient, closer := setUpTest(t) defer closer() cli := newClient() @@ -225,15 +221,15 @@ func (s *etcdWorkerSuite) TestEtcdSum(c *check.C) { _ = cli.Unwrap().Close() }() _, err := cli.Put(ctx, testEtcdKeyPrefix+"/sum", "0") - c.Check(err, check.IsNil) + require.Nil(t, err) initArray := make([]int, numValuesPerGroup) jsonStr, err := json.Marshal(initArray) - c.Check(err, check.IsNil) + require.Nil(t, err) for i := 0; i < numGroups; i++ { _, err := cli.Put(ctx, testEtcdKeyPrefix+"/"+strconv.Itoa(i), string(jsonStr)) - c.Check(err, check.IsNil) + require.Nil(t, err) } errg, ctx := errgroup.WithContext(ctx) @@ -277,7 +273,7 @@ func (s *etcdWorkerSuite) TestEtcdSum(c *check.C) { strings.Contains(err.Error(), "etcdserver: request timeout")) { return } - c.Check(err, check.IsNil) + require.Nil(t, err) } type intReactorState struct { @@ -326,20 +322,18 @@ func (r *linearizabilityReactor) Tick(ctx context.Context, state ReactorState) ( return r.state, nil } -func (s *etcdWorkerSuite) TestLinearizability(c *check.C) { - defer testleak.AfterTest(c)() - +func TestLinearizability(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5) defer cancel() - newClient, closer := setUpTest(c) + newClient, closer := setUpTest(t) defer closer() cli0 := newClient() cli := newClient() for i := 0; i < 1000; i++ { _, err := cli.Put(ctx, testEtcdKeyPrefix+"/lin", strconv.Itoa(i)) - c.Assert(err, check.IsNil) + require.Nil(t, err) } reactor, err := 
NewEtcdWorker(cli0, testEtcdKeyPrefix+"/lin", &linearizabilityReactor{ @@ -349,7 +343,7 @@ func (s *etcdWorkerSuite) TestLinearizability(c *check.C) { val: 0, isUpdated: false, }) - c.Assert(err, check.IsNil) + require.Nil(t, err) errg := &errgroup.Group{} errg.Go(func() error { return reactor.Run(ctx, nil, 10*time.Millisecond, "127.0.0.1", "") @@ -358,16 +352,16 @@ func (s *etcdWorkerSuite) TestLinearizability(c *check.C) { time.Sleep(500 * time.Millisecond) for i := 999; i < 2000; i++ { _, err := cli.Put(ctx, testEtcdKeyPrefix+"/lin", strconv.Itoa(i)) - c.Assert(err, check.IsNil) + require.Nil(t, err) } err = errg.Wait() - c.Assert(err, check.IsNil) + require.Nil(t, err) err = cli.Unwrap().Close() - c.Assert(err, check.IsNil) + require.Nil(t, err) err = cli0.Unwrap().Close() - c.Assert(err, check.IsNil) + require.Nil(t, err) } type commonReactorState struct { @@ -420,13 +414,11 @@ func (r *finishedReactor) Tick(ctx context.Context, state ReactorState) (nextSta return r.state, cerrors.ErrReactorFinished } -func (s *etcdWorkerSuite) TestFinished(c *check.C) { - defer testleak.AfterTest(c)() - +func TestFinished(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5) defer cancel() - newClient, closer := setUpTest(c) + newClient, closer := setUpTest(t) defer closer() cli := newClient() @@ -436,19 +428,18 @@ func (s *etcdWorkerSuite) TestFinished(c *check.C) { }, &commonReactorState{ state: make(map[string]string), }) - c.Assert(err, check.IsNil) - + require.Nil(t, err) err = reactor.Run(ctx, nil, 10*time.Millisecond, "127.0.0.1", "") - c.Assert(err, check.IsNil) + require.Nil(t, err) resp, err := cli.Get(ctx, prefix+"/key1") - c.Assert(err, check.IsNil) - c.Assert(string(resp.Kvs[0].Key), check.Equals, "/cdc_etcd_worker_test/finished/key1") - c.Assert(string(resp.Kvs[0].Value), check.Equals, "abcabcfin") + require.Nil(t, err) + require.Equal(t, string(resp.Kvs[0].Key), "/cdc_etcd_worker_test/finished/key1") + require.Equal(t, string(resp.Kvs[0].Value), "abcabcfin") resp, err = cli.Get(ctx, prefix+"/key2") - c.Assert(err, check.IsNil) - c.Assert(resp.Kvs, check.HasLen, 0) + require.Nil(t, err) + require.Len(t, resp.Kvs, 0) err = cli.Unwrap().Close() - c.Assert(err, check.IsNil) + require.Nil(t, err) } type coverReactor struct { @@ -490,13 +481,11 @@ func (r *coverReactor) Tick(ctx context.Context, state ReactorState) (nextState return r.state, cerrors.ErrReactorFinished } -func (s *etcdWorkerSuite) TestCover(c *check.C) { - defer testleak.AfterTest(c)() - +func TestCover(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5) defer cancel() - newClient, closer := setUpTest(c) + newClient, closer := setUpTest(t) defer closer() cli := newClient() @@ -506,20 +495,20 @@ func (s *etcdWorkerSuite) TestCover(c *check.C) { }, &commonReactorState{ state: make(map[string]string), }) - c.Assert(err, check.IsNil) + require.Nil(t, err) err = reactor.Run(ctx, nil, 10*time.Millisecond, "127.0.0.1", "") - c.Assert(err, check.IsNil) + require.Nil(t, err) resp, err := cli.Get(ctx, prefix+"/key1") - c.Assert(err, check.IsNil) - c.Assert(string(resp.Kvs[0].Key), check.Equals, "/cdc_etcd_worker_test/cover/key1") - c.Assert(string(resp.Kvs[0].Value), check.Equals, "abccbaabccbafinfin") + require.Nil(t, err) + require.Equal(t, string(resp.Kvs[0].Key), "/cdc_etcd_worker_test/cover/key1") + require.Equal(t, string(resp.Kvs[0].Value), "abccbaabccbafinfin") resp, err = cli.Get(ctx, prefix+"/key2") - c.Assert(err, check.IsNil) - 
c.Assert(string(resp.Kvs[0].Key), check.Equals, "/cdc_etcd_worker_test/cover/key2") - c.Assert(string(resp.Kvs[0].Value), check.Equals, "fin") + require.Nil(t, err) + require.Equal(t, string(resp.Kvs[0].Key), "/cdc_etcd_worker_test/cover/key2") + require.Equal(t, string(resp.Kvs[0].Value), "fin") err = cli.Unwrap().Close() - c.Assert(err, check.IsNil) + require.Nil(t, err) } type emptyTxnReactor struct { @@ -569,13 +558,11 @@ func (r *emptyTxnReactor) Tick(ctx context.Context, state ReactorState) (nextSta return r.state, cerrors.ErrReactorFinished } -func (s *etcdWorkerSuite) TestEmptyTxn(c *check.C) { - defer testleak.AfterTest(c)() - +func TestEmptyTxn(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5) defer cancel() - newClient, closer := setUpTest(c) + newClient, closer := setUpTest(t) defer closer() cli := newClient() @@ -586,19 +573,19 @@ func (s *etcdWorkerSuite) TestEmptyTxn(c *check.C) { }, &commonReactorState{ state: make(map[string]string), }) - c.Assert(err, check.IsNil) + require.Nil(t, err) err = reactor.Run(ctx, nil, 10*time.Millisecond, "127.0.0.1", "") - c.Assert(err, check.IsNil) + require.Nil(t, err) resp, err := cli.Get(ctx, prefix+"/key1") - c.Assert(err, check.IsNil) - c.Assert(resp.Kvs, check.HasLen, 0) + require.Nil(t, err) + require.Len(t, resp.Kvs, 0) resp, err = cli.Get(ctx, prefix+"/key2") - c.Assert(err, check.IsNil) - c.Assert(string(resp.Kvs[0].Key), check.Equals, "/cdc_etcd_worker_test/empty_txn/key2") - c.Assert(string(resp.Kvs[0].Value), check.Equals, "123") + require.Nil(t, err) + require.Equal(t, string(resp.Kvs[0].Key), "/cdc_etcd_worker_test/empty_txn/key2") + require.Equal(t, string(resp.Kvs[0].Value), "123") err = cli.Unwrap().Close() - c.Assert(err, check.IsNil) + require.Nil(t, err) } type emptyOrNilReactor struct { @@ -638,13 +625,11 @@ func (r *emptyOrNilReactor) Tick(ctx context.Context, state ReactorState) (nextS return r.state, cerrors.ErrReactorFinished } -func (s *etcdWorkerSuite) TestEmptyOrNil(c *check.C) { - defer testleak.AfterTest(c)() - +func TestEmptyOrNil(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5) defer cancel() - newClient, closer := setUpTest(c) + newClient, closer := setUpTest(t) defer closer() cli := newClient() @@ -654,19 +639,19 @@ func (s *etcdWorkerSuite) TestEmptyOrNil(c *check.C) { }, &commonReactorState{ state: make(map[string]string), }) - c.Assert(err, check.IsNil) + require.Nil(t, err) err = reactor.Run(ctx, nil, 10*time.Millisecond, "127.0.0.1", "") - c.Assert(err, check.IsNil) + require.Nil(t, err) resp, err := cli.Get(ctx, prefix+"/key1") - c.Assert(err, check.IsNil) - c.Assert(string(resp.Kvs[0].Key), check.Equals, "/cdc_etcd_worker_test/emptyOrNil/key1") - c.Assert(string(resp.Kvs[0].Value), check.Equals, "") + require.Nil(t, err) + require.Equal(t, string(resp.Kvs[0].Key), "/cdc_etcd_worker_test/emptyOrNil/key1") + require.Equal(t, string(resp.Kvs[0].Value), "") resp, err = cli.Get(ctx, prefix+"/key2") - c.Assert(err, check.IsNil) - c.Assert(resp.Kvs, check.HasLen, 0) + require.Nil(t, err) + require.Len(t, resp.Kvs, 0) err = cli.Unwrap().Close() - c.Assert(err, check.IsNil) + require.Nil(t, err) } type modifyOneReactor struct { @@ -708,20 +693,18 @@ func (r *modifyOneReactor) Tick(ctx context.Context, state ReactorState) (nextSt // TestModifyAfterDelete tests snapshot isolation when there is one modifying transaction delayed in the middle while a deleting transaction // commits. 
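The reactors in these etcd-worker tests (finishedReactor, coverReactor, emptyTxnReactor, emptyOrNilReactor) all stop the same way: once Tick has emitted its final patches, it returns cerrors.ErrReactorFinished, which EtcdWorker.Run reports as a clean, nil-error shutdown; the require.Nil(t, err) checks after each Run call rely on exactly that. A minimal sketch of the contract (the single-tick behaviour is illustrative):

package orchestrator

import (
	"context"

	cerrors "github.com/pingcap/tiflow/pkg/errors"
)

// onceReactor finishes after a single tick, following the same
// shutdown pattern as the reactors above.
type onceReactor struct {
	ticked bool
}

func (r *onceReactor) Tick(ctx context.Context, state ReactorState) (ReactorState, error) {
	if r.ticked {
		// ErrReactorFinished tells EtcdWorker.Run to exit with a nil error.
		return state, cerrors.ErrReactorFinished
	}
	r.ticked = true
	return state, nil
}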
The first transaction should be aborted and retried, and isolation should not be violated. -func (s *etcdWorkerSuite) TestModifyAfterDelete(c *check.C) { - defer testleak.AfterTest(c)() - +func TestModifyAfterDelete(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5) defer cancel() - newClient, closer := setUpTest(c) + newClient, closer := setUpTest(t) defer closer() cli1 := newClient() cli2 := newClient() _, err := cli1.Put(ctx, "/test/key1", "original value") - c.Assert(err, check.IsNil) + require.Nil(t, err) modifyReactor := &modifyOneReactor{ key: []byte("/test/key1"), @@ -731,14 +714,15 @@ func (s *etcdWorkerSuite) TestModifyAfterDelete(c *check.C) { worker1, err := NewEtcdWorker(cli1, "/test", modifyReactor, &commonReactorState{ state: make(map[string]string), }) - c.Assert(err, check.IsNil) + require.Nil(t, err) var wg sync.WaitGroup wg.Add(1) go func() { defer wg.Done() + err := worker1.Run(ctx, nil, time.Millisecond*100, "127.0.0.1", "") - c.Assert(err, check.IsNil) + require.Nil(t, err) }() modifyReactor.waitOnCh <- struct{}{} @@ -750,18 +734,17 @@ func (s *etcdWorkerSuite) TestModifyAfterDelete(c *check.C) { worker2, err := NewEtcdWorker(cli2, "/test", deleteReactor, &commonReactorState{ state: make(map[string]string), }) - c.Assert(err, check.IsNil) + require.Nil(t, err) err = worker2.Run(ctx, nil, time.Millisecond*100, "127.0.0.1", "") - c.Assert(err, check.IsNil) - + require.Nil(t, err) modifyReactor.waitOnCh <- struct{}{} wg.Wait() resp, err := cli1.Get(ctx, "/test/key1") - c.Assert(err, check.IsNil) - c.Assert(resp.Kvs, check.HasLen, 0) - c.Assert(worker1.deleteCounter, check.Equals, int64(1)) + require.Nil(t, err) + require.Len(t, resp.Kvs, 0) + require.Equal(t, worker1.deleteCounter, int64(1)) _ = cli1.Unwrap().Close() _ = cli2.Unwrap().Close() diff --git a/pkg/orchestrator/main_test.go b/pkg/orchestrator/main_test.go new file mode 100644 index 00000000000..5b44e017491 --- /dev/null +++ b/pkg/orchestrator/main_test.go @@ -0,0 +1,24 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
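One caveat in TestModifyAfterDelete above: the migrated require.Nil(t, err) now runs inside the spawned goroutine, and the require family fails through t.FailNow, which the testing package documents as safe only from the goroutine running the test. A more robust shape under the same WaitGroup structure, sketched with a hypothetical runWorker standing in for worker1.Run, records the error and asserts after Wait:

package orchestrator

import (
	"sync"
	"testing"

	"github.com/stretchr/testify/require"
)

// runWorker is a hypothetical stand-in for worker1.Run in the test above.
func runWorker() error { return nil }

func TestWorkerErrSketch(t *testing.T) {
	var wg sync.WaitGroup
	var workerErr error
	wg.Add(1)
	go func() {
		defer wg.Done()
		// Record the error instead of calling require in this goroutine.
		workerErr = runWorker()
	}()
	wg.Wait()
	// Assert on the test goroutine, where t.FailNow is safe.
	require.Nil(t, workerErr)
}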
+ +package orchestrator + +import ( + "testing" + + "github.com/pingcap/tiflow/pkg/leakutil" +) + +func TestMain(m *testing.M) { + leakutil.SetUpLeakTest(m) +} diff --git a/pkg/orchestrator/reactor_state_test.go b/pkg/orchestrator/reactor_state_test.go index 8d51eaa6f4d..442cfec0386 100644 --- a/pkg/orchestrator/reactor_state_test.go +++ b/pkg/orchestrator/reactor_state_test.go @@ -15,38 +15,33 @@ package orchestrator import ( "encoding/json" + "fmt" + "testing" "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - "github.com/pingcap/check" "github.com/pingcap/tiflow/cdc/model" "github.com/pingcap/tiflow/pkg/config" "github.com/pingcap/tiflow/pkg/etcd" "github.com/pingcap/tiflow/pkg/orchestrator/util" - "github.com/pingcap/tiflow/pkg/util/testleak" + "github.com/stretchr/testify/require" ) -type stateSuite struct{} - -var _ = check.Suite(&stateSuite{}) - -func (s *stateSuite) TestCheckCaptureAlive(c *check.C) { - defer testleak.AfterTest(c)() +func TestCheckCaptureAlive(t *testing.T) { state := NewChangefeedReactorState("test") - stateTester := NewReactorStateTester(c, state, nil) + stateTester := NewReactorStateTester(t, state, nil) state.CheckCaptureAlive("6bbc01c8-0605-4f86-a0f9-b3119109b225") - c.Assert(stateTester.ApplyPatches(), check.ErrorMatches, ".*[CDC:ErrLeaseExpired].*") + require.Contains(t, stateTester.ApplyPatches().Error(), "[CDC:ErrLeaseExpired]") err := stateTester.Update("/tidb/cdc/capture/6bbc01c8-0605-4f86-a0f9-b3119109b225", []byte(`{"id":"6bbc01c8-0605-4f86-a0f9-b3119109b225","address":"127.0.0.1:8300"}`)) - c.Assert(err, check.IsNil) + require.Nil(t, err) state.CheckCaptureAlive("6bbc01c8-0605-4f86-a0f9-b3119109b225") stateTester.MustApplyPatches() } -func (s *stateSuite) TestChangefeedStateUpdate(c *check.C) { - defer testleak.AfterTest(c)() +func TestChangefeedStateUpdate(t *testing.T) { createTime, err := time.Parse("2006-01-02", "2020-02-02") - c.Assert(err, check.IsNil) + require.Nil(t, err) testCases := []struct { changefeedID string updateKey []string @@ -320,24 +315,23 @@ func (s *stateSuite) TestChangefeedStateUpdate(c *check.C) { value = nil } err = state.Update(util.NewEtcdKey(k), value, false) - c.Assert(err, check.IsNil) + require.Nil(t, err) } - c.Assert(cmp.Equal(state, &tc.expected, cmpopts.IgnoreUnexported(ChangefeedReactorState{})), check.IsTrue, - check.Commentf("%d,%s", i, cmp.Diff(state, &tc.expected, cmpopts.IgnoreUnexported(ChangefeedReactorState{})))) + require.True(t, cmp.Equal(state, &tc.expected, cmpopts.IgnoreUnexported(ChangefeedReactorState{})), + fmt.Sprintf("%d,%s", i, cmp.Diff(state, &tc.expected, cmpopts.IgnoreUnexported(ChangefeedReactorState{})))) } } -func (s *stateSuite) TestPatchInfo(c *check.C) { - defer testleak.AfterTest(c)() +func TestPatchInfo(t *testing.T) { state := NewChangefeedReactorState("test1") - stateTester := NewReactorStateTester(c, state, nil) + stateTester := NewReactorStateTester(t, state, nil) state.PatchInfo(func(info *model.ChangeFeedInfo) (*model.ChangeFeedInfo, bool, error) { - c.Assert(info, check.IsNil) + require.Nil(t, info) return &model.ChangeFeedInfo{SinkURI: "123", Config: &config.ReplicaConfig{}}, true, nil }) stateTester.MustApplyPatches() defaultConfig := config.GetDefaultReplicaConfig() - c.Assert(state.Info, check.DeepEquals, &model.ChangeFeedInfo{ + require.Equal(t, state.Info, &model.ChangeFeedInfo{ SinkURI: "123", Engine: model.SortUnified, Config: &config.ReplicaConfig{ @@ -354,7 +348,7 @@ func (s *stateSuite) TestPatchInfo(c *check.C) { return info, true, nil }) 
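The new main_test.go above replaces the per-test defer testleak.AfterTest(c)() calls with a single package-level hook. Assuming pkg/leakutil follows the common pattern of wrapping go.uber.org/goleak (its implementation is not shown in this patch), an equivalent TestMain written directly against goleak would be:

package orchestrator

import (
	"testing"

	"go.uber.org/goleak"
)

// TestMain verifies that no goroutines are left running once every
// test in the package has finished, then exits with the tests' status.
func TestMain(m *testing.M) {
	goleak.VerifyTestMain(m)
}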
stateTester.MustApplyPatches() - c.Assert(state.Info, check.DeepEquals, &model.ChangeFeedInfo{ + require.Equal(t, state.Info, &model.ChangeFeedInfo{ SinkURI: "123", StartTs: 6, Engine: model.SortUnified, @@ -371,52 +365,50 @@ func (s *stateSuite) TestPatchInfo(c *check.C) { return nil, true, nil }) stateTester.MustApplyPatches() - c.Assert(state.Info, check.IsNil) + require.Nil(t, state.Info) } -func (s *stateSuite) TestPatchStatus(c *check.C) { - defer testleak.AfterTest(c)() +func TestPatchStatus(t *testing.T) { state := NewChangefeedReactorState("test1") - stateTester := NewReactorStateTester(c, state, nil) + stateTester := NewReactorStateTester(t, state, nil) state.PatchStatus(func(status *model.ChangeFeedStatus) (*model.ChangeFeedStatus, bool, error) { - c.Assert(status, check.IsNil) + require.Nil(t, status) return &model.ChangeFeedStatus{CheckpointTs: 5}, true, nil }) stateTester.MustApplyPatches() - c.Assert(state.Status, check.DeepEquals, &model.ChangeFeedStatus{CheckpointTs: 5}) + require.Equal(t, state.Status, &model.ChangeFeedStatus{CheckpointTs: 5}) state.PatchStatus(func(status *model.ChangeFeedStatus) (*model.ChangeFeedStatus, bool, error) { status.ResolvedTs = 6 return status, true, nil }) stateTester.MustApplyPatches() - c.Assert(state.Status, check.DeepEquals, &model.ChangeFeedStatus{CheckpointTs: 5, ResolvedTs: 6}) + require.Equal(t, state.Status, &model.ChangeFeedStatus{CheckpointTs: 5, ResolvedTs: 6}) state.PatchStatus(func(status *model.ChangeFeedStatus) (*model.ChangeFeedStatus, bool, error) { return nil, true, nil }) stateTester.MustApplyPatches() - c.Assert(state.Status, check.IsNil) + require.Nil(t, state.Status) } -func (s *stateSuite) TestPatchTaskPosition(c *check.C) { - defer testleak.AfterTest(c)() +func TestPatchTaskPosition(t *testing.T) { state := NewChangefeedReactorState("test1") - stateTester := NewReactorStateTester(c, state, nil) + stateTester := NewReactorStateTester(t, state, nil) captureID1 := "capture1" captureID2 := "capture2" state.PatchTaskPosition(captureID1, func(position *model.TaskPosition) (*model.TaskPosition, bool, error) { - c.Assert(position, check.IsNil) + require.Nil(t, position) return &model.TaskPosition{ CheckPointTs: 1, }, true, nil }) state.PatchTaskPosition(captureID2, func(position *model.TaskPosition) (*model.TaskPosition, bool, error) { - c.Assert(position, check.IsNil) + require.Nil(t, position) return &model.TaskPosition{ CheckPointTs: 2, }, true, nil }) stateTester.MustApplyPatches() - c.Assert(state.TaskPositions, check.DeepEquals, map[string]*model.TaskPosition{ + require.Equal(t, state.TaskPositions, map[string]*model.TaskPosition{ captureID1: { CheckPointTs: 1, }, @@ -433,7 +425,7 @@ func (s *stateSuite) TestPatchTaskPosition(c *check.C) { return position, true, nil }) stateTester.MustApplyPatches() - c.Assert(state.TaskPositions, check.DeepEquals, map[string]*model.TaskPosition{ + require.Equal(t, state.TaskPositions, map[string]*model.TaskPosition{ captureID1: { CheckPointTs: 3, }, @@ -453,7 +445,7 @@ func (s *stateSuite) TestPatchTaskPosition(c *check.C) { return position, true, nil }) stateTester.MustApplyPatches() - c.Assert(state.TaskPositions, check.DeepEquals, map[string]*model.TaskPosition{ + require.Equal(t, state.TaskPositions, map[string]*model.TaskPosition{ captureID1: { CheckPointTs: 3, Count: 6, @@ -461,26 +453,25 @@ func (s *stateSuite) TestPatchTaskPosition(c *check.C) { }) } -func (s *stateSuite) TestPatchTaskStatus(c *check.C) { - defer testleak.AfterTest(c)() +func TestPatchTaskStatus(t *testing.T) 
{ state := NewChangefeedReactorState("test1") - stateTester := NewReactorStateTester(c, state, nil) + stateTester := NewReactorStateTester(t, state, nil) captureID1 := "capture1" captureID2 := "capture2" state.PatchTaskStatus(captureID1, func(status *model.TaskStatus) (*model.TaskStatus, bool, error) { - c.Assert(status, check.IsNil) + require.Nil(t, status) return &model.TaskStatus{ Tables: map[model.TableID]*model.TableReplicaInfo{45: {StartTs: 1}}, }, true, nil }) state.PatchTaskStatus(captureID2, func(status *model.TaskStatus) (*model.TaskStatus, bool, error) { - c.Assert(status, check.IsNil) + require.Nil(t, status) return &model.TaskStatus{ Tables: map[model.TableID]*model.TableReplicaInfo{46: {StartTs: 1}}, }, true, nil }) stateTester.MustApplyPatches() - c.Assert(state.TaskStatuses, check.DeepEquals, map[model.CaptureID]*model.TaskStatus{ + require.Equal(t, state.TaskStatuses, map[model.CaptureID]*model.TaskStatus{ captureID1: {Tables: map[model.TableID]*model.TableReplicaInfo{45: {StartTs: 1}}}, captureID2: {Tables: map[model.TableID]*model.TableReplicaInfo{46: {StartTs: 1}}}, }) @@ -493,7 +484,7 @@ func (s *stateSuite) TestPatchTaskStatus(c *check.C) { return status, true, nil }) stateTester.MustApplyPatches() - c.Assert(state.TaskStatuses, check.DeepEquals, map[model.CaptureID]*model.TaskStatus{ + require.Equal(t, state.TaskStatuses, map[model.CaptureID]*model.TaskStatus{ captureID1: {Tables: map[model.TableID]*model.TableReplicaInfo{45: {StartTs: 1}, 46: {StartTs: 2}}}, captureID2: {Tables: map[model.TableID]*model.TableReplicaInfo{46: {StartTs: 2}}}, }) @@ -501,27 +492,26 @@ func (s *stateSuite) TestPatchTaskStatus(c *check.C) { return nil, true, nil }) stateTester.MustApplyPatches() - c.Assert(state.TaskStatuses, check.DeepEquals, map[model.CaptureID]*model.TaskStatus{ + require.Equal(t, state.TaskStatuses, map[model.CaptureID]*model.TaskStatus{ captureID1: {Tables: map[model.TableID]*model.TableReplicaInfo{45: {StartTs: 1}, 46: {StartTs: 2}}}, }) } -func (s *stateSuite) TestPatchTaskWorkload(c *check.C) { - defer testleak.AfterTest(c)() +func TestPatchTaskWorkload(t *testing.T) { state := NewChangefeedReactorState("test1") - stateTester := NewReactorStateTester(c, state, nil) + stateTester := NewReactorStateTester(t, state, nil) captureID1 := "capture1" captureID2 := "capture2" state.PatchTaskWorkload(captureID1, func(workload model.TaskWorkload) (model.TaskWorkload, bool, error) { - c.Assert(workload, check.IsNil) + require.Nil(t, workload) return model.TaskWorkload{45: {Workload: 1}}, true, nil }) state.PatchTaskWorkload(captureID2, func(workload model.TaskWorkload) (model.TaskWorkload, bool, error) { - c.Assert(workload, check.IsNil) + require.Nil(t, workload) return model.TaskWorkload{46: {Workload: 1}}, true, nil }) stateTester.MustApplyPatches() - c.Assert(state.Workloads, check.DeepEquals, map[model.CaptureID]model.TaskWorkload{ + require.Equal(t, state.Workloads, map[model.CaptureID]model.TaskWorkload{ captureID1: {45: {Workload: 1}}, captureID2: {46: {Workload: 1}}, }) @@ -534,7 +524,7 @@ func (s *stateSuite) TestPatchTaskWorkload(c *check.C) { return workload, true, nil }) stateTester.MustApplyPatches() - c.Assert(state.Workloads, check.DeepEquals, map[model.CaptureID]model.TaskWorkload{ + require.Equal(t, state.Workloads, map[model.CaptureID]model.TaskWorkload{ captureID1: {45: {Workload: 1}, 46: {Workload: 2}}, captureID2: {45: {Workload: 3}, 46: {Workload: 1}}, }) @@ -542,13 +532,12 @@ func (s *stateSuite) TestPatchTaskWorkload(c *check.C) { return nil, true, 
nil }) stateTester.MustApplyPatches() - c.Assert(state.Workloads, check.DeepEquals, map[model.CaptureID]model.TaskWorkload{ + require.Equal(t, state.Workloads, map[model.CaptureID]model.TaskWorkload{ captureID1: {45: {Workload: 1}, 46: {Workload: 2}}, }) } -func (s *stateSuite) TestGlobalStateUpdate(c *check.C) { - defer testleak.AfterTest(c)() +func TestGlobalStateUpdate(t *testing.T) { testCases := []struct { updateKey []string updateValue []string @@ -647,26 +636,25 @@ func (s *stateSuite) TestGlobalStateUpdate(c *check.C) { value = nil } err := state.Update(util.NewEtcdKey(k), value, false) - c.Assert(err, check.IsNil) + require.Nil(t, err) } - c.Assert(cmp.Equal(state, &tc.expected, cmpopts.IgnoreUnexported(GlobalReactorState{}, ChangefeedReactorState{})), check.IsTrue, - check.Commentf("%s", cmp.Diff(state, &tc.expected, cmpopts.IgnoreUnexported(GlobalReactorState{}, ChangefeedReactorState{})))) + require.True(t, cmp.Equal(state, &tc.expected, cmpopts.IgnoreUnexported(GlobalReactorState{}, ChangefeedReactorState{})), + cmp.Diff(state, &tc.expected, cmpopts.IgnoreUnexported(GlobalReactorState{}, ChangefeedReactorState{}))) } } -func (s *stateSuite) TestCaptureChangeHooks(c *check.C) { - defer testleak.AfterTest(c)() +func TestCaptureChangeHooks(t *testing.T) { state := NewGlobalState() var callCount int state.onCaptureAdded = func(captureID model.CaptureID, addr string) { callCount++ - c.Check(captureID, check.Equals, "capture-1") - c.Check(addr, check.Equals, "ip-1:8300") + require.Equal(t, captureID, "capture-1") + require.Equal(t, addr, "ip-1:8300") } state.onCaptureRemoved = func(captureID model.CaptureID) { callCount++ - c.Check(captureID, check.Equals, "capture-1") + require.Equal(t, captureID, "capture-1") } captureInfo := &model.CaptureInfo{ @@ -674,21 +662,20 @@ func (s *stateSuite) TestCaptureChangeHooks(c *check.C) { AdvertiseAddr: "ip-1:8300", } captureInfoBytes, err := json.Marshal(captureInfo) - c.Check(err, check.IsNil) + require.Nil(t, err) err = state.Update(util.NewEtcdKey(etcd.CaptureInfoKeyPrefix+"/capture-1"), captureInfoBytes, false) - c.Check(err, check.IsNil) - c.Check(callCount, check.Equals, 1) + require.Nil(t, err) + require.Equal(t, callCount, 1) err = state.Update(util.NewEtcdKey(etcd.CaptureInfoKeyPrefix+"/capture-1"), nil /* delete */, false) - c.Check(err, check.IsNil) - c.Check(callCount, check.Equals, 2) + require.Nil(t, err) + require.Equal(t, callCount, 2) } -func (s *stateSuite) TestCheckChangefeedNormal(c *check.C) { - defer testleak.AfterTest(c)() +func TestCheckChangefeedNormal(t *testing.T) { state := NewChangefeedReactorState("test1") - stateTester := NewReactorStateTester(c, state, nil) + stateTester := NewReactorStateTester(t, state, nil) state.CheckChangefeedNormal() stateTester.MustApplyPatches() state.PatchInfo(func(info *model.ChangeFeedInfo) (*model.ChangeFeedInfo, bool, error) { @@ -699,7 +686,7 @@ func (s *stateSuite) TestCheckChangefeedNormal(c *check.C) { }) state.CheckChangefeedNormal() stateTester.MustApplyPatches() - c.Assert(state.Status.ResolvedTs, check.Equals, uint64(1)) + require.Equal(t, state.Status.ResolvedTs, uint64(1)) state.PatchInfo(func(info *model.ChangeFeedInfo) (*model.ChangeFeedInfo, bool, error) { info.AdminJobType = model.AdminStop @@ -711,7 +698,7 @@ func (s *stateSuite) TestCheckChangefeedNormal(c *check.C) { }) state.CheckChangefeedNormal() stateTester.MustApplyPatches() - c.Assert(state.Status.ResolvedTs, check.Equals, uint64(1)) + require.Equal(t, state.Status.ResolvedTs, uint64(1)) 
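TestCaptureChangeHooks above leans on the convention that passing a nil value to ReactorState.Update marks a deleted key (the nil /* delete */ argument), which is what fires onCaptureRemoved. A minimal sketch of a store honoring that convention; this is not the real GlobalReactorState, and GetPatches is omitted:

package orchestrator

import (
	"github.com/pingcap/tiflow/pkg/orchestrator/util"
)

// kvState is a minimal Update-only store illustrating the convention
// used above: a nil value means the key was deleted from etcd.
type kvState struct {
	entries map[string]string
}

func newKVState() *kvState {
	return &kvState{entries: make(map[string]string)}
}

func (s *kvState) Update(key util.EtcdKey, value []byte, isInit bool) error {
	if value == nil {
		// Deletions arrive as nil values, mirroring an etcd DELETE event.
		delete(s.entries, key.String())
		return nil
	}
	s.entries[key.String()] = string(value)
	return nil
}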
state.PatchStatus(func(status *model.ChangeFeedStatus) (*model.ChangeFeedStatus, bool, error) { status.ResolvedTs = 2 @@ -719,5 +706,5 @@ func (s *stateSuite) TestCheckChangefeedNormal(c *check.C) { }) state.CheckChangefeedNormal() stateTester.MustApplyPatches() - c.Assert(state.Status.ResolvedTs, check.Equals, uint64(2)) + require.Equal(t, state.Status.ResolvedTs, uint64(2)) } diff --git a/pkg/orchestrator/reactor_state_tester.go b/pkg/orchestrator/reactor_state_tester.go index 72b5f5e2c0a..5e724616697 100644 --- a/pkg/orchestrator/reactor_state_tester.go +++ b/pkg/orchestrator/reactor_state_tester.go @@ -14,55 +14,57 @@ package orchestrator import ( - "github.com/pingcap/check" + "testing" + "github.com/pingcap/errors" cerrors "github.com/pingcap/tiflow/pkg/errors" "github.com/pingcap/tiflow/pkg/orchestrator/util" + "github.com/stretchr/testify/require" ) // ReactorStateTester is a helper struct for unit-testing an implementer of ReactorState type ReactorStateTester struct { - c *check.C + t *testing.T state ReactorState kvEntries map[string]string } // NewReactorStateTester creates a new ReactorStateTester -func NewReactorStateTester(c *check.C, state ReactorState, initKVEntries map[string]string) *ReactorStateTester { +func NewReactorStateTester(t *testing.T, state ReactorState, initKVEntries map[string]string) *ReactorStateTester { if initKVEntries == nil { initKVEntries = make(map[string]string) } for k, v := range initKVEntries { err := state.Update(util.NewEtcdKey(k), []byte(v), true) - c.Assert(err, check.IsNil) + require.NoError(t, err) } return &ReactorStateTester{ - c: c, + t: t, state: state, kvEntries: initKVEntries, } } // Update is used to update keys in the mocked kv-store. -func (t *ReactorStateTester) Update(key string, value []byte) error { +func (r *ReactorStateTester) Update(key string, value []byte) error { k := util.NewEtcdKey(key) - err := t.state.Update(k, value, false) + err := r.state.Update(k, value, false) if err != nil { return errors.Trace(err) } if value != nil { - t.kvEntries[key] = string(value) + r.kvEntries[key] = string(value) } else { - delete(t.kvEntries, key) + delete(r.kvEntries, key) } return nil } // ApplyPatches calls the GetPatches method on the ReactorState and applies the changes to the mocked kv-store. 
-func (t *ReactorStateTester) ApplyPatches() error { - patchGroups := t.state.GetPatches() +func (r *ReactorStateTester) ApplyPatches() error { + patchGroups := r.state.GetPatches() for _, patches := range patchGroups { - err := t.applyPatches(patches) + err := r.applyPatches(patches) if err != nil { return err } @@ -70,11 +72,11 @@ return nil } -func (t *ReactorStateTester) applyPatches(patches []DataPatch) error { +func (r *ReactorStateTester) applyPatches(patches []DataPatch) error { RetryLoop: for { tmpKVEntries := make(map[util.EtcdKey][]byte) - for k, v := range t.kvEntries { + for k, v := range r.kvEntries { tmpKVEntries[util.NewEtcdKey(k)] = []byte(v) } changedSet := make(map[util.EtcdKey]struct{}) @@ -89,14 +91,14 @@ } } for k := range changedSet { - err := t.state.Update(k, tmpKVEntries[k], false) + err := r.state.Update(k, tmpKVEntries[k], false) if err != nil { return err } if value := tmpKVEntries[k]; value != nil { - t.kvEntries[k.String()] = string(value) + r.kvEntries[k.String()] = string(value) } else { - delete(t.kvEntries, k.String()) + delete(r.kvEntries, k.String()) } } return nil @@ -104,16 +106,16 @@ } // MustApplyPatches calls ApplyPatches and asserts that it succeeds -func (t *ReactorStateTester) MustApplyPatches() { - t.c.Assert(t.ApplyPatches(), check.IsNil) +func (r *ReactorStateTester) MustApplyPatches() { + require.Nil(r.t, r.ApplyPatches()) } // MustUpdate calls Update and asserts that it succeeds -func (t *ReactorStateTester) MustUpdate(key string, value []byte) { - t.c.Assert(t.Update(key, value), check.IsNil) +func (r *ReactorStateTester) MustUpdate(key string, value []byte) { + require.Nil(r.t, r.Update(key, value)) } // KVEntries returns the contents of the mocked KV store. -func (t *ReactorStateTester) KVEntries() map[string]string { - return t.kvEntries +func (r *ReactorStateTester) KVEntries() map[string]string { + return r.kvEntries }
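For reference, the migrated ReactorStateTester is driven the way the tests earlier in this patch show: construct it with a *testing.T and optional seed entries, feed the state through MustUpdate, and flush pending patches with MustApplyPatches. A short usage sketch against the API above (the key and value are illustrative):

package orchestrator

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestTesterUsageSketch(t *testing.T) {
	state := NewChangefeedReactorState("test")
	tester := NewReactorStateTester(t, state, map[string]string{
		// Seed entries are applied through state.Update with isInit=true.
		"/tidb/cdc/capture/capture-1": `{"id":"capture-1","address":"127.0.0.1:8300"}`,
	})
	// MustUpdate fails the test if the underlying Update returns an error.
	tester.MustUpdate("/tidb/cdc/changefeed/info/test", []byte(`{"sink-uri":"blackhole://"}`))
	// MustApplyPatches pushes whatever GetPatches produced back into the mocked kv-store.
	tester.MustApplyPatches()
	require.Contains(t, tester.KVEntries(), "/tidb/cdc/changefeed/info/test")
}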