diff --git a/pkg/ccl/changefeedccl/BUILD.bazel b/pkg/ccl/changefeedccl/BUILD.bazel index cd41fc5367e9..46f84a449e2c 100644 --- a/pkg/ccl/changefeedccl/BUILD.bazel +++ b/pkg/ccl/changefeedccl/BUILD.bazel @@ -138,7 +138,6 @@ go_test( "changefeed_test.go", "encoder_test.go", "event_processing_test.go", - "helpers_tenant_shim_test.go", "helpers_test.go", "main_test.go", "name_test.go", @@ -173,7 +172,6 @@ go_test( "//pkg/cloud", "//pkg/cloud/impl:cloudimpl", "//pkg/clusterversion", - "//pkg/config", "//pkg/gossip", "//pkg/jobs", "//pkg/jobs/jobspb", @@ -182,17 +180,14 @@ go_test( "//pkg/kv/kvclient/kvcoord", "//pkg/kv/kvserver", "//pkg/kv/kvserver/kvserverbase", - "//pkg/kv/kvserver/liveness/livenesspb", "//pkg/kv/kvserver/protectedts", "//pkg/kv/kvserver/protectedts/ptpb", "//pkg/roachpb", - "//pkg/rpc", "//pkg/security/securityassets", "//pkg/security/securitytest", "//pkg/security/username", "//pkg/server", "//pkg/server/serverpb", - "//pkg/server/status", "//pkg/server/telemetry", "//pkg/settings/cluster", "//pkg/spanconfig", @@ -241,7 +236,6 @@ go_test( "//pkg/util/randutil", "//pkg/util/retry", "//pkg/util/span", - "//pkg/util/stop", "//pkg/util/syncutil", "//pkg/util/timeutil", "//pkg/util/timeutil/pgdate", diff --git a/pkg/ccl/changefeedccl/alter_changefeed_test.go b/pkg/ccl/changefeedccl/alter_changefeed_test.go index 81d05b5d9d24..aa9e9ba1679f 100644 --- a/pkg/ccl/changefeedccl/alter_changefeed_test.go +++ b/pkg/ccl/changefeedccl/alter_changefeed_test.go @@ -10,7 +10,6 @@ package changefeedccl import ( "context" - gosql "database/sql" "fmt" "sync/atomic" "testing" @@ -23,7 +22,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" - "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/sql/catalog/desctestutils" "github.com/cockroachdb/cockroach/pkg/sql/execinfra" @@ -45,8 +43,8 @@ func TestAlterChangefeedAddTarget(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY)`) @@ -75,15 +73,15 @@ func TestAlterChangefeedAddTarget(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedAddTargetFamily(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING, FAMILY onlya (a), FAMILY onlyb (b))`) testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo FAMILY onlya`) @@ -113,15 +111,16 @@ func TestAlterChangefeedAddTargetFamily(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + // TODO: Figure out why this freezes on other sinks (ex: webhook) + cdcTest(t, testFn, feedTestForceSink("kafka")) } func TestAlterChangefeedSwitchFamily(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := 
sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING, FAMILY onlya (a), FAMILY onlyb (b))`) testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo FAMILY onlya`) @@ -150,15 +149,16 @@ func TestAlterChangefeedSwitchFamily(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + // TODO: Figure out why this freezes on other sinks (ex: cloudstorage) + cdcTest(t, testFn, feedTestForceSink("kafka")) } func TestAlterChangefeedDropTarget(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY)`) @@ -185,15 +185,15 @@ func TestAlterChangefeedDropTarget(t *testing.T) { assertPayloads(t, testFeed, nil) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedDropTargetFamily(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING, FAMILY onlya (a), FAMILY onlyb (b))`) testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo FAMILY onlya, foo FAMILY onlyb`) @@ -219,15 +219,15 @@ func TestAlterChangefeedDropTargetFamily(t *testing.T) { } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedSetDiffOption(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo`) @@ -250,15 +250,15 @@ func TestAlterChangefeedSetDiffOption(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedUnsetDiffOption(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo WITH diff`) @@ -281,15 +281,16 @@ func TestAlterChangefeedUnsetDiffOption(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + // TODO: Figure out why this fails on other sinks + cdcTest(t, testFn, feedTestForceSink("kafka")) } func TestAlterChangefeedErrors(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY 
KEY)`) sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY)`) testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo`) @@ -372,15 +373,15 @@ func TestAlterChangefeedErrors(t *testing.T) { ) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedDropAllTargetsError(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY)`) @@ -399,15 +400,15 @@ func TestAlterChangefeedDropAllTargetsError(t *testing.T) { ) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedTelemetry(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1)`) sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY)`) @@ -435,7 +436,7 @@ func TestAlterChangefeedTelemetry(t *testing.T) { require.Equal(t, int32(1), counts[`changefeed.alter.unset_options.1`]) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } // The purpose of this test is to ensure that the ALTER CHANGEFEED statement @@ -497,8 +498,9 @@ func TestAlterChangefeedChangeSinkTypeError(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) + sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo`) @@ -516,18 +518,18 @@ func TestAlterChangefeedChangeSinkTypeError(t *testing.T) { ) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestForceSink("kafka")) } func TestAlterChangefeedChangeSinkURI(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - registry := f.Server().JobRegistry().(*jobs.Registry) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + registry := s.Server.JobRegistry().(*jobs.Registry) ctx := context.Background() - sqlDB := sqlutils.MakeSQLRunner(db) + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo`) @@ -554,19 +556,19 @@ func TestAlterChangefeedChangeSinkURI(t *testing.T) { require.Equal(t, newSinkURI, details.SinkURI) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestForceSink("kafka")) } func TestAlterChangefeedAddTargetErrors(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) sqlDB.Exec(t, 
`INSERT INTO foo (a) SELECT * FROM generate_series(1, 1000)`) - knobs := f.Server().(*server.TestServer).Cfg.TestingKnobs. + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). Changefeed.(*TestingKnobs) @@ -621,7 +623,7 @@ func TestAlterChangefeedAddTargetErrors(t *testing.T) { // Wait for the high water mark to be non-zero. testutils.SucceedsSoon(t, func() error { - registry := f.Server().JobRegistry().(*jobs.Registry) + registry := s.Server.JobRegistry().(*jobs.Registry) job, err := registry.LoadJob(context.Background(), feed.JobID()) require.NoError(t, err) prog := job.Progress() @@ -643,15 +645,15 @@ func TestAlterChangefeedAddTargetErrors(t *testing.T) { ) } - t.Run(`kafka`, kafkaTest(testFn, feedTestNoTenants)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedDatabaseQualifiedNames(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE DATABASE movr`) sqlDB.Exec(t, `CREATE TABLE movr.drivers (id INT PRIMARY KEY, name STRING)`) sqlDB.Exec(t, `CREATE TABLE movr.users (id INT PRIMARY KEY, name STRING)`) @@ -692,15 +694,15 @@ func TestAlterChangefeedDatabaseQualifiedNames(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedDatabaseScope(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE DATABASE movr`) sqlDB.Exec(t, `CREATE DATABASE new_movr`) @@ -737,15 +739,15 @@ func TestAlterChangefeedDatabaseScope(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedDatabaseScopeUnqualifiedName(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE DATABASE movr`) sqlDB.Exec(t, `CREATE DATABASE new_movr`) @@ -786,15 +788,15 @@ func TestAlterChangefeedDatabaseScopeUnqualifiedName(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedColumnFamilyDatabaseScope(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE DATABASE movr`) sqlDB.Exec(t, `CREATE TABLE movr.drivers (id INT PRIMARY KEY, name STRING, FAMILY onlyid (id), FAMILY onlyname (name))`) @@ -831,15 +833,15 @@ func TestAlterChangefeedColumnFamilyDatabaseScope(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedAlterTableName(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f 
cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE DATABASE movr`) sqlDB.Exec(t, `CREATE TABLE movr.users (id INT PRIMARY KEY, name STRING)`) sqlDB.Exec(t, @@ -888,15 +890,15 @@ func TestAlterChangefeedAlterTableName(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedInitialScan(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1), (2), (3)`) sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY)`) @@ -930,15 +932,15 @@ func TestAlterChangefeedInitialScan(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedNoInitialScan(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1), (2), (3)`) sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY)`) @@ -973,7 +975,7 @@ func TestAlterChangefeedNoInitialScan(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedAddTargetsDuringSchemaChangeError(t *testing.T) { @@ -982,11 +984,11 @@ func TestAlterChangefeedAddTargetsDuringSchemaChangeError(t *testing.T) { rnd, _ := randutil.NewPseudoRand() - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServerWithSystem, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) disableDeclarativeSchemaChangesForTest(t, sqlDB) - knobs := f.Server().(*server.TestServer).Cfg.TestingKnobs. + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). Changefeed.(*TestingKnobs) @@ -1005,7 +1007,7 @@ func TestAlterChangefeedAddTargetsDuringSchemaChangeError(t *testing.T) { testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo WITH resolved = '1s', no_initial_scan`) jobFeed := testFeed.(cdctest.EnterpriseTestFeed) - jobRegistry := f.Server().JobRegistry().(*jobs.Registry) + jobRegistry := s.Server.JobRegistry().(*jobs.Registry) // Kafka feeds are not buffered, so we have to consume messages. g := ctxgroup.WithContext(context.Background()) @@ -1045,13 +1047,13 @@ func TestAlterChangefeedAddTargetsDuringSchemaChangeError(t *testing.T) { var maxCheckpointSize int64 = 100 << 20 // Checkpoint progress frequently, and set the checkpoint size limit. 
changefeedbase.FrontierCheckpointFrequency.Override( - context.Background(), &f.Server().ClusterSettings().SV, 10*time.Millisecond) + context.Background(), &s.Server.ClusterSettings().SV, 10*time.Millisecond) changefeedbase.FrontierCheckpointMaxBytes.Override( - context.Background(), &f.Server().ClusterSettings().SV, maxCheckpointSize) + context.Background(), &s.Server.ClusterSettings().SV, maxCheckpointSize) // Note the tableSpan to avoid resolved events that leave no gaps fooDesc := desctestutils.TestingGetPublicTableDescriptor( - f.Server().DB(), keys.SystemSQLCodec, "d", "foo") + s.SystemServer.DB(), s.Codec, "d", "foo") tableSpan := fooDesc.PrimaryIndexSpan(keys.SystemSQLCodec) // ShouldSkipResolved should ensure that once the backfill begins, the following resolved events @@ -1118,7 +1120,7 @@ func TestAlterChangefeedAddTargetsDuringSchemaChangeError(t *testing.T) { sqlDB.ExpectErr(t, errMsg, fmt.Sprintf(`ALTER CHANGEFEED %d ADD bar WITH initial_scan`, jobFeed.JobID())) } - t.Run(`kafka`, kafkaTest(testFn, feedTestNoTenants)) + cdcTestWithSystem(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedAddTargetsDuringBackfill(t *testing.T) { @@ -1130,8 +1132,8 @@ func TestAlterChangefeedAddTargetsDuringBackfill(t *testing.T) { const maxCheckpointSize = 1 << 20 const numRowsPerTable = 1000 - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServerWithSystem, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo(val INT PRIMARY KEY)`) sqlDB.Exec(t, `INSERT INTO foo (val) SELECT * FROM generate_series(0, $1)`, numRowsPerTable-1) @@ -1139,10 +1141,10 @@ func TestAlterChangefeedAddTargetsDuringBackfill(t *testing.T) { sqlDB.Exec(t, `INSERT INTO bar (val) SELECT * FROM generate_series(0, $1)`, numRowsPerTable-1) fooDesc := desctestutils.TestingGetPublicTableDescriptor( - f.Server().DB(), keys.SystemSQLCodec, "d", "foo") - fooTableSpan := fooDesc.PrimaryIndexSpan(keys.SystemSQLCodec) + s.SystemServer.DB(), s.Codec, "d", "foo") + fooTableSpan := fooDesc.PrimaryIndexSpan(s.Codec) - knobs := f.Server().(*server.TestServer).Cfg.TestingKnobs. + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). Changefeed.(*TestingKnobs) @@ -1180,11 +1182,11 @@ func TestAlterChangefeedAddTargetsDuringBackfill(t *testing.T) { // Checkpoint progress frequently, and set the checkpoint size limit. 
changefeedbase.FrontierCheckpointFrequency.Override( - context.Background(), &f.Server().ClusterSettings().SV, 1) + context.Background(), &s.Server.ClusterSettings().SV, 1) changefeedbase.FrontierCheckpointMaxBytes.Override( - context.Background(), &f.Server().ClusterSettings().SV, maxCheckpointSize) + context.Background(), &s.Server.ClusterSettings().SV, maxCheckpointSize) - registry := f.Server().JobRegistry().(*jobs.Registry) + registry := s.Server.JobRegistry().(*jobs.Registry) testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo WITH resolved = '100ms'`) g := ctxgroup.WithContext(context.Background()) @@ -1271,15 +1273,15 @@ func TestAlterChangefeedAddTargetsDuringBackfill(t *testing.T) { } } - t.Run(`kafka`, kafkaTest(testFn, feedTestNoTenants)) + cdcTestWithSystem(t, testFn, feedTestEnterpriseSinks) } func TestAlterChangefeedUpdateFilter(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) testFeed := feed(t, f, `CREATE CHANGEFEED FOR foo`) @@ -1297,7 +1299,7 @@ func TestAlterChangefeedUpdateFilter(t *testing.T) { feed, ok := testFeed.(cdctest.EnterpriseTestFeed) require.True(t, ok) - require.NoError(t, feed.TickHighWaterMark(f.Server().Clock().Now())) + require.NoError(t, feed.TickHighWaterMark(s.Server.Clock().Now())) require.NoError(t, feed.Pause()) // Try to set an invalid filter (column b is not part of primary key). @@ -1320,7 +1322,7 @@ func TestAlterChangefeedUpdateFilter(t *testing.T) { }) // Pause again, clear out filter and verify we get expected values. - require.NoError(t, feed.TickHighWaterMark(f.Server().Clock().Now())) + require.NoError(t, feed.TickHighWaterMark(s.Server.Clock().Now())) require.NoError(t, feed.Pause()) // Set filter to emit a > 4. We expect to see update row 5, and onward. @@ -1342,5 +1344,5 @@ func TestAlterChangefeedUpdateFilter(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } diff --git a/pkg/ccl/changefeedccl/cdctest/BUILD.bazel b/pkg/ccl/changefeedccl/cdctest/BUILD.bazel index 0a866a6aaee9..72f8ee168100 100644 --- a/pkg/ccl/changefeedccl/cdctest/BUILD.bazel +++ b/pkg/ccl/changefeedccl/cdctest/BUILD.bazel @@ -25,7 +25,6 @@ go_library( "//pkg/sql/catalog", "//pkg/sql/catalog/descs", "//pkg/sql/sem/tree", - "//pkg/testutils/serverutils", "//pkg/util", "//pkg/util/fsm", "//pkg/util/hlc", diff --git a/pkg/ccl/changefeedccl/cdctest/testfeed.go b/pkg/ccl/changefeedccl/cdctest/testfeed.go index 3a1835be3d9c..a0dc830a740e 100644 --- a/pkg/ccl/changefeedccl/cdctest/testfeed.go +++ b/pkg/ccl/changefeedccl/cdctest/testfeed.go @@ -13,7 +13,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/jobs" "github.com/cockroachdb/cockroach/pkg/jobs/jobspb" - "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/util/hlc" ) @@ -21,8 +20,6 @@ import ( type TestFeedFactory interface { // Feed creates a new TestFeed. Feed(create string, args ...interface{}) (TestFeed, error) - // Server returns the raw underlying TestServer, if applicable. 
- Server() serverutils.TestServerInterface } // TestFeedMessage represents one row update or resolved timestamp message from diff --git a/pkg/ccl/changefeedccl/changefeed_test.go b/pkg/ccl/changefeedccl/changefeed_test.go index 74cd1d273f9d..5c0b71194e95 100644 --- a/pkg/ccl/changefeedccl/changefeed_test.go +++ b/pkg/ccl/changefeedccl/changefeed_test.go @@ -51,7 +51,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/security/username" "github.com/cockroachdb/cockroach/pkg/server" - "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/server/telemetry" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/spanconfig" @@ -96,8 +95,8 @@ func TestChangefeedBasics(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (0, 'initial')`) sqlDB.Exec(t, `UPSERT INTO foo VALUES (0, 'updated')`) @@ -128,33 +127,31 @@ func TestChangefeedBasics(t *testing.T) { }) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn, feedTestForceSink("kafka")) + cdcTest(t, testFn, feedTestForceSink("enterprise")) + cdcTest(t, testFn, feedTestForceSink("webhook")) + cdcTest(t, testFn, feedTestForceSink("pubsub")) + cdcTest(t, testFn, feedTestForceSink("sinkless")) + cdcTest(t, testFn, feedTestForceSink("cloudstorage")) // NB running TestChangefeedBasics, which includes a DELETE, with // cloudStorageTest is a regression test for #36994. 
} -// TestChangefeedSendError validates that SendErrors do not fail the changefeed -// as they can occur in normal situations such as a cluster update func TestChangefeedIdleness(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + cdcTest(t, func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) changefeedbase.IdleTimeout.Override( - context.Background(), &f.Server().ClusterSettings().SV, 3*time.Second) + context.Background(), &s.Server.ClusterSettings().SV, 3*time.Second) // Idleness functionality is version gated - knobs := f.Server().TestingKnobs().Server.(*server.TestingKnobs) + knobs := s.TestingKnobs.Server.(*server.TestingKnobs) knobs.BinaryVersionOverride = clusterversion.ByKey(clusterversion.ChangefeedIdleness) - registry := f.Server().JobRegistry().(*jobs.Registry) + registry := s.Server.JobRegistry().(*jobs.Registry) currentlyIdle := registry.MetricsStruct().JobMetrics[jobspb.TypeChangefeed].CurrentlyIdle waitForIdleCount := func(numIdle int64) { testutils.SucceedsSoon(t, func() error { @@ -194,24 +191,21 @@ func TestChangefeedIdleness(t *testing.T) { `foo: [0]->{"after": {"a": 0}}`, `foo: [1]->{"after": {"a": 1}}`, }) - } - - // Tenant testing disabled due to TestServerInterface being required - t.Run(`enterprise`, enterpriseTest(testFn, feedTestNoTenants)) - t.Run(`kafka`, kafkaTest(testFn, feedTestNoTenants)) - t.Run(`webhook`, webhookTest(testFn, feedTestNoTenants)) + }, feedTestEnterpriseSinks) } +// TestChangefeedSendError validates that SendErrors do not fail the changefeed +// as they can occur in normal situations such as a cluster update func TestChangefeedSendError(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + cdcTest(t, func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (0)`) - knobs := f.Server().TestingKnobs(). + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). 
Changefeed.(*TestingKnobs) @@ -236,7 +230,7 @@ func TestChangefeedSendError(t *testing.T) { sqlDB.Exec(t, `INSERT INTO foo VALUES (4)`) // Changefeed should've been retried due to the SendError - registry := f.Server().JobRegistry().(*jobs.Registry) + registry := s.Server.JobRegistry().(*jobs.Registry) sli, err := registry.MetricsStruct().Changefeed.(*Metrics).getSLIMetrics(defaultSLIScope) require.NoError(t, err) retryCounter := sli.ErrorRetries @@ -254,21 +248,15 @@ func TestChangefeedSendError(t *testing.T) { `foo: [3]->{"after": {"a": 3}}`, `foo: [4]->{"after": {"a": 4}}`, }) - } - - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + }, feedTestEnterpriseSinks) } func TestChangefeedBasicConfluentKafka(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (0, 'initial')`) sqlDB.Exec(t, `UPSERT INTO foo VALUES (0, 'updated')`) @@ -301,15 +289,15 @@ func TestChangefeedBasicConfluentKafka(t *testing.T) { }) } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestForceSink("kafka")) } func TestChangefeedDiff(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (0, 'initial')`) sqlDB.Exec(t, `UPSERT INTO foo VALUES (0, 'updated')`) @@ -346,19 +334,14 @@ func TestChangefeedDiff(t *testing.T) { }) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedTenants(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - kvServer, kvSQLdb, cleanup := startTestServer(t, feedTestOptions{argsFn: func(args *base.TestServerArgs) { + kvServer, kvSQLdb, cleanup := startTestFullServer(t, feedTestOptions{argsFn: func(args *base.TestServerArgs) { args.ExternalIODirConfig.DisableOutbound = true }}) defer cleanup() @@ -413,13 +396,11 @@ func TestMissingTableErr(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - _, kvSQLdb, cleanup := startTestServer(t, feedTestOptions{argsFn: func(args *base.TestServerArgs) { - args.ExternalIODirConfig.DisableOutbound = true - }}) + s, cleanup := makeServer(t) defer cleanup() t.Run("changefeed on non existing table fails", func(t *testing.T) { - kvSQL := sqlutils.MakeSQLRunner(kvSQLdb) + kvSQL := sqlutils.MakeSQLRunner(s.DB) kvSQL.ExpectErr(t, `^pq: failed to resolve targets in the CHANGEFEED stmt: table "foo" does not exist$`, `CREATE CHANGEFEED FOR foo`, ) @@ -430,31 +411,17 @@ func TestChangefeedTenantsExternalIOEnabled(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - kvServer, _, cleanup := startTestServer(t, 
feedTestOptions{argsFn: func(args *base.TestServerArgs) { + s, cleanup := makeTenantServer(t, withArgsFn(func(args *base.TestServerArgs) { args.ExternalIODirConfig.DisableOutbound = true - }}) + })) defer cleanup() - tenantArgs := base.TestTenantArgs{ - // crdb_internal.create_tenant called by StartTenant - TenantID: serverutils.TestTenantID(), - UseDatabase: `d`, - TestingKnobs: base.TestingKnobs{ - DistSQL: &execinfra.TestingKnobs{Changefeed: &TestingKnobs{}}, - JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(), - }, - } - - tenantServer, tenantDB := serverutils.StartTenant(t, kvServer, tenantArgs) - tenantSQL := sqlutils.MakeSQLRunner(tenantDB) - tenantSQL.ExecMultiple(t, strings.Split(serverSetupStatements, ";")...) + tenantSQL := sqlutils.MakeSQLRunner(s.DB) tenantSQL.Exec(t, `CREATE TABLE foo_in_tenant (pk INT PRIMARY KEY)`) t.Run("sinkful changefeed works", func(t *testing.T) { - f := makeKafkaFeedFactory(&testServerShim{ - TestTenantInterface: tenantServer, - kvServer: kvServer}, - tenantDB) + f, cleanup := makeFeedFactory(t, "kafka", s.Server, s.DB) + defer cleanup() tenantSQL.Exec(t, `INSERT INTO foo_in_tenant VALUES (1)`) feed := feed(t, f, `CREATE CHANGEFEED FOR foo_in_tenant`) defer closeFeed(t, feed) @@ -468,8 +435,8 @@ func TestChangefeedEnvelope(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'a')`) @@ -500,17 +467,16 @@ func TestChangefeedEnvelope(t *testing.T) { }) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) + // some sinks are incompatible with envelope + cdcTest(t, testFn, feedTestRestrictSinks("sinkless", "enterprise", "kafka")) } func TestChangefeedFullTableName(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + cdcTest(t, func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'a')`) @@ -519,22 +485,15 @@ func TestChangefeedFullTableName(t *testing.T) { defer closeFeed(t, foo) assertPayloads(t, foo, []string{`d.public.foo: [1]->{"after": {"a": 1, "b": "a"}}`}) }) - } - - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + }) } func TestChangefeedMultiTable(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'a')`) sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY, b STRING)`) @@ -549,19 +508,15 @@ func TestChangefeedMultiTable(t *testing.T) { }) } - t.Run(`sinkless`, sinklessTest(testFn)) - 
t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedCursor(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) // To make sure that these timestamps are after 'before' and before @@ -611,20 +566,16 @@ func TestChangefeedCursor(t *testing.T) { } } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedTimestamps(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { ctx := context.Background() - sqlDB := sqlutils.MakeSQLRunner(db) + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (0)`) @@ -655,7 +606,7 @@ func TestChangefeedTimestamps(t *testing.T) { // Assert the remaining key using assertPayloads, since we know the exact // timestamp expected. var ts1 string - if err := crdb.ExecuteTx(ctx, db, nil /* txopts */, func(tx *gosql.Tx) error { + if err := crdb.ExecuteTx(ctx, s.DB, nil /* txopts */, func(tx *gosql.Tx) error { return tx.QueryRow( `INSERT INTO foo VALUES (1) RETURNING cluster_logical_timestamp()`, ).Scan(&ts1) @@ -675,19 +626,15 @@ func TestChangefeedTimestamps(t *testing.T) { } } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedMVCCTimestamps(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE mvcc_timestamp_test_table (id UUID PRIMARY KEY DEFAULT gen_random_uuid())`) rowCount := 5 @@ -707,19 +654,15 @@ func TestChangefeedMVCCTimestamps(t *testing.T) { assertPayloads(t, changeFeed, expectedPayloads) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedResolvedFrequency(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) const freq = 10 * time.Millisecond @@ -741,11 +684,7 @@ func TestChangefeedResolvedFrequency(t *testing.T) { } } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) 
- t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } // Test how Changefeeds react to schema changes that do not require a backfill @@ -764,8 +703,8 @@ func TestChangefeedInitialScan(t *testing.T) { `cursor - with initial backfill`: `CREATE CHANGEFEED FOR initial_scan WITH initial_scan = 'yes', resolved='1s', cursor='%s'`, } - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) for testName, changefeedStmt := range noInitialScanTests { t.Run(testName, func(t *testing.T) { @@ -812,25 +751,18 @@ func TestChangefeedInitialScan(t *testing.T) { } } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedBackfillObservability(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) - - knobs := f.Server().(*server.TestServer).Cfg.TestingKnobs. - DistSQL.(*execinfra.TestingKnobs). - Changefeed.(*TestingKnobs) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) - registry := f.Server().JobRegistry().(*jobs.Registry) + knobs := s.TestingKnobs.DistSQL.(*execinfra.TestingKnobs).Changefeed.(*TestingKnobs) + registry := s.Server.JobRegistry().(*jobs.Registry) sli, err := registry.MetricsStruct().Changefeed.(*Metrics).getSLIMetrics(defaultSLIScope) require.NoError(t, err) pendingRanges := sli.BackfillPendingRanges @@ -887,13 +819,14 @@ func TestChangefeedBackfillObservability(t *testing.T) { }) } - t.Run("enterprise", enterpriseTest(testFn, feedTestNoTenants)) + // Can't run on tenants due to lack of SPLIT AT support (#54254) + cdcTest(t, testFn, feedTestNoTenants, feedTestEnterpriseSinks) } func TestChangefeedUserDefinedTypes(t *testing.T) { defer leaktest.AfterTest(t)() - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) disableDeclarativeSchemaChangesForTest(t, sqlDB) // Set up a type and table. 
sqlDB.Exec(t, `CREATE TYPE t AS ENUM ('hello', 'howdy', 'hi')`) @@ -946,11 +879,7 @@ func TestChangefeedUserDefinedTypes(t *testing.T) { } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedExternalIODisabled(t *testing.T) { @@ -988,17 +917,16 @@ func TestChangefeedExternalIODisabled(t *testing.T) { }) withDisabledOutbound := func(args *base.TestServerArgs) { args.ExternalIODirConfig.DisableOutbound = true } - t.Run("sinkless changfeeds are allowed with disabled external io", - sinklessTest(func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) - sqlDB.Exec(t, "CREATE TABLE target_table (pk INT PRIMARY KEY)") - sqlDB.Exec(t, "INSERT INTO target_table VALUES (1)") - feed := feed(t, f, "CREATE CHANGEFEED FOR target_table") - defer closeFeed(t, feed) - assertPayloads(t, feed, []string{ - `target_table: [1]->{"after": {"pk": 1}}`, - }) - }, withArgsFn(withDisabledOutbound))) + cdcTestNamed(t, "sinkless changfeeds are allowed with disabled external io", func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) + sqlDB.Exec(t, "CREATE TABLE target_table (pk INT PRIMARY KEY)") + sqlDB.Exec(t, "INSERT INTO target_table VALUES (1)") + feed := feed(t, f, "CREATE CHANGEFEED FOR target_table") + defer closeFeed(t, feed) + assertPayloads(t, feed, []string{ + `target_table: [1]->{"after": {"pk": 1}}`, + }) + }, feedTestForceSink("sinkless"), withArgsFn(withDisabledOutbound)) } // Test how Changefeeds react to schema changes that do not require a backfill @@ -1008,8 +936,8 @@ func TestChangefeedSchemaChangeNoBackfill(t *testing.T) { defer log.Scope(t).Close(t) skip.UnderRace(t, "takes >1 min under race") - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) disableDeclarativeSchemaChangesForTest(t, sqlDB) // Schema changes that predate the changefeed. @@ -1181,11 +1109,8 @@ func TestChangefeedSchemaChangeNoBackfill(t *testing.T) { }) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) + log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile("cdc ux violation"), log.WithFlattenedSensitiveData) @@ -1204,7 +1129,7 @@ func TestChangefeedLaggingSpanCheckpointing(t *testing.T) { defer log.Scope(t).Close(t) rnd, _ := randutil.NewPseudoRand() - s, db, stopServer := startTestServer(t, feedTestOptions{}) + s, db, stopServer := startTestFullServer(t, feedTestOptions{}) defer stopServer() sqlDB := sqlutils.MakeSQLRunner(db) @@ -1338,11 +1263,11 @@ func TestChangefeedSchemaChangeBackfillCheckpoint(t *testing.T) { // truncation var maxCheckpointSize int64 = 100 << 20 - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServerWithSystem, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) disableDeclarativeSchemaChangesForTest(t, sqlDB) - knobs := f.Server().(*server.TestServer).Cfg.TestingKnobs. + knobs := s.TestingKnobs. 
DistSQL.(*execinfra.TestingKnobs). Changefeed.(*TestingKnobs) @@ -1360,7 +1285,7 @@ func TestChangefeedSchemaChangeBackfillCheckpoint(t *testing.T) { // Setup changefeed job details, avoid relying on initial scan functionality baseFeed := feed(t, f, `CREATE CHANGEFEED FOR foo WITH resolved='100ms', no_initial_scan`) jobFeed := baseFeed.(cdctest.EnterpriseTestFeed) - jobRegistry := f.Server().JobRegistry().(*jobs.Registry) + jobRegistry := s.Server.JobRegistry().(*jobs.Registry) // Ensure events are consumed for sinks that don't buffer (ex: Kafka) g := ctxgroup.WithContext(context.Background()) @@ -1399,14 +1324,14 @@ func TestChangefeedSchemaChangeBackfillCheckpoint(t *testing.T) { // Checkpoint progress frequently, and set the checkpoint size limit. changefeedbase.FrontierCheckpointFrequency.Override( - context.Background(), &f.Server().ClusterSettings().SV, 10*time.Millisecond) + context.Background(), &s.Server.ClusterSettings().SV, 10*time.Millisecond) changefeedbase.FrontierCheckpointMaxBytes.Override( - context.Background(), &f.Server().ClusterSettings().SV, maxCheckpointSize) + context.Background(), &s.Server.ClusterSettings().SV, maxCheckpointSize) // Note the tableSpan to avoid resolved events that leave no gaps fooDesc := desctestutils.TestingGetPublicTableDescriptor( - f.Server().DB(), keys.SystemSQLCodec, "d", "foo") - tableSpan := fooDesc.PrimaryIndexSpan(keys.SystemSQLCodec) + s.SystemServer.DB(), s.Codec, "d", "foo") + tableSpan := fooDesc.PrimaryIndexSpan(s.Codec) // ShouldSkipResolved should ensure that once the backfill begins, the following resolved events // that are for that backfill (are of the timestamp right after the backfill timestamp) resolve some @@ -1554,10 +1479,7 @@ func TestChangefeedSchemaChangeBackfillCheckpoint(t *testing.T) { } } - // TODO(ssd): Tenant testing disabled because of use of DB() - t.Run("enterprise", enterpriseTest(testFn, feedTestNoTenants)) - t.Run("cloudstorage", cloudStorageTest(testFn, feedTestNoTenants)) - t.Run("kafka", kafkaTest(testFn, feedTestNoTenants)) + cdcTestWithSystem(t, testFn, feedTestEnterpriseSinks) log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, @@ -1576,8 +1498,8 @@ func TestChangefeedSchemaChangeAllowBackfill(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServerWithSystem, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) disableDeclarativeSchemaChangesForTest(t, sqlDB) // Expected semantics: @@ -1609,7 +1531,7 @@ func TestChangefeedSchemaChangeAllowBackfill(t *testing.T) { `add_column_def: [2]->{"after": {"a": 2}}`, }) sqlDB.Exec(t, `ALTER TABLE add_column_def ADD COLUMN b STRING DEFAULT 'd'`) - ts := fetchDescVersionModificationTime(t, db, f, `add_column_def`, 4) + ts := fetchDescVersionModificationTime(t, s, `add_column_def`, 4) // Schema change backfill assertPayloadsStripTs(t, addColumnDef, []string{ `add_column_def: [1]->{"after": {"a": 1}}`, @@ -1639,7 +1561,7 @@ func TestChangefeedSchemaChangeAllowBackfill(t *testing.T) { `add_col_comp: [1]->{"after": {"a": 1, "b": 6}}`, `add_col_comp: [2]->{"after": {"a": 2, "b": 7}}`, }) - ts := fetchDescVersionModificationTime(t, db, f, `add_col_comp`, 4) + ts := fetchDescVersionModificationTime(t, s, `add_col_comp`, 4) assertPayloads(t, addColComp, []string{ fmt.Sprintf(`add_col_comp: [1]->{"after": {"a": 1, "b": 6, "c": 11}, "updated": "%s"}`, 
ts.AsOfSystemTime()), @@ -1689,7 +1611,7 @@ func TestChangefeedSchemaChangeAllowBackfill(t *testing.T) { wg.Wait() return nil } - knobs := f.Server().TestingKnobs(). + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). Changefeed.(*TestingKnobs) knobs.BeforeEmitRow = waitSinkHook @@ -1731,7 +1653,7 @@ func TestChangefeedSchemaChangeAllowBackfill(t *testing.T) { `multiple_alters: [1]->{"after": {"a": 1, "c": "cee"}}`, `multiple_alters: [2]->{"after": {"a": 2, "c": "cee"}}`, }) - ts := fetchDescVersionModificationTime(t, db, f, `multiple_alters`, 10) + ts := fetchDescVersionModificationTime(t, s, `multiple_alters`, 10) // Changefeed level backfill for ADD COLUMN d. assertPayloads(t, multipleAlters, []string{ // Backfill no-ops for column D (C schema change is complete) @@ -1743,13 +1665,8 @@ func TestChangefeedSchemaChangeAllowBackfill(t *testing.T) { }) } - // TODO(ssd): tenant tests skipped because of f.Server() use - // in fetchDescVersionModificationTime - t.Run(`sinkless`, sinklessTest(testFn, feedTestNoTenants)) - t.Run(`enterprise`, enterpriseTest(testFn, feedTestNoTenants)) - t.Run(`kafka`, kafkaTest(testFn, feedTestNoTenants)) - t.Run(`webhook`, webhookTest(testFn, feedTestNoTenants)) - t.Run(`pubsub`, pubsubTest(testFn, feedTestNoTenants)) + cdcTestWithSystem(t, testFn) + log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile("cdc ux violation"), log.WithFlattenedSensitiveData) @@ -1766,8 +1683,8 @@ func TestChangefeedSchemaChangeBackfillScope(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServerWithSystem, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) disableDeclarativeSchemaChangesForTest(t, sqlDB) t.Run(`add column with default`, func(t *testing.T) { @@ -1784,7 +1701,7 @@ func TestChangefeedSchemaChangeBackfillScope(t *testing.T) { `no_def_change: [3]->{"after": {"a": 3}}`, }) sqlDB.Exec(t, `ALTER TABLE add_column_def ADD COLUMN b STRING DEFAULT 'd'`) - ts := fetchDescVersionModificationTime(t, db, f, `add_column_def`, 4) + ts := fetchDescVersionModificationTime(t, s, `add_column_def`, 4) // Schema change backfill assertPayloadsStripTs(t, combinedFeed, []string{ `add_column_def: [1]->{"after": {"a": 1}}`, @@ -1801,13 +1718,7 @@ func TestChangefeedSchemaChangeBackfillScope(t *testing.T) { } - // TODO(ssd): tenant tests skipped because of f.Server() use - // in fetchDescVerionModifationTime - t.Run(`sinkless`, sinklessTest(testFn, feedTestNoTenants)) - t.Run(`enterprise`, enterpriseTest(testFn, feedTestNoTenants)) - t.Run(`kafka`, kafkaTest(testFn, feedTestNoTenants)) - t.Run(`webhook`, webhookTest(testFn, feedTestNoTenants)) - t.Run(`pubsub`, pubsubTest(testFn, feedTestNoTenants)) + cdcTestWithSystem(t, testFn) log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile("cdc ux violation"), log.WithFlattenedSensitiveData) @@ -1822,14 +1733,14 @@ func TestChangefeedSchemaChangeBackfillScope(t *testing.T) { // fetchDescVersionModificationTime fetches the `ModificationTime` of the specified // `version` of `tableName`'s table descriptor. 
func fetchDescVersionModificationTime( - t testing.TB, db *gosql.DB, f cdctest.TestFeedFactory, tableName string, version int, + t testing.TB, s TestServerWithSystem, tableName string, version int, ) hlc.Timestamp { - tblKey := keys.SystemSQLCodec.TablePrefix(keys.DescriptorTableID) + tblKey := s.Codec.TablePrefix(keys.DescriptorTableID) header := roachpb.RequestHeader{ Key: tblKey, EndKey: tblKey.PrefixEnd(), } - dropColTblID := sqlutils.QueryTableID(t, db, `d`, "public", tableName) + dropColTblID := sqlutils.QueryTableID(t, s.DB, `d`, "public", tableName) req := &roachpb.ExportRequest{ RequestHeader: header, MVCCFilter: roachpb.MVCCFilter_All, @@ -1839,7 +1750,7 @@ func fetchDescVersionModificationTime( clock := hlc.NewClockWithSystemTimeSource(time.Minute /* maxOffset */) hh := roachpb.Header{Timestamp: clock.Now()} res, pErr := kv.SendWrappedWith(context.Background(), - f.Server().DB().NonTransactionalSender(), hh, req) + s.SystemServer.DB().NonTransactionalSender(), hh, req) if pErr != nil { t.Fatal(pErr.GoError()) } @@ -1856,7 +1767,7 @@ func fetchDescVersionModificationTime( continue } k := it.UnsafeKey() - remaining, _, _, err := keys.SystemSQLCodec.DecodeIndexPrefix(k.Key) + remaining, _, _, err := s.Codec.DecodeIndexPrefix(k.Key) if err != nil { t.Fatal(err) } @@ -1892,8 +1803,8 @@ func TestChangefeedAfterSchemaChangeBackfill(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE after_backfill (a INT PRIMARY KEY)`) sqlDB.Exec(t, `INSERT INTO after_backfill VALUES (0)`) sqlDB.Exec(t, `ALTER TABLE after_backfill ADD COLUMN b INT DEFAULT 1`) @@ -1906,11 +1817,7 @@ func TestChangefeedAfterSchemaChangeBackfill(t *testing.T) { }) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) log.Flush() entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1, regexp.MustCompile("cdc ux violation"), log.WithFlattenedSensitiveData) @@ -1926,9 +1833,9 @@ func TestChangefeedEachColumnFamily(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + sqlDB := sqlutils.MakeSQLRunner(s.DB) // Table with 2 column families. sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING, c STRING, FAMILY most (a,b), FAMILY only_c (c))`) @@ -1990,21 +1897,15 @@ func TestChangefeedEachColumnFamily(t *testing.T) { } } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedSingleColumnFamily(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) // Table with 2 column families. 
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING, c STRING, FAMILY most (a,b), FAMILY rest (c))`) @@ -2036,10 +1937,7 @@ func TestChangefeedSingleColumnFamily(t *testing.T) { `foo.rest: [1]->{"after": {"c": "cent"}}`, }) } - - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedSingleColumnFamilySchemaChanges(t *testing.T) { @@ -2050,9 +1948,8 @@ func TestChangefeedSingleColumnFamilySchemaChanges(t *testing.T) { skip.UnderStress(t) skip.UnderRace(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) disableDeclarativeSchemaChangesForTest(t, sqlDB) // Table with 2 column families. @@ -2083,18 +1980,15 @@ func TestChangefeedSingleColumnFamilySchemaChanges(t *testing.T) { regexp.MustCompile(`CHANGEFEED targeting nonexistent or removed column family rest of table foo`)) } - - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedEachColumnFamilySchemaChanges(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) disableDeclarativeSchemaChangesForTest(t, sqlDB) // Table with 2 column families. @@ -2122,20 +2016,16 @@ func TestChangefeedEachColumnFamilySchemaChanges(t *testing.T) { assertPayloads(t, foo, []string{ `foo.f3: [0]->{"after": {"e": "hello"}}`, }) - } - - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedColumnFamilyAvro(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING, c STRING, FAMILY most (a,b), FAMILY justc (c))`) sqlDB.Exec(t, `INSERT INTO foo values (0, 'dog', 'cat')`) @@ -2145,9 +2035,8 @@ func TestChangefeedColumnFamilyAvro(t *testing.T) { `foo.most: {"a":{"long":0}}->{"after":{"foo_u002e_most":{"a":{"long":0},"b":{"string":"dog"}}}}`, `foo.justc: {"a":{"long":0}}->{"after":{"foo_u002e_justc":{"c":{"string":"cat"}}}}`, }) - } - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestForceSink("kafka")) } func TestChangefeedAuthorization(t *testing.T) { @@ -2171,9 +2060,9 @@ func TestChangefeedAuthorization(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - s, db, stop := startTestServer(t, feedTestOptions{}) + s, stop := makeServer(t) defer stop() - rootDB := sqlutils.MakeSQLRunner(db) + rootDB := sqlutils.MakeSQLRunner(s.DB) rootDB.Exec(t, `create user guest with password 'password'`) rootDB.Exec(t, `create user feedcreator with controlchangefeed password 'hunter2'`) @@ -2181,7 +2070,7 @@ func TestChangefeedAuthorization(t *testing.T) { pgURL := url.URL{ Scheme: "postgres", User: url.UserPassword(`guest`, `password`), - Host: s.ServingSQLAddr(), + Host: s.Server.SQLAddr(), } db2, err := 
gosql.Open("postgres", pgURL.String()) @@ -2192,7 +2081,7 @@ func TestChangefeedAuthorization(t *testing.T) { pgURL = url.URL{ Scheme: "postgres", User: url.UserPassword(`feedcreator`, `hunter2`), - Host: s.ServingSQLAddr(), + Host: s.Server.SQLAddr(), } db3, err := gosql.Open("postgres", pgURL.String()) @@ -2229,25 +2118,25 @@ func TestChangefeedAvroNotice(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - s, db, stop := startTestServer(t, feedTestOptions{}) + s, stop := makeServer(t) defer stop() schemaReg := cdctest.StartTestSchemaRegistry() defer schemaReg.Close() - sqlDB := sqlutils.MakeSQLRunner(db) + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, "CREATE table foo (i int)") sqlDB.Exec(t, `INSERT INTO foo VALUES (0)`) sql := fmt.Sprintf("CREATE CHANGEFEED FOR d.foo INTO 'null://' WITH format=experimental_avro, confluent_schema_registry='%s'", schemaReg.URL()) - expectNotice(t, s, sql, `avro is no longer experimental, use format=avro`) + expectNotice(t, s.Server, sql, `avro is no longer experimental, use format=avro`) } func TestChangefeedOutputTopics(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - pgURL, cleanup := sqlutils.PGUrl(t, f.Server().ServingSQLAddr(), t.Name(), url.User(username.RootUser)) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + pgURL, cleanup := sqlutils.PGUrl(t, s.Server.SQLAddr(), t.Name(), url.User(username.RootUser)) defer cleanup() pgBase, err := pq.NewConnector(pgURL.String()) if err != nil { @@ -2296,8 +2185,7 @@ func TestChangefeedOutputTopics(t *testing.T) { sqlDB.Exec(t, `CREATE CHANGEFEED FOR ☃ INTO 'kafka://does.not.matter/'`) require.Equal(t, `changefeed will emit to topic _u2603_`, actual) } - - t.Run(`kafka`, kafkaTest(testFn)) + cdcTest(t, testFn, feedTestForceSink("kafka")) } func requireErrorSoon( @@ -2332,8 +2220,8 @@ func TestChangefeedFailOnTableOffline(t *testing.T) { })) defer dataSrv.Close() - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, "SET CLUSTER SETTING kv.closed_timestamp.target_duration = '50ms'") t.Run("import fails changefeed", func(t *testing.T) { sqlDB.Exec(t, `CREATE TABLE for_import (a INT PRIMARY KEY, b INT)`) @@ -2349,16 +2237,8 @@ func TestChangefeedFailOnTableOffline(t *testing.T) { regexp.MustCompile(`CHANGEFEED cannot target offline table: for_import \(offline reason: "importing"\)`)) }) } - // TODO(ssd): tenant tests skipped because of: - // changefeed_test.go:1409: error executing 'IMPORT INTO - // for_import CSV DATA ($1)': pq: fake protectedts.Provide - t.Run(`sinkless`, sinklessTest(testFn, feedTestNoTenants)) - t.Run(`enterprise`, enterpriseTest(testFn, feedTestNoTenants)) - t.Run(`cloudstorage`, cloudStorageTest(testFn, feedTestNoTenants)) - t.Run(`kafka`, kafkaTest(testFn, feedTestNoTenants)) - t.Run(`webhook`, webhookTest(testFn, feedTestNoTenants)) - t.Run(`pubsub`, pubsubTest(testFn, feedTestNoTenants)) + cdcTest(t, testFn) } func TestChangefeedRestartMultiNode(t *testing.T) { @@ -2468,8 +2348,8 @@ func TestChangefeedWorksOnRBRChange(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFnJSON := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFnJSON := func(t *testing.T, s 
TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, "SET CLUSTER SETTING kv.closed_timestamp.target_duration = '50ms'") t.Run("regional by row change works", func(t *testing.T) { sqlDB.Exec(t, `CREATE TABLE rbr (a INT PRIMARY KEY, b INT)`) @@ -2489,8 +2369,8 @@ func TestChangefeedWorksOnRBRChange(t *testing.T) { }) }) } - testFnAvro := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFnAvro := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, "SET CLUSTER SETTING kv.closed_timestamp.target_duration = '50ms'") t.Run("regional by row change works", func(t *testing.T) { sqlDB.Exec(t, `CREATE TABLE rbr (a INT PRIMARY KEY, b INT)`) @@ -2535,17 +2415,18 @@ func TestChangefeedWorksOnRBRChange(t *testing.T) { }) } - // Tenants skiped because of: + // Tenants skipped because of: // // error executing 'ALTER DATABASE d PRIMARY REGION // "us-east-1"': pq: get_live_cluster_regions: unimplemented: // operation is unsupported in multi-tenancy mode opts := []feedTestOption{ feedTestNoTenants, + feedTestEnterpriseSinks, withArgsFn(withTestServerRegion), } - RunRandomSinkTest(t, "format=json", testFnJSON, opts...) - t.Run("kafka/format=avro", kafkaTest(testFnAvro, opts...)) + cdcTestNamed(t, "format=json", testFnJSON, opts...) + cdcTestNamed(t, "format=avro", testFnAvro, append(opts, feedTestForceSink("kafka"))...) } func TestChangefeedRBRAvroAddRegion(t *testing.T) { @@ -2609,8 +2490,8 @@ func TestChangefeedStopOnSchemaChange(t *testing.T) { } } } - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) // Shorten the intervals so this test doesn't take so long. We need to wait // for timestamps to get resolved. sqlDB.Exec(t, "SET CLUSTER SETTING changefeed.experimental_poll_interval = '200ms'") @@ -2727,11 +2608,7 @@ func TestChangefeedStopOnSchemaChange(t *testing.T) { }) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedNoBackfill(t *testing.T) { @@ -2740,8 +2617,8 @@ func TestChangefeedNoBackfill(t *testing.T) { skip.UnderRace(t) skip.UnderShort(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) disableDeclarativeSchemaChangesForTest(t, sqlDB) // Shorten the intervals so this test doesn't take so long. We need to wait // for timestamps to get resolved. 
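// The hunks in this file all apply the same refactor: per-sink wrappers such
// as t.Run(`kafka`, kafkaTest(testFn)) collapse into a single cdcTest call,
// and testFn now takes a TestServer struct instead of a bare *gosql.DB. A
// minimal sketch of the resulting test shape, assuming this package's cdcTest,
// TestServer, feed, closeFeed, and assertPayloads helpers and this file's
// existing imports; the table name and payloads are illustrative only.
func TestChangefeedSketch(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) {
		// s.DB replaces the *gosql.DB argument from the old signature.
		sqlDB := sqlutils.MakeSQLRunner(s.DB)
		sqlDB.Exec(t, `CREATE TABLE sketch (a INT PRIMARY KEY)`)
		sqlDB.Exec(t, `INSERT INTO sketch VALUES (1)`)

		testFeed := feed(t, f, `CREATE CHANGEFEED FOR sketch`)
		defer closeFeed(t, testFeed)
		assertPayloads(t, testFeed, []string{
			`sketch: [1]->{"after": {"a": 1}}`,
		})
	}

	// Runs testFn against every supported sink; options such as
	// feedTestEnterpriseSinks or feedTestForceSink("kafka") narrow the set,
	// as the hunks in this file do.
	cdcTest(t, testFn)
}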
@@ -2847,19 +2724,15 @@ func TestChangefeedNoBackfill(t *testing.T) { }) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedStoredComputedColumn(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE cc ( a INT, b INT AS (a + 1) STORED, c INT AS (a + 2) STORED, PRIMARY KEY (b, a) )`) @@ -2878,11 +2751,7 @@ func TestChangefeedStoredComputedColumn(t *testing.T) { }) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedVirtualComputedColumn(t *testing.T) { @@ -2923,8 +2792,8 @@ func TestChangefeedVirtualComputedColumn(t *testing.T) { } for _, test := range tests { - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE cc ( a INT primary key, b INT, c INT AS (b + 1) VIRTUAL NOT NULL @@ -2945,12 +2814,10 @@ func TestChangefeedVirtualComputedColumn(t *testing.T) { } if test.formatOpt != changefeedbase.OptFormatAvro { - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) + cdcTest(t, testFn) + } else { + cdcTest(t, testFn, feedTestForceSink("kafka")) } - - t.Run(`kafka`, kafkaTest(testFn)) } } @@ -2958,8 +2825,8 @@ func TestChangefeedUpdatePrimaryKey(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) // This NOT NULL column checks a regression when used with UPDATE-ing a // primary key column or with DELETE. 
sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING NOT NULL)`) @@ -2983,11 +2850,7 @@ func TestChangefeedUpdatePrimaryKey(t *testing.T) { }) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } func TestChangefeedTruncateOrDrop(t *testing.T) { @@ -3010,9 +2873,9 @@ func TestChangefeedTruncateOrDrop(t *testing.T) { }) } - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) - registry := f.Server().JobRegistry().(*jobs.Registry) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) + registry := s.Server.JobRegistry().(*jobs.Registry) metrics := registry.MetricsStruct().Changefeed.(*Metrics) drainUntilErr := func(f cdctest.TestFeed) (err error) { @@ -3055,11 +2918,7 @@ func TestChangefeedTruncateOrDrop(t *testing.T) { assertFailuresCounter(t, metrics, 3) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) // will sometimes fail, non deterministic } @@ -3067,34 +2926,33 @@ func TestChangefeedMonitoring(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1)`) - s := f.Server() - if c := s.MustGetSQLCounter(`changefeed.emitted_messages`); c != 0 { + if c := s.Server.MustGetSQLCounter(`changefeed.emitted_messages`); c != 0 { t.Errorf(`expected 0 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.emitted_bytes`); c != 0 { + if c := s.Server.MustGetSQLCounter(`changefeed.emitted_bytes`); c != 0 { t.Errorf(`expected 0 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.flushed_bytes`); c != 0 { + if c := s.Server.MustGetSQLCounter(`changefeed.flushed_bytes`); c != 0 { t.Errorf(`expected 0 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.flushes`); c != 0 { + if c := s.Server.MustGetSQLCounter(`changefeed.flushes`); c != 0 { t.Errorf(`expected 0 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.max_behind_nanos`); c != 0 { + if c := s.Server.MustGetSQLCounter(`changefeed.max_behind_nanos`); c != 0 { t.Errorf(`expected %d got %d`, 0, c) } - if c := s.MustGetSQLCounter(`changefeed.buffer_entries.in`); c != 0 { + if c := s.Server.MustGetSQLCounter(`changefeed.buffer_entries.in`); c != 0 { t.Errorf(`expected 0 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.buffer_entries.out`); c != 0 { + if c := s.Server.MustGetSQLCounter(`changefeed.buffer_entries.out`); c != 0 { t.Errorf(`expected 0 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.table_metadata_nanos`); c != 0 { + if c := s.Server.MustGetSQLCounter(`changefeed.table_metadata_nanos`); c != 0 { t.Errorf(`expected 0 got %d`, c) } @@ -3110,28 +2968,28 @@ func TestChangefeedMonitoring(t *testing.T) { require.NoError(t, err) testutils.SucceedsSoon(t, func() error { - if c := s.MustGetSQLCounter(`changefeed.emitted_messages`); c != 1 { + if c := 
s.Server.MustGetSQLCounter(`changefeed.emitted_messages`); c != 1 { return errors.Errorf(`expected 1 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.emitted_bytes`); c != 22 { + if c := s.Server.MustGetSQLCounter(`changefeed.emitted_bytes`); c != 22 { return errors.Errorf(`expected 22 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.flushed_bytes`); c != 22 { + if c := s.Server.MustGetSQLCounter(`changefeed.flushed_bytes`); c != 22 { return errors.Errorf(`expected 22 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.flushes`); c <= 0 { + if c := s.Server.MustGetSQLCounter(`changefeed.flushes`); c <= 0 { return errors.Errorf(`expected > 0 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.running`); c != 1 { + if c := s.Server.MustGetSQLCounter(`changefeed.running`); c != 1 { return errors.Errorf(`expected 1 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.max_behind_nanos`); c <= 0 { + if c := s.Server.MustGetSQLCounter(`changefeed.max_behind_nanos`); c <= 0 { return errors.Errorf(`expected > 0 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.buffer_entries.in`); c <= 0 { + if c := s.Server.MustGetSQLCounter(`changefeed.buffer_entries.in`); c <= 0 { return errors.Errorf(`expected > 0 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.buffer_entries.out`); c <= 0 { + if c := s.Server.MustGetSQLCounter(`changefeed.buffer_entries.out`); c <= 0 { return errors.Errorf(`expected > 0 got %d`, c) } return nil @@ -3148,10 +3006,10 @@ func TestChangefeedMonitoring(t *testing.T) { testutils.SucceedsSoon(t, func() error { // We can't assert exactly 4 or 88 in case we get (allowed) duplicates // from RangeFeed. - if c := s.MustGetSQLCounter(`changefeed.emitted_messages`); c < 4 { + if c := s.Server.MustGetSQLCounter(`changefeed.emitted_messages`); c < 4 { return errors.Errorf(`expected >= 4 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.emitted_bytes`); c < 88 { + if c := s.Server.MustGetSQLCounter(`changefeed.emitted_bytes`); c < 88 { return errors.Errorf(`expected >= 88 got %d`, c) } return nil @@ -3162,17 +3020,17 @@ func TestChangefeedMonitoring(t *testing.T) { require.NoError(t, foo.Close()) require.NoError(t, fooCopy.Close()) testutils.SucceedsSoon(t, func() error { - if c := s.MustGetSQLCounter(`changefeed.max_behind_nanos`); c != 0 { + if c := s.Server.MustGetSQLCounter(`changefeed.max_behind_nanos`); c != 0 { return errors.Errorf(`expected 0 got %d`, c) } - if c := s.MustGetSQLCounter(`changefeed.running`); c != 0 { + if c := s.Server.MustGetSQLCounter(`changefeed.running`); c != 0 { return errors.Errorf(`expected 0 got %d`, c) } return nil }) } - // TODO(ssd): tenant tests skipped because of f.Server() use - t.Run(`sinkless`, sinklessTest(testFn, feedTestNoTenants)) + + cdcTest(t, testFn, feedTestForceSink("sinkless")) } func TestChangefeedRetryableError(t *testing.T) { @@ -3180,8 +3038,8 @@ func TestChangefeedRetryableError(t *testing.T) { defer log.Scope(t).Close(t) defer utilccl.TestingEnableEnterprise()() - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - knobs := f.Server().TestingKnobs(). + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). Changefeed.(*TestingKnobs) var failEmit int64 @@ -3197,7 +3055,7 @@ func TestChangefeedRetryableError(t *testing.T) { } // Set up a new feed and verify that the sink is started up. 
- sqlDB := sqlutils.MakeSQLRunner(db) + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) foo := feed(t, f, `CREATE CHANGEFEED FOR foo`) defer closeFeed(t, foo) @@ -3210,7 +3068,7 @@ func TestChangefeedRetryableError(t *testing.T) { // sink is failing requests. atomic.StoreInt64(&failEmit, 1) sqlDB.Exec(t, `INSERT INTO foo VALUES (2)`) - registry := f.Server().JobRegistry().(*jobs.Registry) + registry := s.Server.JobRegistry().(*jobs.Registry) sli, err := registry.MetricsStruct().Changefeed.(*Metrics).getSLIMetrics(defaultSLIScope) require.NoError(t, err) @@ -3257,11 +3115,7 @@ func TestChangefeedRetryableError(t *testing.T) { } } - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestChangefeedJobRetryOnNoInboundStream(t *testing.T) { @@ -3296,7 +3150,7 @@ func TestChangefeedJobRetryOnNoInboundStream(t *testing.T) { defer closeFeed(t, foo) // Verify job progress contains retryable error status. - registry := f.Server().JobRegistry().(*jobs.Registry) + registry := cluster.Server(feedServerID).JobRegistry().(*jobs.Registry) jobID := foo.(cdctest.EnterpriseTestFeed).JobID() testutils.SucceedsSoon(t, func() error { job, err := registry.LoadJob(context.Background(), jobID) @@ -3336,15 +3190,15 @@ func TestChangefeedJobUpdateFailsIfNotClaimed(t *testing.T) { // undo our deletion of the claim ID below. knobs.JobsTestingKnobs.(*jobs.TestingKnobs).IntervalOverrides.Adopt = &adoptionInterval }) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - knobs := f.Server().TestingKnobs().DistSQL.(*execinfra.TestingKnobs).Changefeed.(*TestingKnobs) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + knobs := s.TestingKnobs.DistSQL.(*execinfra.TestingKnobs).Changefeed.(*TestingKnobs) errChan := make(chan error, 1) knobs.HandleDistChangefeedError = func(err error) error { errChan <- err return err } - sqlDB := sqlutils.MakeSQLRunner(db) + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b INT)`) sqlDB.Exec(t, `INSERT INTO foo (a, b) VALUES (1, 1)`) @@ -3371,9 +3225,10 @@ func TestChangefeedJobUpdateFailsIfNotClaimed(t *testing.T) { case <-time.After(5 * time.Second): t.Fatal("expected distflow to fail but it hasn't after 5 seconds") } - } - RunRandomSinkTest(t, "fails as expected", testFn, feedTestNoTenants, sessionOverride) + + // TODO: Figure out why this freezes on tenants + cdcTest(t, testFn, sessionOverride, feedTestNoTenants, feedTestEnterpriseSinks) } // TestChangefeedDataTTL ensures that changefeeds fail with an error in the case @@ -3382,13 +3237,13 @@ func TestChangefeedDataTTL(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { + testFn := func(t *testing.T, s TestServerWithSystem, f cdctest.TestFeedFactory) { // Set a very simple channel-based, wait-and-resume function as the // BeforeEmitRow hook. var shouldWait int32 wait := make(chan struct{}) resume := make(chan struct{}) - knobs := f.Server().TestingKnobs(). + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). 
Changefeed.(*TestingKnobs) knobs.FeedKnobs.BeforeScanRequest = func(_ *kv.Batch) error { @@ -3400,7 +3255,7 @@ func TestChangefeedDataTTL(t *testing.T) { return nil } - sqlDB := sqlutils.MakeSQLRunner(db) + sqlDB := sqlutils.MakeSQLRunner(s.DB) // Create the data table; it will only contain a // single row with multiple versions. @@ -3456,7 +3311,7 @@ func TestChangefeedDataTTL(t *testing.T) { // Force a GC of the table. This should cause both // versions of the table to be deleted. - forceTableGC(t, f.Server(), sqlDB, "d", "foo") + forceTableGC(t, s.SystemServer, sqlDB, "d", "foo") // Resume our changefeed normally. atomic.StoreInt32(&shouldWait, 0) @@ -3495,10 +3350,9 @@ func TestChangefeedDataTTL(t *testing.T) { // NOTE(ssd): This test doesn't apply to enterprise // changefeeds since enterprise changefeeds create a protected // timestamp before beginning their backfill. - // - // TODO(ssd): Tenant test disabled because this test requires - // the fully TestServerInterface. - t.Run("sinkless", sinklessTest(testFn, feedTestNoTenants)) + // TODO(samiskin): Tenant test disabled because this test requires + // forceTableGC which doesn't work on tenants + cdcTestWithSystem(t, testFn, feedTestForceSink("sinkless"), feedTestNoTenants) } // TestChangefeedSchemaTTL ensures that changefeeds fail with an error in the case @@ -3507,13 +3361,13 @@ func TestChangefeedSchemaTTL(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { + testFn := func(t *testing.T, s TestServerWithSystem, f cdctest.TestFeedFactory) { // Set a very simple channel-based, wait-and-resume function as the // BeforeEmitRow hook. var shouldWait int32 wait := make(chan struct{}) resume := make(chan struct{}) - knobs := f.Server().TestingKnobs(). + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). Changefeed.(*TestingKnobs) knobs.BeforeEmitRow = func(_ context.Context) error { @@ -3525,7 +3379,7 @@ func TestChangefeedSchemaTTL(t *testing.T) { return nil } - sqlDB := sqlutils.MakeSQLRunner(db) + sqlDB := sqlutils.MakeSQLRunner(s.DB) // Create the data table; it will only contain a single row with multiple // versions. @@ -3558,7 +3412,7 @@ func TestChangefeedSchemaTTL(t *testing.T) { // Force a GC of the table. This should cause both older versions of the // table to be deleted, with the middle version being lost to the changefeed. - forceTableGC(t, f.Server(), sqlDB, "system", "descriptor") + forceTableGC(t, s.SystemServer, sqlDB, "system", "descriptor") // Resume our changefeed normally. atomic.StoreInt32(&shouldWait, 0) @@ -3577,14 +3431,10 @@ func TestChangefeedSchemaTTL(t *testing.T) { } } - // TODO(ssd): tenant tests skipped because of f.Server() use - // in forceTableGC - t.Run("sinkless", sinklessTest(testFn, feedTestNoTenants)) - t.Run("enterprise", enterpriseTest(testFn, feedTestNoTenants)) - t.Run("cloudstorage", cloudStorageTest(testFn, feedTestNoTenants)) - t.Run("kafka", kafkaTest(testFn, feedTestNoTenants)) - t.Run(`webhook`, webhookTest(testFn, feedTestNoTenants)) - t.Run(`pubsub`, pubsubTest(testFn, feedTestNoTenants)) + + // TODO(samiskin): tenant tests skipped because of forceTableGC not working + // with a TestTenantInterface + cdcTestWithSystem(t, testFn, feedTestNoTenants) } func TestChangefeedErrors(t *testing.T) { @@ -4159,14 +4009,14 @@ func TestChangefeedDescription(t *testing.T) { // Intentionally don't use the TestFeedFactory because we want to // control the placeholders. 
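// Tests that must reach the underlying system tenant (forceTableGC above, or
// the ExportRequest in fetchDescVersionModificationTime) use the
// cdcTestWithSystem variant, whose testFn receives a TestServerWithSystem
// carrying SystemServer and a tenant-aware Codec alongside Server and DB. A
// hedged sketch of that shape, assuming the same package helpers and this
// file's imports; the table name is illustrative only.
func TestChangefeedSystemSketch(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	testFn := func(t *testing.T, s TestServerWithSystem, f cdctest.TestFeedFactory) {
		sqlDB := sqlutils.MakeSQLRunner(s.DB)
		sqlDB.Exec(t, `CREATE TABLE gcme (a INT PRIMARY KEY)`)

		// Keys are built with the tenant-aware codec rather than
		// keys.SystemSQLCodec, so spans stay correct under a secondary tenant.
		tableID := sqlutils.QueryTableID(t, s.DB, `d`, "public", "gcme")
		tableStart := s.Codec.TablePrefix(tableID)
		_ = roachpb.Span{Key: tableStart, EndKey: tableStart.PrefixEnd()}

		// Operations with no tenant equivalent, such as forcing GC, go through
		// the system-tenant server.
		forceTableGC(t, s.SystemServer, sqlDB, "d", "gcme")
	}

	// feedTestNoTenants is passed where the system-tenant operation has no
	// tenant counterpart yet.
	cdcTestWithSystem(t, testFn, feedTestNoTenants)
}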
- s, db, stopServer := startTestServer(t, feedTestOptions{}) + s, stopServer := makeServer(t) defer stopServer() - sqlDB := sqlutils.MakeSQLRunner(db) + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1)`) - sink, cleanup := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(username.RootUser)) + sink, cleanup := sqlutils.PGUrl(t, s.Server.SQLAddr(), t.Name(), url.User(username.RootUser)) defer cleanup() sink.Scheme = changefeedbase.SinkSchemeExperimentalSQL sink.Path = `d` @@ -4190,8 +4040,8 @@ func TestChangefeedPauseUnpause(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'a'), (2, 'b'), (4, 'c'), (7, 'd'), (8, 'e')`) @@ -4241,11 +4091,7 @@ func TestChangefeedPauseUnpause(t *testing.T) { }) } - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestChangefeedPauseUnpauseCursorAndInitialScan(t *testing.T) { @@ -4253,8 +4099,8 @@ func TestChangefeedPauseUnpauseCursorAndInitialScan(t *testing.T) { defer log.Scope(t).Close(t) skip.UnderRaceWithIssue(t, 67565) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'a'), (2, 'b'), (4, 'c'), (7, 'd'), (8, 'e')`) @@ -4288,24 +4134,20 @@ func TestChangefeedPauseUnpauseCursorAndInitialScan(t *testing.T) { }) } - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestChangefeedUpdateProtectedTimestamp(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { + testFn := func(t *testing.T, s TestServerWithSystem, f cdctest.TestFeedFactory) { ctx := context.Background() ptsInterval := 50 * time.Millisecond changefeedbase.ProtectTimestampInterval.Override( - context.Background(), &f.Server().ClusterSettings().SV, ptsInterval) + context.Background(), &s.Server.ClusterSettings().SV, ptsInterval) - sqlDB := sqlutils.MakeSQLRunner(db) + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, "SET CLUSTER SETTING kv.protectedts.poll_interval = '10ms';") sqlDB.Exec(t, "SET CLUSTER SETTING kv.closed_timestamp.target_duration = '100ms'") // speeds up the test sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) @@ -4313,10 +4155,10 @@ func TestChangefeedUpdateProtectedTimestamp(t *testing.T) { defer closeFeed(t, foo) fooDesc := desctestutils.TestingGetPublicTableDescriptor( - f.Server().DB(), keys.SystemSQLCodec, "d", "foo") + s.SystemServer.DB(), s.Codec, "d", "foo") - ptp := 
f.Server().DistSQLServer().(*distsql.ServerImpl).ServerConfig.ProtectedTimestampProvider - store, err := f.Server().GetStores().(*kvserver.Stores).GetStore(f.Server().GetFirstStoreID()) + ptp := s.Server.DistSQLServer().(*distsql.ServerImpl).ServerConfig.ProtectedTimestampProvider + store, err := s.SystemServer.GetStores().(*kvserver.Stores).GetStore(s.SystemServer.GetFirstStoreID()) require.NoError(t, err) ptsReader := store.GetStoreConfig().ProtectedTimestampReader @@ -4332,7 +4174,7 @@ func TestChangefeedUpdateProtectedTimestamp(t *testing.T) { } mkGetProtections := func(t *testing.T, ptp protectedts.Provider, - srv serverutils.TestServerInterface, ptsReader spanconfig.ProtectedTSReader, + srv serverutils.TestTenantInterface, ptsReader spanconfig.ProtectedTSReader, span roachpb.Span) func() []hlc.Timestamp { return func() (r []hlc.Timestamp) { require.NoError(t, @@ -4352,19 +4194,19 @@ func TestChangefeedUpdateProtectedTimestamp(t *testing.T) { } // Setup helpers on the system.descriptors table. - descriptorTableKey := keys.SystemSQLCodec.TablePrefix(keys.DescriptorTableID) + descriptorTableKey := s.Codec.TablePrefix(keys.DescriptorTableID) descriptorTableSpan := roachpb.Span{ Key: descriptorTableKey, EndKey: descriptorTableKey.PrefixEnd(), } - getDescriptorTableProtection := mkGetProtections(t, ptp, f.Server(), ptsReader, + getDescriptorTableProtection := mkGetProtections(t, ptp, s.Server, ptsReader, descriptorTableSpan) // Setup helpers on the user table. - tableKey := keys.SystemSQLCodec.TablePrefix(uint32(fooDesc.GetID())) + tableKey := s.Codec.TablePrefix(uint32(fooDesc.GetID())) tableSpan := roachpb.Span{ Key: tableKey, EndKey: tableKey.PrefixEnd(), } - getTableProtection := mkGetProtections(t, ptp, f.Server(), ptsReader, tableSpan) + getTableProtection := mkGetProtections(t, ptp, s.Server, ptsReader, tableSpan) waitForProtectionAdvanced := func(ts hlc.Timestamp, getProtection func() []hlc.Timestamp) { check := func(protections []hlc.Timestamp) error { if len(protections) == 0 { @@ -4390,9 +4232,7 @@ func TestChangefeedUpdateProtectedTimestamp(t *testing.T) { } } - t.Run(`enterprise`, enterpriseTest(testFn, feedTestNoTenants)) - t.Run(`kafka`, kafkaTest(testFn, feedTestNoTenants)) - t.Run(`webhook`, webhookTest(testFn, feedTestNoTenants)) + cdcTestWithSystem(t, testFn, feedTestEnterpriseSinks) } func TestChangefeedProtectedTimestamps(t *testing.T) { @@ -4444,7 +4284,7 @@ func TestChangefeedProtectedTimestamps(t *testing.T) { return nil }) mkGetProtections = func(t *testing.T, ptp protectedts.Provider, - srv serverutils.TestServerInterface, ptsReader spanconfig.ProtectedTSReader, + srv serverutils.TestTenantInterface, ptsReader spanconfig.ProtectedTSReader, span roachpb.Span) func() []hlc.Timestamp { return func() (r []hlc.Timestamp) { require.NoError(t, @@ -4475,124 +4315,123 @@ func TestChangefeedProtectedTimestamps(t *testing.T) { } ) - t.Run(`enterprise`, enterpriseTest( - func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - defer close(done) - sqlDB := sqlutils.MakeSQLRunner(db) - sqlDB.Exec(t, `SET CLUSTER SETTING kv.protectedts.poll_interval = '10ms';`) - sqlDB.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.target_duration = '100ms';`) - sqlDB.Exec(t, `ALTER RANGE default CONFIGURE ZONE USING gc.ttlseconds = 100`) - sqlDB.Exec(t, `ALTER RANGE system CONFIGURE ZONE USING gc.ttlseconds = 100`) - sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) - sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'a'), (2, 'b'), (4, 'c'), (7, 'd'), (8, 'e')`) + testFn := 
func(t *testing.T, s TestServerWithSystem, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) + sqlDB.Exec(t, `SET CLUSTER SETTING kv.protectedts.poll_interval = '10ms';`) + sqlDB.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.target_duration = '100ms';`) + sqlDB.Exec(t, `ALTER RANGE default CONFIGURE ZONE USING gc.ttlseconds = 100`) + sqlDB.Exec(t, `ALTER RANGE system CONFIGURE ZONE USING gc.ttlseconds = 100`) + sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) + sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'a'), (2, 'b'), (4, 'c'), (7, 'd'), (8, 'e')`) - var tableID int - sqlDB.QueryRow(t, `SELECT table_id FROM crdb_internal.tables `+ - `WHERE name = 'foo' AND database_name = current_database()`). - Scan(&tableID) + var tableID int + sqlDB.QueryRow(t, `SELECT table_id FROM crdb_internal.tables `+ + `WHERE name = 'foo' AND database_name = current_database()`). + Scan(&tableID) - changefeedbase.ProtectTimestampInterval.Override( - context.Background(), &f.Server().ClusterSettings().SV, 100*time.Millisecond) + changefeedbase.ProtectTimestampInterval.Override( + context.Background(), &s.Server.ClusterSettings().SV, 100*time.Millisecond) - ptp := f.Server().DistSQLServer().(*distsql.ServerImpl).ServerConfig.ProtectedTimestampProvider - store, err := f.Server().GetStores().(*kvserver.Stores).GetStore(f.Server().GetFirstStoreID()) - require.NoError(t, err) - ptsReader := store.GetStoreConfig().ProtectedTimestampReader + ptp := s.Server.DistSQLServer().(*distsql.ServerImpl).ServerConfig.ProtectedTimestampProvider + store, err := s.SystemServer.GetStores().(*kvserver.Stores).GetStore(s.SystemServer.GetFirstStoreID()) + require.NoError(t, err) + ptsReader := store.GetStoreConfig().ProtectedTimestampReader - // Setup helpers on the system.descriptors table. - descriptorTableKey := keys.SystemSQLCodec.TablePrefix(keys.DescriptorTableID) - descriptorTableSpan := roachpb.Span{ - Key: descriptorTableKey, EndKey: descriptorTableKey.PrefixEnd(), - } - getDescriptorTableProtection := mkGetProtections(t, ptp, f.Server(), ptsReader, - descriptorTableSpan) - waitForDescriptorTableProtection := mkWaitForProtectionCond(t, getDescriptorTableProtection, - checkProtection) - waitForNoDescriptorTableProtection := mkWaitForProtectionCond(t, getDescriptorTableProtection, - checkNoProtection) - - // Setup helpers on the user table. - tableKey := keys.SystemSQLCodec.TablePrefix(uint32(tableID)) - tableSpan := roachpb.Span{ - Key: tableKey, EndKey: tableKey.PrefixEnd(), - } - getTableProtection := mkGetProtections(t, ptp, f.Server(), ptsReader, tableSpan) - waitForTableProtection := mkWaitForProtectionCond(t, getTableProtection, checkProtection) - waitForNoTableProtection := mkWaitForProtectionCond(t, getTableProtection, checkNoProtection) - waitForBlocked := requestBlockedScan() - waitForProtectionAdvanced := func(ts hlc.Timestamp, getProtection func() []hlc.Timestamp) { - check := func(protections []hlc.Timestamp) error { - if len(protections) != 0 { - for _, p := range protections { - if p.LessEq(ts) { - return errors.Errorf("expected protected timestamp to exceed %v, found %v", ts, p) - } + // Setup helpers on the system.descriptors table. 
+ descriptorTableKey := s.Codec.TablePrefix(keys.DescriptorTableID) + descriptorTableSpan := roachpb.Span{ + Key: descriptorTableKey, EndKey: descriptorTableKey.PrefixEnd(), + } + getDescriptorTableProtection := mkGetProtections(t, ptp, s.Server, ptsReader, + descriptorTableSpan) + waitForDescriptorTableProtection := mkWaitForProtectionCond(t, getDescriptorTableProtection, + checkProtection) + waitForNoDescriptorTableProtection := mkWaitForProtectionCond(t, getDescriptorTableProtection, + checkNoProtection) + + // Setup helpers on the user table. + tableKey := s.Codec.TablePrefix(uint32(tableID)) + tableSpan := roachpb.Span{ + Key: tableKey, EndKey: tableKey.PrefixEnd(), + } + getTableProtection := mkGetProtections(t, ptp, s.Server, ptsReader, tableSpan) + waitForTableProtection := mkWaitForProtectionCond(t, getTableProtection, checkProtection) + waitForNoTableProtection := mkWaitForProtectionCond(t, getTableProtection, checkNoProtection) + waitForBlocked := requestBlockedScan() + waitForProtectionAdvanced := func(ts hlc.Timestamp, getProtection func() []hlc.Timestamp) { + check := func(protections []hlc.Timestamp) error { + if len(protections) != 0 { + for _, p := range protections { + if p.LessEq(ts) { + return errors.Errorf("expected protected timestamp to exceed %v, found %v", ts, p) } } - return nil } - - mkWaitForProtectionCond(t, getProtection, check)() + return nil } - foo := feed(t, f, `CREATE CHANGEFEED FOR foo WITH resolved`) - defer closeFeed(t, foo) - { - // Ensure that there's a protected timestamp on startup that goes - // away after the initial scan. - unblock := waitForBlocked() - waitForTableProtection() - unblock() - assertPayloads(t, foo, []string{ - `foo: [1]->{"after": {"a": 1, "b": "a"}}`, - `foo: [2]->{"after": {"a": 2, "b": "b"}}`, - `foo: [4]->{"after": {"a": 4, "b": "c"}}`, - `foo: [7]->{"after": {"a": 7, "b": "d"}}`, - `foo: [8]->{"after": {"a": 8, "b": "e"}}`, - }) - resolved, _ := expectResolvedTimestamp(t, foo) - waitForProtectionAdvanced(resolved, getTableProtection) - } + mkWaitForProtectionCond(t, getProtection, check)() + } - { - // Ensure that a protected timestamp is created for a backfill due - // to a schema change and removed after. - waitForBlocked = requestBlockedScan() - sqlDB.Exec(t, `ALTER TABLE foo ADD COLUMN c INT NOT NULL DEFAULT 1`) - unblock := waitForBlocked() - waitForTableProtection() - waitForDescriptorTableProtection() - unblock() - assertPayloads(t, foo, []string{ - `foo: [1]->{"after": {"a": 1, "b": "a", "c": 1}}`, - `foo: [2]->{"after": {"a": 2, "b": "b", "c": 1}}`, - `foo: [4]->{"after": {"a": 4, "b": "c", "c": 1}}`, - `foo: [7]->{"after": {"a": 7, "b": "d", "c": 1}}`, - `foo: [8]->{"after": {"a": 8, "b": "e", "c": 1}}`, - }) - resolved, _ := expectResolvedTimestamp(t, foo) - waitForProtectionAdvanced(resolved, getTableProtection) - waitForProtectionAdvanced(resolved, getDescriptorTableProtection) - } + foo := feed(t, f, `CREATE CHANGEFEED FOR foo WITH resolved`) + defer closeFeed(t, foo) + { + // Ensure that there's a protected timestamp on startup that goes + // away after the initial scan. 
+ unblock := waitForBlocked() + waitForTableProtection() + unblock() + assertPayloads(t, foo, []string{ + `foo: [1]->{"after": {"a": 1, "b": "a"}}`, + `foo: [2]->{"after": {"a": 2, "b": "b"}}`, + `foo: [4]->{"after": {"a": 4, "b": "c"}}`, + `foo: [7]->{"after": {"a": 7, "b": "d"}}`, + `foo: [8]->{"after": {"a": 8, "b": "e"}}`, + }) + resolved, _ := expectResolvedTimestamp(t, foo) + waitForProtectionAdvanced(resolved, getTableProtection) + } - { - // Ensure that the protected timestamp is removed when the job is - // canceled. - waitForBlocked = requestBlockedScan() - sqlDB.Exec(t, `ALTER TABLE foo ADD COLUMN d INT NOT NULL DEFAULT 2`) - _ = waitForBlocked() - waitForTableProtection() - waitForDescriptorTableProtection() - sqlDB.Exec(t, `CANCEL JOB $1`, foo.(cdctest.EnterpriseTestFeed).JobID()) - waitForNoTableProtection() - waitForNoDescriptorTableProtection() - } - }, feedTestNoTenants, withArgsFn(func(args *base.TestServerArgs) { - storeKnobs := &kvserver.StoreTestingKnobs{} - storeKnobs.TestingRequestFilter = requestFilter - args.Knobs.Store = storeKnobs - }, - ))) + { + // Ensure that a protected timestamp is created for a backfill due + // to a schema change and removed after. + waitForBlocked = requestBlockedScan() + sqlDB.Exec(t, `ALTER TABLE foo ADD COLUMN c INT NOT NULL DEFAULT 1`) + unblock := waitForBlocked() + waitForTableProtection() + waitForDescriptorTableProtection() + unblock() + assertPayloads(t, foo, []string{ + `foo: [1]->{"after": {"a": 1, "b": "a", "c": 1}}`, + `foo: [2]->{"after": {"a": 2, "b": "b", "c": 1}}`, + `foo: [4]->{"after": {"a": 4, "b": "c", "c": 1}}`, + `foo: [7]->{"after": {"a": 7, "b": "d", "c": 1}}`, + `foo: [8]->{"after": {"a": 8, "b": "e", "c": 1}}`, + }) + resolved, _ := expectResolvedTimestamp(t, foo) + waitForProtectionAdvanced(resolved, getTableProtection) + waitForProtectionAdvanced(resolved, getDescriptorTableProtection) + } + + { + // Ensure that the protected timestamp is removed when the job is + // canceled. + waitForBlocked = requestBlockedScan() + sqlDB.Exec(t, `ALTER TABLE foo ADD COLUMN d INT NOT NULL DEFAULT 2`) + _ = waitForBlocked() + waitForTableProtection() + waitForDescriptorTableProtection() + sqlDB.Exec(t, `CANCEL JOB $1`, foo.(cdctest.EnterpriseTestFeed).JobID()) + waitForNoTableProtection() + waitForNoDescriptorTableProtection() + } + } + + cdcTestWithSystem(t, testFn, feedTestNoTenants, feedTestEnterpriseSinks, withArgsFn(func(args *base.TestServerArgs) { + storeKnobs := &kvserver.StoreTestingKnobs{} + storeKnobs.TestingRequestFilter = requestFilter + args.Knobs.Store = storeKnobs + })) } func TestChangefeedProtectedTimestampOnPause(t *testing.T) { @@ -4600,8 +4439,8 @@ func TestChangefeedProtectedTimestampOnPause(t *testing.T) { defer log.Scope(t).Close(t) testFn := func(shouldPause bool) cdcTestFn { - return func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + return func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'a'), (2, 'b'), (4, 'c'), (7, 'd'), (8, 'e')`) @@ -4627,7 +4466,7 @@ func TestChangefeedProtectedTimestampOnPause(t *testing.T) { // Pause the job then ensure that it has a reasonable protected timestamp. 
ctx := context.Background() - serverCfg := f.Server().DistSQLServer().(*distsql.ServerImpl).ServerConfig + serverCfg := s.Server.DistSQLServer().(*distsql.ServerImpl).ServerConfig jr := serverCfg.JobRegistry pts := serverCfg.ProtectedTimestampProvider @@ -4673,11 +4512,7 @@ func TestChangefeedProtectedTimestampOnPause(t *testing.T) { } testutils.RunTrueAndFalse(t, "protect_on_pause", func(t *testing.T, shouldPause bool) { - t.Run(`enterprise`, enterpriseTest(testFn(shouldPause), feedTestNoTenants)) - t.Run(`cloudstorage`, cloudStorageTest(testFn(shouldPause), feedTestNoTenants)) - t.Run(`kafka`, kafkaTest(testFn(shouldPause), feedTestNoTenants)) - t.Run(`webhook`, webhookTest(testFn(shouldPause), feedTestNoTenants)) - t.Run(`pubsub`, pubsubTest(testFn(shouldPause), feedTestNoTenants)) + cdcTest(t, testFn(shouldPause), feedTestEnterpriseSinks) }) } @@ -4686,8 +4521,8 @@ func TestManyChangefeedsOneTable(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (0, 'init')`) @@ -4731,20 +4566,15 @@ func TestManyChangefeedsOneTable(t *testing.T) { }) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } func TestUnspecifiedPrimaryKey(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT)`) var id0 int sqlDB.QueryRow(t, `INSERT INTO foo VALUES (0) RETURNING rowid`).Scan(&id0) @@ -4761,10 +4591,7 @@ func TestUnspecifiedPrimaryKey(t *testing.T) { }) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } // TestChangefeedNodeShutdown ensures that an enterprise changefeed continues @@ -4832,8 +4659,8 @@ func TestChangefeedTelemetry(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (1)`) sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY)`) @@ -4869,8 +4696,8 @@ func TestChangefeedTelemetry(t *testing.T) { require.Equal(t, int32(1), counts[`changefeed.create.num_tables.2`]) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) + cdcTest(t, testFn, feedTestForceSink("sinkless")) + cdcTest(t, testFn, feedTestForceSink("enterprise")) } // Regression test for #41694. @@ -4881,8 +4708,8 @@ func TestChangefeedRestartDuringBackfill(t *testing.T) { // TODO(yevgeniy): Rework this test. It's too brittle. 
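// A recurring pattern in the hunks above and below: testing knobs are read off
// s.TestingKnobs instead of f.Server().TestingKnobs() (or a *server.TestServer
// cast), and cluster-setting overrides go through s.Server.ClusterSettings().
// A condensed sketch, assuming this package's TestServer and TestingKnobs
// types and this file's imports; the helper name is illustrative only.
func knobsAndSettingsSketch(s TestServer) *TestingKnobs {
	knobs := s.TestingKnobs.
		DistSQL.(*execinfra.TestingKnobs).
		Changefeed.(*TestingKnobs)

	// Settings such as the checkpoint frequency are overridden directly on the
	// server's settings values.
	changefeedbase.FrontierCheckpointFrequency.Override(
		context.Background(), &s.Server.ClusterSettings().SV, 1)

	return knobs
}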
- testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - knobs := f.Server().TestingKnobs(). + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). Changefeed.(*TestingKnobs) beforeEmitRowCh := make(chan error, 20) @@ -4901,7 +4728,7 @@ func TestChangefeedRestartDuringBackfill(t *testing.T) { } } - sqlDB := sqlutils.MakeSQLRunner(db) + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (0), (1), (2), (3)`) @@ -4990,7 +4817,8 @@ func TestChangefeedRestartDuringBackfill(t *testing.T) { } knobs.Store.(*kvserver.StoreTestingKnobs).UseSystemConfigSpanForQueues = true }) - t.Run(`kafka`, kafkaTest(testFn, useSysCfgInKV)) + + cdcTest(t, testFn, feedTestForceSink("kafka"), useSysCfgInKV) } func TestChangefeedHandlesDrainingNodes(t *testing.T) { @@ -5049,7 +4877,8 @@ func TestChangefeedHandlesDrainingNodes(t *testing.T) { // Create a factory which executes the CREATE CHANGEFEED statement on server 0. // This statement should fail, but the job itself ought to be creaated. // After some time, that job should be adopted by another node, and executed successfully. - f := makeCloudFeedFactory(tc.Server(1), tc.ServerConn(0), sinkDir) + f, closeSink := makeFeedFactory(t, randomSinkType(feedTestEnterpriseSinks), tc.Server(1), tc.ServerConn(0)) + defer closeSink() feed := feed(t, f, "CREATE CHANGEFEED FOR foo") defer closeFeed(t, feed) @@ -5082,8 +4911,8 @@ func TestChangefeedPrimaryKeyChangeWorks(t *testing.T) { skip.UnderRace(t) skip.UnderShort(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING NOT NULL)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (0, 'initial')`) sqlDB.Exec(t, `UPSERT INTO foo VALUES (0, 'updated')`) @@ -5164,12 +4993,7 @@ INSERT INTO foo VALUES (1, 'f'); }) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } // Primary key changes are supported by changefeeds starting in 21.1. 
This test @@ -5187,8 +5011,8 @@ func TestChangefeedPrimaryKeyChangeWorksWithMultipleTables(t *testing.T) { skip.UnderRace(t) skip.UnderShort(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING NOT NULL)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (0, 'initial')`) sqlDB.Exec(t, `UPSERT INTO foo VALUES (0, 'updated')`) @@ -5266,12 +5090,7 @@ INSERT INTO bar VALUES (6, 'f'); }) } - t.Run(`sinkless`, sinklessTest(testFn)) - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn) } // TestChangefeedCheckpointSchemaChange tests to make sure that writes that @@ -5289,8 +5108,8 @@ func TestChangefeedCheckpointSchemaChange(t *testing.T) { skip.UnderRace(t) skip.UnderShort(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING NOT NULL)`) sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY, b STRING NOT NULL)`) sqlDB.Exec(t, `INSERT INTO foo VALUES (0, 'initial')`) @@ -5318,7 +5137,7 @@ func TestChangefeedCheckpointSchemaChange(t *testing.T) { `bar: [0]->{"after": {"a": 0, "b": "initial"}}`, }) - require.NoError(t, crdb.ExecuteTx(context.Background(), db, nil, func(tx *gosql.Tx) error { + require.NoError(t, crdb.ExecuteTx(context.Background(), s.DB, nil, func(tx *gosql.Tx) error { for _, stmt := range []string{ `CREATE TABLE baz ()`, `INSERT INTO foo VALUES (2, 'initial')`, @@ -5416,14 +5235,9 @@ func TestChangefeedCheckpointSchemaChange(t *testing.T) { require.NotNil(t, next.Resolved) } }) - } - t.Run("enterprise", enterpriseTest(testFn)) - t.Run("cloudstorage", cloudStorageTest(testFn)) - t.Run("kafka", kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestChangefeedBackfillCheckpoint(t *testing.T) { @@ -5449,17 +5263,17 @@ func TestChangefeedBackfillCheckpoint(t *testing.T) { return err } - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServerWithSystem, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) valRange := []int{1, 1000} sqlDB.Exec(t, `CREATE TABLE foo(a INT PRIMARY KEY)`) sqlDB.Exec(t, fmt.Sprintf(`INSERT INTO foo (a) SELECT * FROM generate_series(%d, %d)`, valRange[0], valRange[1])) fooDesc := desctestutils.TestingGetPublicTableDescriptor( - f.Server().DB(), keys.SystemSQLCodec, "d", "foo") - tableSpan := fooDesc.PrimaryIndexSpan(keys.SystemSQLCodec) + s.SystemServer.DB(), s.Codec, "d", "foo") + tableSpan := fooDesc.PrimaryIndexSpan(s.Codec) - knobs := f.Server().(*server.TestServer).Cfg.TestingKnobs. + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). Changefeed.(*TestingKnobs) @@ -5492,11 +5306,11 @@ func TestChangefeedBackfillCheckpoint(t *testing.T) { // Checkpoint progress frequently, and set the checkpoint size limit. 
changefeedbase.FrontierCheckpointFrequency.Override( - context.Background(), &f.Server().ClusterSettings().SV, 1) + context.Background(), &s.Server.ClusterSettings().SV, 1) changefeedbase.FrontierCheckpointMaxBytes.Override( - context.Background(), &f.Server().ClusterSettings().SV, maxCheckpointSize) + context.Background(), &s.Server.ClusterSettings().SV, maxCheckpointSize) - registry := f.Server().JobRegistry().(*jobs.Registry) + registry := s.Server.JobRegistry().(*jobs.Registry) foo := feed(t, f, `CREATE CHANGEFEED FOR foo WITH resolved='100ms'`) // Some test feeds (kafka) are not buffered, so we have to consume messages. var shouldDrain int32 = 1 @@ -5607,9 +5421,7 @@ func TestChangefeedBackfillCheckpoint(t *testing.T) { // TODO(ssd): Tenant testing disabled because of use of DB() for _, sz := range []int64{100 << 20, 100} { maxCheckpointSize = sz - t.Run(fmt.Sprintf("enterprise-limit=%s", humanize.Bytes(uint64(sz))), enterpriseTest(testFn, feedTestNoTenants)) - t.Run(fmt.Sprintf("cloudstorage-limit=%s", humanize.Bytes(uint64(sz))), cloudStorageTest(testFn, feedTestNoTenants)) - t.Run(fmt.Sprintf("kafka-limit=%s", humanize.Bytes(uint64(sz))), kafkaTest(testFn, feedTestNoTenants)) + cdcTestNamedWithSystem(t, fmt.Sprintf("limit=%s", humanize.Bytes(uint64(sz))), testFn, feedTestEnterpriseSinks) } } @@ -5669,8 +5481,8 @@ func TestChangefeedOrderingWithErrors(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) foo := feed(t, f, `CREATE CHANGEFEED FOR foo WITH updated`) @@ -5718,20 +5530,20 @@ func TestChangefeedOrderingWithErrors(t *testing.T) { // only used for webhook sink for now since it's the only testfeed where // we can control the ordering of errors - t.Run(`webhook`, webhookTest(testFn)) + cdcTest(t, testFn, feedTestForceSink("webhook")) } func TestChangefeedOnErrorOption(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) t.Run(`pause on error`, func(t *testing.T) { sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) - knobs := f.Server().TestingKnobs(). + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). Changefeed.(*TestingKnobs) knobs.BeforeEmitRow = func(_ context.Context) error { @@ -5748,7 +5560,7 @@ func TestChangefeedOnErrorOption(t *testing.T) { // Verify job progress contains paused on error status. jobID := foo.(cdctest.EnterpriseTestFeed).JobID() - registry := f.Server().JobRegistry().(*jobs.Registry) + registry := s.Server.JobRegistry().(*jobs.Registry) job, err := registry.LoadJob(context.Background(), jobID) require.NoError(t, err) require.Contains(t, job.Progress().RunningStatus, "job failed (should fail with custom error) but is being paused because of on_error=pause") @@ -5774,7 +5586,7 @@ func TestChangefeedOnErrorOption(t *testing.T) { t.Run(`fail on error`, func(t *testing.T) { sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY, b STRING)`) - knobs := f.Server().TestingKnobs(). + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). 
Changefeed.(*TestingKnobs) knobs.BeforeEmitRow = func(_ context.Context) error { @@ -5794,7 +5606,7 @@ func TestChangefeedOnErrorOption(t *testing.T) { t.Run(`default`, func(t *testing.T) { sqlDB.Exec(t, `CREATE TABLE quux (a INT PRIMARY KEY, b STRING)`) - knobs := f.Server().TestingKnobs(). + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). Changefeed.(*TestingKnobs) knobs.BeforeEmitRow = func(_ context.Context) error { @@ -5813,25 +5625,17 @@ func TestChangefeedOnErrorOption(t *testing.T) { }) } - t.Run(`enterprise`, enterpriseTest(testFn)) - t.Run(`cloudstorage`, cloudStorageTest(testFn)) - t.Run(`kafka`, kafkaTest(testFn)) - t.Run(`webhook`, webhookTest(testFn)) - t.Run(`pubsub`, pubsubTest(testFn)) + cdcTest(t, testFn, feedTestEnterpriseSinks) } func TestDistSenderRangeFeedPopulatesVirtualTable(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - s, db, _ := serverutils.StartServer(t, base.TestServerArgs{ - Knobs: base.TestingKnobs{ - JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(), - }, - }) - defer s.Stopper().Stop(context.Background()) + s, cleanup := makeServer(t) + defer cleanup() - sqlDB := sqlutils.MakeSQLRunner(db) + sqlDB := sqlutils.MakeSQLRunner(s.DB) sqlDB.Exec(t, `SET CLUSTER SETTING kv.rangefeed.enabled='true';`) sqlDB.Exec(t, `CREATE TABLE tbl (a INT, b STRING);`) sqlDB.Exec(t, `INSERT INTO tbl VALUES (1, 'one'), (2, 'two'), (3, 'three');`) @@ -5839,9 +5643,11 @@ func TestDistSenderRangeFeedPopulatesVirtualTable(t *testing.T) { var tableID int sqlDB.QueryRow(t, "SELECT table_id FROM crdb_internal.tables WHERE name='tbl'").Scan(&tableID) + tableKey := s.Codec.TablePrefix(uint32(tableID)) + numRangesQuery := fmt.Sprintf( - "SELECT count(*) FROM crdb_internal.active_range_feeds WHERE range_start LIKE '/Table/%d/%%'", - tableID) + "SELECT count(*) FROM crdb_internal.active_range_feeds WHERE range_start LIKE '%s/%%'", + tableKey) sqlDB.CheckQueryResultsRetry(t, numRangesQuery, [][]string{{"1"}}) } @@ -5850,8 +5656,8 @@ func TestChangefeedCaseInsensitiveOpts(t *testing.T) { defer log.Scope(t).Close(t) // Sanity check for case insensitive options - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - sqlDB := sqlutils.MakeSQLRunner(db) + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + sqlDB := sqlutils.MakeSQLRunner(s.DB) // Set up a type and table. sqlDB.Exec(t, `CREATE TABLE insensitive (x INT PRIMARY KEY, y string)`) sqlDB.Exec(t, `INSERT INTO insensitive VALUES (0, 'hello')`) @@ -5881,15 +5687,17 @@ func TestChangefeedCaseInsensitiveOpts(t *testing.T) { assertPayloads(t, cf, []string{`insensitive: [0]->{"after": {"x": 0, "y": "hello"}}`}) }) } - t.Run(`sinkless`, sinklessTest(testFn)) + + // Some sinks are incompatible with envelope + cdcTest(t, testFn, feedTestRestrictSinks("sinkless", "enterprise", "kafka")) } func TestChangefeedEndTime(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) - testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - knobs := f.Server().TestingKnobs(). + testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) { + knobs := s.TestingKnobs. DistSQL.(*execinfra.TestingKnobs). 
 			Changefeed.(*TestingKnobs)
 		endTimeReached := make(chan struct{})
@@ -5902,12 +5710,12 @@ func TestChangefeedEndTime(t *testing.T) {
 			}
 		}
 
-		sqlDB := sqlutils.MakeSQLRunner(db)
+		sqlDB := sqlutils.MakeSQLRunner(s.DB)
 		sqlDB.Exec(t, "CREATE TABLE foo (a INT PRIMARY KEY)")
 		sqlDB.Exec(t, "INSERT INTO foo VALUES (1), (2), (3)")
 
-		fakeEndTime := f.Server().Clock().Now().Add(int64(time.Hour), 0).AsOfSystemTime()
+		fakeEndTime := s.Server.Clock().Now().Add(int64(time.Hour), 0).AsOfSystemTime()
 		feed := feed(t, f, "CREATE CHANGEFEED FOR foo WITH end_time = $1", fakeEndTime)
 		defer closeFeed(t, feed)
@@ -5924,17 +5732,16 @@ func TestChangefeedEndTime(t *testing.T) {
 			return s == jobs.StatusSucceeded
 		}))
 	}
-	t.Run(`enterprise`, enterpriseTest(testFn))
-	t.Run(`cloudstorage`, cloudStorageTest(testFn))
-	t.Run(`kafka`, kafkaTest(testFn))
+
+	cdcTest(t, testFn, feedTestEnterpriseSinks)
 }
 
 func TestChangefeedEndTimeWithCursor(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	defer log.Scope(t).Close(t)
 
-	testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
-		knobs := f.Server().TestingKnobs().
+	testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) {
+		knobs := s.TestingKnobs.
 			DistSQL.(*execinfra.TestingKnobs).
 			Changefeed.(*TestingKnobs)
 		endTimeReached := make(chan struct{})
@@ -5947,7 +5754,7 @@ func TestChangefeedEndTimeWithCursor(t *testing.T) {
 			}
 		}
 
-		sqlDB := sqlutils.MakeSQLRunner(db)
+		sqlDB := sqlutils.MakeSQLRunner(s.DB)
 		sqlDB.Exec(t, "CREATE TABLE foo (a INT PRIMARY KEY)")
 		sqlDB.Exec(t, "INSERT INTO foo VALUES (1), (2), (3)")
@@ -5956,7 +5763,7 @@ func TestChangefeedEndTimeWithCursor(t *testing.T) {
 		sqlDB.QueryRow(t, "SELECT (cluster_logical_timestamp())").Scan(&tsCursor)
 		sqlDB.Exec(t, "INSERT INTO foo VALUES (4), (5), (6)")
 
-		fakeEndTime := f.Server().Clock().Now().Add(int64(time.Hour), 0).AsOfSystemTime()
+		fakeEndTime := s.Server.Clock().Now().Add(int64(time.Hour), 0).AsOfSystemTime()
 		feed := feed(t, f, "CREATE CHANGEFEED FOR foo WITH cursor = $1, end_time = $2, no_initial_scan", tsCursor, fakeEndTime)
 		defer closeFeed(t, feed)
@@ -5972,9 +5779,10 @@ func TestChangefeedEndTimeWithCursor(t *testing.T) {
 			return s == jobs.StatusSucceeded
 		}))
 	}
-	t.Run(`enterprise`, enterpriseTest(testFn))
-	t.Run(`cloudstorage`, cloudStorageTest(testFn))
-	t.Run(`kafka`, kafkaTest(testFn))
+
+	// TODO: Fix sinkless feeds not providing pre-close events if Next is called
+	// after the feed was closed
+	cdcTest(t, testFn, feedTestEnterpriseSinks)
 }
 
 func TestChangefeedOnlyInitialScan(t *testing.T) {
@@ -5986,8 +5794,8 @@ func TestChangefeedOnlyInitialScan(t *testing.T) {
 		`initial backfill only`: `CREATE CHANGEFEED FOR foo WITH initial_scan = 'only'`,
 	}
 
-	testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
-		sqlDB := sqlutils.MakeSQLRunner(db)
+	testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) {
+		sqlDB := sqlutils.MakeSQLRunner(s.DB)
 
 		for testName, changefeedStmt := range initialScanOnlyTests {
 			t.Run(testName, func(t *testing.T) {
@@ -6028,9 +5836,8 @@ func TestChangefeedOnlyInitialScan(t *testing.T) {
 			})
 		}
 	}
-	t.Run(`enterprise`, enterpriseTest(testFn))
-	t.Run(`cloudstorage`, cloudStorageTest(testFn))
-	t.Run(`kafka`, kafkaTest(testFn))
+
+	cdcTest(t, testFn, feedTestEnterpriseSinks)
 }
 
 func TestChangefeedOnlyInitialScanCSV(t *testing.T) {
@@ -6070,8 +5877,8 @@ func TestChangefeedOnlyInitialScanCSV(t *testing.T) {
 		},
 	}
 
-	testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
-		sqlDB := sqlutils.MakeSQLRunner(db)
+	testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) {
+		sqlDB := sqlutils.MakeSQLRunner(s.DB)
 
 		for testName, testData := range tests {
 			t.Run(testName, func(t *testing.T) {
@@ -6117,11 +5924,8 @@ func TestChangefeedOnlyInitialScanCSV(t *testing.T) {
 			})
 		}
 	}
-	t.Run(`enterprise`, enterpriseTest(testFn))
-	t.Run(`cloudstorage`, cloudStorageTest(testFn))
-	t.Run(`kafka`, kafkaTest(testFn))
-	t.Run(`webhook`, webhookTest(testFn))
-	t.Run(`pubsub`, pubsubTest(testFn))
+
+	cdcTest(t, testFn, feedTestEnterpriseSinks)
 }
 
 func TestChangefeedOnlyInitialScanCSVSinkless(t *testing.T) {
@@ -6133,8 +5937,8 @@ func TestChangefeedOnlyInitialScanCSVSinkless(t *testing.T) {
 		`initial backfill only with csv`: `CREATE CHANGEFEED FOR foo WITH initial_scan = 'only', format = csv`,
 	}
 
-	testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
-		sqlDB := sqlutils.MakeSQLRunner(db)
+	testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) {
+		sqlDB := sqlutils.MakeSQLRunner(s.DB)
 
 		for testName, changefeedStmt := range initialScanOnlyCSVTests {
 			t.Run(testName, func(t *testing.T) {
@@ -6173,15 +5977,16 @@ func TestChangefeedOnlyInitialScanCSVSinkless(t *testing.T) {
 			})
 		}
 	}
-	t.Run(`sinkless`, sinklessTest(testFn))
+
+	cdcTest(t, testFn, feedTestForceSink("sinkless"))
 }
 
 func TestChangefeedPrimaryKeyFilter(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	defer log.Scope(t).Close(t)
 
-	testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
-		sqlDB := sqlutils.MakeSQLRunner(db)
+	testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) {
+		sqlDB := sqlutils.MakeSQLRunner(s.DB)
 		sqlDB.Exec(t, "CREATE TABLE foo (a INT PRIMARY KEY, b string)")
 		sqlDB.Exec(t, "CREATE TABLE bar (a INT PRIMARY KEY, b string)")
 		sqlDB.Exec(t, "INSERT INTO foo SELECT * FROM generate_series(1, 20)")
@@ -6220,9 +6025,7 @@ func TestChangefeedPrimaryKeyFilter(t *testing.T) {
 		})
 	}
 
-	t.Run(`enterprise`, enterpriseTest(testFn))
-	t.Run(`cloudstorage`, cloudStorageTest(testFn))
-	t.Run(`kafka`, kafkaTest(testFn))
+	cdcTest(t, testFn)
 }
 
 func startMonitorWithBudget(budget int64) *mon.BytesMonitor {
@@ -6314,11 +6117,11 @@ func TestChangefeedFlushesSinkToReleaseMemory(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	defer log.Scope(t).Close(t)
 
-	s, db, stopServer := startTestServer(t, newTestOptions())
+	s, stopServer := makeServer(t)
 	defer stopServer()
 
-	sqlDB := sqlutils.MakeSQLRunner(db)
-	knobs := s.TestingKnobs().
+	sqlDB := sqlutils.MakeSQLRunner(s.DB)
+	knobs := s.TestingKnobs.
 		DistSQL.(*execinfra.TestingKnobs).
 		Changefeed.(*TestingKnobs)
 
@@ -6387,33 +6190,22 @@ func TestChangefeedMultiPodTenantPlanning(t *testing.T) {
 		TestingKnobs: tenantKnobs,
 		Existing:     false,
 	}
-	server1, db1 := serverutils.StartTenant(t, tc.Server(0), tenant1Args)
-	tenantRunner := sqlutils.MakeSQLRunner(db1)
+	tenant1Server, tenant1DB := serverutils.StartTenant(t, tc.Server(0), tenant1Args)
+	tenantRunner := sqlutils.MakeSQLRunner(tenant1DB)
 	tenantRunner.ExecMultiple(t, strings.Split(serverSetupStatements, ";")...)
-	testTenant := &testServerShim{server1, tc.Server(0)}
-	sql1 := sqlutils.MakeSQLRunner(db1)
-	defer db1.Close()
+	sql1 := sqlutils.MakeSQLRunner(tenant1DB)
+	defer tenant1DB.Close()
 	tenant2Args := tenant1Args
 	tenant2Args.Existing = true
 	_, db2 := serverutils.StartTenant(t, tc.Server(1), tenant2Args)
 	defer db2.Close()
 
-	// Ensure both nodes are live and able to be distributed to
-	testutils.SucceedsSoon(t, func() error {
-		status := server1.StatusServer().(serverpb.SQLStatusServer)
-		var nodes *serverpb.NodesListResponse
-		var err error
-		for nodes == nil || len(nodes.Nodes) != 2 {
-			nodes, err = status.NodesList(context.Background(), nil)
-			if err != nil {
-				return err
-			}
-		}
-		return nil
-	})
+	// Ensure both pods can be assigned work
+	waitForTenantPodsActive(t, tenant1Server, 2)
 
-	feedFactory := makeKafkaFeedFactory(testTenant, db1)
+	feedFactory, cleanupSink := makeFeedFactory(t, randomSinkType(feedTestEnterpriseSinks), tenant1Server, tenant1DB)
+	defer cleanupSink()
 
 	// Run a changefeed across two tables to guarantee multiple spans that can be spread across the aggregators
 	sql1.Exec(t, "CREATE TABLE foo (a INT PRIMARY KEY)")
@@ -6438,19 +6230,19 @@ func TestChangefeedCreateTelemetryLogs(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	defer log.Scope(t).Close(t)
 
-	s, db, stopServer := startTestServer(t, newTestOptions())
+	s, stopServer := makeServer(t)
 	defer stopServer()
 
-	sqlDB := sqlutils.MakeSQLRunner(db)
+	sqlDB := sqlutils.MakeSQLRunner(s.DB)
 	sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`)
 	sqlDB.Exec(t, `INSERT INTO foo VALUES (0, 'initial')`)
 	sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY, b STRING)`)
 	sqlDB.Exec(t, `INSERT INTO bar VALUES (0, 'initial')`)
 
 	t.Run(`core_sink_type`, func(t *testing.T) {
-		coreSink, cleanup := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(username.RootUser))
+		coreSink, cleanup := sqlutils.PGUrl(t, s.Server.SQLAddr(), t.Name(), url.User(username.RootUser))
 		defer cleanup()
-		coreFeedFactory := makeSinklessFeedFactory(s, coreSink)
+		coreFeedFactory := makeSinklessFeedFactory(s.Server, coreSink)
 
 		beforeCreateSinkless := timeutil.Now()
 		coreFeed := feed(t, coreFeedFactory, `CREATE CHANGEFEED FOR foo`)
@@ -6462,7 +6254,7 @@ func TestChangefeedCreateTelemetryLogs(t *testing.T) {
 	})
 
 	t.Run(`gcpubsub_sink_type with options`, func(t *testing.T) {
-		pubsubFeedFactory := makePubsubFeedFactory(s, db)
+		pubsubFeedFactory := makePubsubFeedFactory(s.Server, s.DB)
 		beforeCreatePubsub := timeutil.Now()
 		pubsubFeed := feed(t, pubsubFeedFactory, `CREATE CHANGEFEED FOR foo, bar WITH resolved, no_initial_scan`)
 		defer closeFeed(t, pubsubFeed)
@@ -6494,23 +6286,22 @@ func TestChangefeedFailedTelemetryLogs(t *testing.T) {
 	}
 
 	t.Run(`connection_closed`, func(t *testing.T) {
-		s, db, stopServer := startTestServer(t, newTestOptions())
+		s, stopServer := makeServer(t)
 		defer stopServer()
 
-		sqlDB := sqlutils.MakeSQLRunner(db)
+		sqlDB := sqlutils.MakeSQLRunner(s.DB)
 		sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`)
 		sqlDB.Exec(t, `INSERT INTO foo VALUES (0, 'initial')`)
 		sqlDB.Exec(t, `UPSERT INTO foo VALUES (0, 'updated')`)
 
-		coreSink, coreSinkCleanup := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(username.RootUser))
-		coreFactory := makeSinklessFeedFactory(s, coreSink)
+		coreFactory, sinkCleanup := makeFeedFactory(t, "sinkless", s.Server, s.DB)
 		coreFeed := feed(t, coreFactory, `CREATE CHANGEFEED FOR foo`)
 		assertPayloads(t, coreFeed, []string{
 			`foo: [0]->{"after": {"a": 0, "b": "updated"}}`,
 		})
 		beforeCoreSinkClose := timeutil.Now()
-		coreSinkCleanup()
+		sinkCleanup()
 		closeFeed(t, coreFeed)
 
 		failLogs := waitForLogs(t, beforeCoreSinkClose)
@@ -6518,8 +6309,8 @@ func TestChangefeedFailedTelemetryLogs(t *testing.T) {
 		require.Equal(t, failLogs[0].FailureType, changefeedbase.ConnectionClosed)
 	})
 
-	t.Run(`user_input`, enterpriseTest(func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
-		sqlDB := sqlutils.MakeSQLRunner(db)
+	cdcTestNamed(t, "user_input", func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) {
+		sqlDB := sqlutils.MakeSQLRunner(s.DB)
 		sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`)
 
 		beforeCreate := timeutil.Now()
@@ -6529,13 +6320,13 @@ func TestChangefeedFailedTelemetryLogs(t *testing.T) {
 		failLogs := waitForLogs(t, beforeCreate)
 		require.Equal(t, 1, len(failLogs))
 		require.Equal(t, failLogs[0].FailureType, changefeedbase.UserInput)
-	}))
+	}, feedTestEnterpriseSinks)
 
-	t.Run(`unknown_error`, pubsubTest(func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
-		sqlDB := sqlutils.MakeSQLRunner(db)
+	cdcTestNamed(t, "unknown_error", func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) {
+		sqlDB := sqlutils.MakeSQLRunner(s.DB)
 		sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`)
 
-		knobs := f.Server().TestingKnobs().
+		knobs := s.TestingKnobs.
 			DistSQL.(*execinfra.TestingKnobs).
 			Changefeed.(*TestingKnobs)
 		knobs.BeforeEmitRow = func(_ context.Context) error {
@@ -6554,5 +6345,5 @@ func TestChangefeedFailedTelemetryLogs(t *testing.T) {
 		require.Equal(t, failLogs[0].FailureType, changefeedbase.UnknownError)
 		require.Equal(t, failLogs[0].SinkType, `gcpubsub`)
 		require.Equal(t, failLogs[0].NumTables, int32(1))
-	}))
+	}, feedTestForceSink("pubsub"))
 }
diff --git a/pkg/ccl/changefeedccl/encoder_test.go b/pkg/ccl/changefeedccl/encoder_test.go
index bac4d2fdc329..29e5eac175c2 100644
--- a/pkg/ccl/changefeedccl/encoder_test.go
+++ b/pkg/ccl/changefeedccl/encoder_test.go
@@ -256,10 +256,10 @@ func TestAvroEncoder(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	defer log.Scope(t).Close(t)
 
-	testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
+	testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) {
 		ctx := context.Background()
 
-		sqlDB := sqlutils.MakeSQLRunner(db)
+		sqlDB := sqlutils.MakeSQLRunner(s.DB)
 		sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`)
 		var ts1 string
 		sqlDB.QueryRow(t,
@@ -288,7 +288,7 @@ func TestAvroEncoder(t *testing.T) {
 		require.NoError(t, err)
 
 		var ts2 string
-		require.NoError(t, crdb.ExecuteTx(ctx, db, nil /* txopts */, func(tx *gosql.Tx) error {
+		require.NoError(t, crdb.ExecuteTx(ctx, s.DB, nil /* txopts */, func(tx *gosql.Tx) error {
 			return tx.QueryRow(
 				`INSERT INTO foo VALUES (3, 'baz') RETURNING cluster_logical_timestamp()`,
 			).Scan(&ts2)
@@ -300,7 +300,7 @@ func TestAvroEncoder(t *testing.T) {
 		})
 	}
 
-	t.Run(`kafka`, kafkaTest(testFn))
+	cdcTest(t, testFn, feedTestForceSink("kafka"))
 }
 
 func TestAvroEncoderWithTLS(t *testing.T) {
@@ -421,8 +421,8 @@ func TestAvroArray(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	defer log.Scope(t).Close(t)
 
-	testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
-		sqlDB := sqlutils.MakeSQLRunner(db)
+	testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) {
+		sqlDB := sqlutils.MakeSQLRunner(s.DB)
 		sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b INT[])`)
 		sqlDB.Exec(t, `INSERT INTO foo VALUES
@@ -458,15 +458,15 @@ func TestAvroArray(t *testing.T) {
 	}
 
-	t.Run(`kafka`, kafkaTest(testFn))
+	cdcTest(t, testFn, feedTestForceSink("kafka"))
 }
 
 func TestAvroArrayCap(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	defer log.Scope(t).Close(t)
 
-	testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
-		sqlDB := sqlutils.MakeSQLRunner(db)
+	testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) {
+		sqlDB := sqlutils.MakeSQLRunner(s.DB)
 		sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b INT[])`)
 		sqlDB.Exec(t, `INSERT INTO foo VALUES (0, ARRAY[])`)
@@ -497,15 +497,15 @@ func TestAvroArrayCap(t *testing.T) {
 	}
 
-	t.Run(`kafka`, kafkaTest(testFn))
+	cdcTest(t, testFn, feedTestForceSink("kafka"))
 }
 
 func TestAvroCollatedString(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	defer log.Scope(t).Close(t)
 
-	testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
-		sqlDB := sqlutils.MakeSQLRunner(db)
+	testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) {
+		sqlDB := sqlutils.MakeSQLRunner(s.DB)
 		sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b string collate "fr-CA")`)
 		sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'désolée' collate "fr-CA")`)
@@ -518,15 +518,15 @@ func TestAvroCollatedString(t *testing.T) {
 		})
 	}
 
-	t.Run(`kafka`, kafkaTest(testFn))
+	cdcTest(t, testFn, feedTestForceSink("kafka"))
 }
 
 func TestAvroEnum(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	defer log.Scope(t).Close(t)
 
-	testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
-		sqlDB := sqlutils.MakeSQLRunner(db)
+	testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) {
+		sqlDB := sqlutils.MakeSQLRunner(s.DB)
 		sqlDB.Exec(t, `CREATE TYPE status AS ENUM ('open', 'closed', 'inactive')`)
 		sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b status, c int default 0)`)
 		sqlDB.Exec(t, `INSERT INTO foo VALUES (1, 'open')`)
@@ -577,15 +577,15 @@ func TestAvroEnum(t *testing.T) {
 	}
 
-	t.Run(`kafka`, kafkaTest(testFn))
+	cdcTest(t, testFn, feedTestForceSink("kafka"))
 }
 
 func TestAvroSchemaNaming(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	defer log.Scope(t).Close(t)
 
-	testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
-		sqlDB := sqlutils.MakeSQLRunner(db)
+	testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) {
+		sqlDB := sqlutils.MakeSQLRunner(s.DB)
 		sqlDB.Exec(t, `CREATE DATABASE movr`)
 		sqlDB.Exec(t, `CREATE TABLE movr.drivers (id INT PRIMARY KEY, name STRING)`)
 		sqlDB.Exec(t,
@@ -678,15 +678,15 @@ func TestAvroSchemaNaming(t *testing.T) {
 	}
 
-	t.Run(`kafka`, kafkaTest(testFn))
+	cdcTest(t, testFn, feedTestForceSink("kafka"))
 }
 
 func TestAvroSchemaNamespace(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	defer log.Scope(t).Close(t)
 
-	testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
-		sqlDB := sqlutils.MakeSQLRunner(db)
+	testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) {
+		sqlDB := sqlutils.MakeSQLRunner(s.DB)
 		sqlDB.Exec(t, `CREATE DATABASE movr`)
 		sqlDB.Exec(t, `CREATE TABLE movr.drivers (id INT PRIMARY KEY, name STRING)`)
 		sqlDB.Exec(t,
@@ -720,15 +720,15 @@ func TestAvroSchemaNamespace(t *testing.T) {
 		require.Contains(t, foo.registry.SchemaForSubject(`superdrivers-value`), `"namespace":"super"`)
 	}
 
-	t.Run(`kafka`, kafkaTest(testFn))
+	cdcTest(t, testFn, feedTestForceSink("kafka"))
 }
 
 func TestTableNameCollision(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	defer log.Scope(t).Close(t)
 
-	testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
-		sqlDB := sqlutils.MakeSQLRunner(db)
+	testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) {
+		sqlDB := sqlutils.MakeSQLRunner(s.DB)
 		sqlDB.Exec(t, `CREATE DATABASE movr`)
 		sqlDB.Exec(t, `CREATE DATABASE printr`)
 		sqlDB.Exec(t, `CREATE TABLE movr.drivers (id INT PRIMARY KEY, name STRING)`)
@@ -770,15 +770,15 @@ func TestTableNameCollision(t *testing.T) {
 		})
 	}
 
-	t.Run(`kafka`, kafkaTest(testFn))
+	cdcTest(t, testFn, feedTestForceSink("kafka"))
 }
 
 func TestAvroMigrateToUnsupportedColumn(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	defer log.Scope(t).Close(t)
 
-	testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
-		sqlDB := sqlutils.MakeSQLRunner(db)
+	testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) {
+		sqlDB := sqlutils.MakeSQLRunner(s.DB)
 		sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY)`)
 		sqlDB.Exec(t, `INSERT INTO foo VALUES (1)`)
@@ -796,18 +796,18 @@ func TestAvroMigrateToUnsupportedColumn(t *testing.T) {
 		}
 	}
 
-	t.Run(`kafka`, kafkaTest(testFn))
+	cdcTest(t, testFn, feedTestForceSink("kafka"))
 }
 
 func TestAvroLedger(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	defer log.Scope(t).Close(t)
 
-	testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
+	testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) {
 		ctx := context.Background()
 		gen := ledger.FromFlags(`--customers=1`)
 		var l workloadsql.InsertsDataLoader
-		_, err := workloadsql.Setup(ctx, db, gen, l)
+		_, err := workloadsql.Setup(ctx, s.DB, gen, l)
 		require.NoError(t, err)
 
 		ledger := feed(t, f, fmt.Sprintf(`CREATE CHANGEFEED FOR customer, transaction, entry, session
@@ -830,5 +830,5 @@ func TestAvroLedger(t *testing.T) {
 		})
 	}
 
-	t.Run(`kafka`, kafkaTest(testFn))
+	cdcTest(t, testFn, feedTestForceSink("kafka"))
 }
diff --git a/pkg/ccl/changefeedccl/helpers_tenant_shim_test.go b/pkg/ccl/changefeedccl/helpers_tenant_shim_test.go
deleted file mode 100644
index 7dd4b3e0cb24..000000000000
--- a/pkg/ccl/changefeedccl/helpers_tenant_shim_test.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// Copyright 2021 The Cockroach Authors.
-//
-// Licensed as a CockroachDB Enterprise file under the Cockroach Community
-// License (the "License"); you may not use this file except in compliance with
-// the License. You may obtain a copy of the License at
-//
-//     https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt
-
-package changefeedccl
-
-import (
-	"context"
-
-	"github.com/cockroachdb/cockroach/pkg/base"
-	"github.com/cockroachdb/cockroach/pkg/config"
-	"github.com/cockroachdb/cockroach/pkg/kv"
-	"github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb"
-	"github.com/cockroachdb/cockroach/pkg/roachpb"
-	"github.com/cockroachdb/cockroach/pkg/rpc"
-	"github.com/cockroachdb/cockroach/pkg/server/status"
-	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
-	"github.com/cockroachdb/cockroach/pkg/storage"
-	"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
-	"github.com/cockroachdb/cockroach/pkg/util/hlc"
-	"github.com/cockroachdb/cockroach/pkg/util/stop"
-	"github.com/cockroachdb/cockroach/pkg/util/uuid"
-)
-
-// testServerShim is a kludge to get a few more tests working in
-// tenant-mode.
-//
-// Currently, our TestFeedFactory has a Server() method that returns a
-// TestServerInterface. The TestTenantInterface returned by
-// StartTenant isn't a TestServerInterface.
-//
-// TODO(ssd): Clean this up. Perhaps we can add a SQLServer() method
-// to TestFeedFactory that returns just the bits that are shared.
-type testServerShim struct {
-	serverutils.TestTenantInterface
-	kvServer serverutils.TestServerInterface
-}
-
-const unsupportedShimMethod = `
-This TestServerInterface method is not supported for tenants. Either disable this test on tenants by using the
-feedOptionNoTenants option or add an appropriate implementation for this method to testServerShim.
-`
-
-var _ serverutils.TestServerInterface = (*testServerShim)(nil)
-
-func (t *testServerShim) ServingSQLAddr() string {
-	return t.SQLAddr()
-}
-
-func (t *testServerShim) Stopper() *stop.Stopper { panic(unsupportedShimMethod) }
-func (t *testServerShim) Start(context.Context) error { panic(unsupportedShimMethod) }
-func (t *testServerShim) Node() interface{} { panic(unsupportedShimMethod) }
-func (t *testServerShim) NodeID() roachpb.NodeID { panic(unsupportedShimMethod) }
-func (t *testServerShim) StorageClusterID() uuid.UUID { panic(unsupportedShimMethod) }
-func (t *testServerShim) ServingRPCAddr() string { panic(unsupportedShimMethod) }
-func (t *testServerShim) RPCAddr() string { panic(unsupportedShimMethod) }
-func (t *testServerShim) DB() *kv.DB { panic(unsupportedShimMethod) }
-func (t *testServerShim) RPCContext() *rpc.Context { panic(unsupportedShimMethod) }
-func (t *testServerShim) LeaseManager() interface{} { panic(unsupportedShimMethod) }
-func (t *testServerShim) InternalExecutor() interface{} { panic(unsupportedShimMethod) }
-func (t *testServerShim) ExecutorConfig() interface{} { panic(unsupportedShimMethod) }
-func (t *testServerShim) TracerI() interface{} { panic(unsupportedShimMethod) }
-func (t *testServerShim) GossipI() interface{} { panic(unsupportedShimMethod) }
-func (t *testServerShim) RangeFeedFactory() interface{} { panic(unsupportedShimMethod) }
-func (t *testServerShim) DistSenderI() interface{} { panic(unsupportedShimMethod) }
-func (t *testServerShim) MigrationServer() interface{} { panic(unsupportedShimMethod) }
-func (t *testServerShim) SQLServer() interface{} { panic(unsupportedShimMethod) }
-func (t *testServerShim) SQLLivenessProvider() interface{} { panic(unsupportedShimMethod) }
-func (t *testServerShim) StartupMigrationsManager() interface{} { panic(unsupportedShimMethod) }
-func (t *testServerShim) NodeLiveness() interface{} { panic(unsupportedShimMethod) }
-func (t *testServerShim) HeartbeatNodeLiveness() error { panic(unsupportedShimMethod) }
-func (t *testServerShim) NodeDialer() interface{} { panic(unsupportedShimMethod) }
-func (t *testServerShim) SetDistSQLSpanResolver(spanResolver interface{}) {
-	panic(unsupportedShimMethod)
-}
-func (t *testServerShim) MustGetSQLCounter(name string) int64 { panic(unsupportedShimMethod) }
-func (t *testServerShim) MustGetSQLNetworkCounter(name string) int64 { panic(unsupportedShimMethod) }
-func (t *testServerShim) WriteSummaries() error { panic(unsupportedShimMethod) }
-func (t *testServerShim) GetFirstStoreID() roachpb.StoreID { panic(unsupportedShimMethod) }
-func (t *testServerShim) GetStores() interface{} { panic(unsupportedShimMethod) }
-func (t *testServerShim) ClusterSettings() *cluster.Settings { panic(unsupportedShimMethod) }
-func (t *testServerShim) Decommission(
-	ctx context.Context, targetStatus livenesspb.MembershipStatus, nodeIDs []roachpb.NodeID,
-) error {
-	panic(unsupportedShimMethod)
-}
-func (t *testServerShim) SplitRange(
-	splitKey roachpb.Key,
-) (left roachpb.RangeDescriptor, right roachpb.RangeDescriptor, err error) {
-	panic(unsupportedShimMethod)
-}
-func (t *testServerShim) MergeRanges(
-	leftKey roachpb.Key,
-) (merged roachpb.RangeDescriptor, err error) {
-	panic(unsupportedShimMethod)
-}
-func (t *testServerShim) ExpectedInitialRangeCount() (int, error) { panic(unsupportedShimMethod) }
-func (t *testServerShim) ForceTableGC(
-	ctx context.Context, database, table string, timestamp hlc.Timestamp,
-) error {
-	panic(unsupportedShimMethod)
-}
-func (t *testServerShim) UpdateChecker() interface{} { panic(unsupportedShimMethod) }
-func (t *testServerShim) StartTenant(
-	ctx context.Context, params base.TestTenantArgs,
-) (serverutils.TestTenantInterface, error) {
-	panic(unsupportedShimMethod)
-}
-func (t *testServerShim) ScratchRange() (roachpb.Key, error) { panic(unsupportedShimMethod) }
-func (t *testServerShim) Engines() []storage.Engine { panic(unsupportedShimMethod) }
-func (t *testServerShim) MetricsRecorder() *status.MetricsRecorder { panic(unsupportedShimMethod) }
-func (t *testServerShim) CollectionFactory() interface{} { panic(unsupportedShimMethod) }
-func (t *testServerShim) SystemTableIDResolver() interface{} { panic(unsupportedShimMethod) }
-func (t *testServerShim) SpanConfigKVSubscriber() interface{} { panic(unsupportedShimMethod) }
-func (t *testServerShim) SystemConfigProvider() config.SystemConfigProvider {
-	panic(unsupportedShimMethod)
-}
diff --git a/pkg/ccl/changefeedccl/helpers_test.go b/pkg/ccl/changefeedccl/helpers_test.go
index 897e14bb8248..b50d79001329 100644
--- a/pkg/ccl/changefeedccl/helpers_test.go
+++ b/pkg/ccl/changefeedccl/helpers_test.go
@@ -36,8 +36,11 @@ import (
 	_ "github.com/cockroachdb/cockroach/pkg/ccl/partitionccl"
 	"github.com/cockroachdb/cockroach/pkg/jobs"
 	"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
+	"github.com/cockroachdb/cockroach/pkg/keys"
+	"github.com/cockroachdb/cockroach/pkg/roachpb"
 	"github.com/cockroachdb/cockroach/pkg/security/username"
 	"github.com/cockroachdb/cockroach/pkg/server"
+	"github.com/cockroachdb/cockroach/pkg/server/serverpb"
 	"github.com/cockroachdb/cockroach/pkg/sql/execinfra"
 	"github.com/cockroachdb/cockroach/pkg/testutils"
 	"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
@@ -325,15 +328,6 @@ SET CLUSTER SETTING sql.defaults.vectorize=on;
 CREATE DATABASE d;
 `
 
-func startTestServer(
-	t testing.TB, options feedTestOptions,
-) (serverutils.TestServerInterface, *gosql.DB, func()) {
-	if options.useTenant {
-		return startTestTenant(t, options)
-	}
-	return startTestFullServer(t, options)
-}
-
 func startTestFullServer(
 	t testing.TB, options feedTestOptions,
 ) (serverutils.TestServerInterface, *gosql.DB, func()) {
@@ -418,10 +412,26 @@ func startTestCluster(t testing.TB) (serverutils.TestClusterInterface, *gosql.DB
 	return cluster, db, cleanupAndReset
 }
 
+func waitForTenantPodsActive(
+	t testing.TB, tenantServer serverutils.TestTenantInterface, numPods int,
+) {
+	testutils.SucceedsWithin(t, func() error {
+		status := tenantServer.StatusServer().(serverpb.SQLStatusServer)
+		var nodes *serverpb.NodesListResponse
+		var err error
+		for nodes == nil || len(nodes.Nodes) != numPods {
+			nodes, err = status.NodesList(context.Background(), nil)
+			if err != nil {
+				return err
+			}
+		}
+		return nil
+	}, 10*time.Second)
+}
+
 func startTestTenant(
-	t testing.TB, options feedTestOptions,
-) (serverutils.TestServerInterface, *gosql.DB, func()) {
-	kvServer, _, cleanupCluster := startTestFullServer(t, options)
+	t testing.TB, systemServer serverutils.TestServerInterface, options feedTestOptions,
+) (roachpb.TenantID, serverutils.TestTenantInterface, *gosql.DB, func()) {
 	knobs := base.TestingKnobs{
 		DistSQL:          &execinfra.TestingKnobs{Changefeed: &TestingKnobs{}},
 		JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(),
@@ -440,32 +450,30 @@ func startTestTenant(
 		ExternalIODir: options.externalIODir,
 	}
 
-	tenantServer, tenantDB := serverutils.StartTenant(t, kvServer, tenantArgs)
+	tenantServer, tenantDB := serverutils.StartTenant(t, systemServer, tenantArgs)
 
 	// Re-run setup on the tenant as well
 	tenantRunner := sqlutils.MakeSQLRunner(tenantDB)
 	tenantRunner.ExecMultiple(t, strings.Split(serverSetupStatements, ";")...)
 
-	server := &testServerShim{tenantServer, kvServer}
-	// Log so that it is clear if a failed test happened
-	// to run on a tenant.
-	t.Logf("Running test using tenant %s", tenantID)
-	return server, tenantDB, func() {
+	waitForTenantPodsActive(t, tenantServer, 1)
+
+	return tenantID, tenantServer, tenantDB, func() {
 		tenantServer.Stopper().Stop(context.Background())
-		log.Infof(context.Background(), "tenant server stopped")
-		cleanupCluster()
-		log.Infof(context.Background(), "cluster shut down")
 	}
 }
 
-type cdcTestFn func(*testing.T, *gosql.DB, cdctest.TestFeedFactory)
+type cdcTestFn func(*testing.T, TestServer, cdctest.TestFeedFactory)
+type cdcTestWithSystemFn func(*testing.T, TestServerWithSystem, cdctest.TestFeedFactory)
 type updateArgsFn func(args *base.TestServerArgs)
 type updateKnobsFn func(knobs *base.TestingKnobs)
 
 type feedTestOptions struct {
-	useTenant     bool
-	argsFn        updateArgsFn
-	knobsFn       updateKnobsFn
-	externalIODir string
+	useTenant         bool
+	argsFn            updateArgsFn
+	knobsFn           updateKnobsFn
+	externalIODir     string
+	allowedSinkTypes  []string
+	disabledSinkTypes []string
 }
 
 type feedTestOption func(opts *feedTestOptions)
@@ -474,6 +482,22 @@ type feedTestOption func(opts *feedTestOptions)
 // from randomly running on a tenant.
 var feedTestNoTenants = func(opts *feedTestOptions) { opts.useTenant = false }
 
+var feedTestForceSink = func(sinkType string) feedTestOption {
+	return feedTestRestrictSinks(sinkType)
+}
+
+var feedTestRestrictSinks = func(sinkTypes ...string) feedTestOption {
+	return func(opts *feedTestOptions) { opts.allowedSinkTypes = append(opts.allowedSinkTypes, sinkTypes...) }
+}
+
+var feedTestEnterpriseSinks = func(opts *feedTestOptions) {
+	feedTestOmitSinks("sinkless")(opts)
+}
+
+var feedTestOmitSinks = func(sinkTypes ...string) feedTestOption {
+	return func(opts *feedTestOptions) { opts.disabledSinkTypes = append(opts.disabledSinkTypes, sinkTypes...) }
+}
+
 // withArgsFn is a feedTestOption that allow the caller to modify the
 // TestServerArgs before they are used to create the test server. Note
 // that in multi-tenant tests, these will only apply to the kvServer
@@ -496,7 +520,7 @@ func newTestOptions() feedTestOptions {
 	// percentTenant is the percentange of tests that will be run against
 	// a SQL-node in a multi-tenant server. 1 for all tests to be run on a
 	// tenant.
-	const percentTenant = 0.25
+	const percentTenant = 1
 	return feedTestOptions{
 		useTenant: rand.Float32() < percentTenant,
 	}
@@ -510,128 +534,6 @@ func makeOptions(opts ...feedTestOption) feedTestOptions {
 	return options
 }
 
-func sinklessTest(testFn cdcTestFn, testOpts ...feedTestOption) func(*testing.T) {
-	return sinklessTestWithOptions(testFn, makeOptions(testOpts...))
-}
-
-func sinklessTestWithOptions(testFn cdcTestFn, opts feedTestOptions) func(*testing.T) {
-	return func(t *testing.T) {
-		s, db, stopServer := startTestServer(t, opts)
-		defer stopServer()
-
-		sink, cleanup := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(username.RootUser))
-		defer cleanup()
-		f := makeSinklessFeedFactory(s, sink)
-		testFn(t, db, f)
-	}
-}
-
-// RunRandomSink runs the testFn against one of a number of possible
-// sinks. Sinkless is not included in the possible sinks.
-func RunRandomSinkTest(t *testing.T, desc string, testFn cdcTestFn, testOpts ...feedTestOption) {
-	// TODO(ssd): It would be nice if explicitly selecting a test
-	// via -run/TESTS= would force it to always run.
-	switch p := rand.Float32(); {
-	case p < 0.20:
-		t.Run(fmt.Sprintf("enterprise/%s", desc), enterpriseTest(testFn, testOpts...))
-	case p < 0.40:
-		t.Run(fmt.Sprintf("cloudstorage/%s", desc), cloudStorageTest(testFn, testOpts...))
-	case p < 0.60:
-		t.Run(fmt.Sprintf("webhook/%s", desc), webhookTest(testFn, testOpts...))
-	default: // Run kafka a bit more often
-		t.Run(fmt.Sprintf("kafka/%s", desc), kafkaTest(testFn, testOpts...))
-	}
-}
-
-func enterpriseTest(testFn cdcTestFn, testOpts ...feedTestOption) func(*testing.T) {
-	return enterpriseTestWithOptions(testFn, makeOptions(testOpts...))
-}
-
-func enterpriseTestWithOptions(testFn cdcTestFn, options feedTestOptions) func(*testing.T) {
-	return func(t *testing.T) {
-		s, db, stopServer := startTestServer(t, options)
-		defer stopServer()
-
-		sink, cleanup := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(username.RootUser))
-		defer cleanup()
-		f := makeTableFeedFactory(s, db, sink)
-
-		testFn(t, db, f)
-	}
-}
-
-func cloudStorageTest(testFn cdcTestFn, testOpts ...feedTestOption) func(*testing.T) {
-	return cloudStorageTestWithOptions(testFn, makeOptions(testOpts...))
-}
-
-func cloudStorageTestWithOptions(testFn cdcTestFn, options feedTestOptions) func(*testing.T) {
-	return func(t *testing.T) {
-		if options.externalIODir == "" {
-			dir, dirCleanupFn := testutils.TempDir(t)
-			defer dirCleanupFn()
-			options.externalIODir = dir
-		}
-		oldKnobsFn := options.knobsFn
-		options.knobsFn = func(knobs *base.TestingKnobs) {
-			if oldKnobsFn != nil {
-				oldKnobsFn(knobs)
-			}
-			blobClientFactory := blobs.NewLocalOnlyBlobClientFactory(options.externalIODir)
-			if serverKnobs, ok := knobs.Server.(*server.TestingKnobs); ok {
-				serverKnobs.BlobClientFactory = blobClientFactory
-			} else {
-				knobs.Server = &server.TestingKnobs{
-					BlobClientFactory: blobClientFactory,
-				}
-			}
-		}
-		s, db, stopServer := startTestServer(t, options)
-		defer stopServer()
-
-		f := makeCloudFeedFactory(s, db, options.externalIODir)
-		testFn(t, db, f)
-	}
-}
-
-func kafkaTest(testFn cdcTestFn, testOpts ...feedTestOption) func(t *testing.T) {
-	return kafkaTestWithOptions(testFn, makeOptions(testOpts...))
-}
-
-func kafkaTestWithOptions(testFn cdcTestFn, options feedTestOptions) func(*testing.T) {
-	return func(t *testing.T) {
-		s, db, stopServer := startTestServer(t, options)
-		defer stopServer()
-		f := makeKafkaFeedFactory(s, db)
-		testFn(t, db, f)
-	}
-}
-
-func webhookTest(testFn cdcTestFn, testOpts ...feedTestOption) func(t *testing.T) {
-	return webhookTestWithOptions(testFn, makeOptions(testOpts...))
-}
-
-func webhookTestWithOptions(testFn cdcTestFn, options feedTestOptions) func(*testing.T) {
-	return func(t *testing.T) {
-		s, db, stopServer := startTestServer(t, options)
-		defer stopServer()
-		f := makeWebhookFeedFactory(s, db)
-		testFn(t, db, f)
-	}
-}
-
-func pubsubTest(testFn cdcTestFn, testOpts ...feedTestOption) func(t *testing.T) {
-	return pubsubTestWithOptions(testFn, makeOptions(testOpts...))
-}
-
-func pubsubTestWithOptions(testFn cdcTestFn, options feedTestOptions) func(*testing.T) {
-	return func(t *testing.T) {
-		s, db, stopServer := startTestServer(t, options)
-		defer stopServer()
-		f := makePubsubFeedFactory(s, db)
-		testFn(t, db, f)
-	}
-}
-
 func serverArgsRegion(args base.TestServerArgs) string {
 	for _, tier := range args.Locality.Tiers {
 		if tier.Key == "region" {
@@ -644,8 +546,8 @@ func serverArgsRegion(args base.TestServerArgs) string {
 // expectNotice creates a pretty crude database connection that doesn't involve
 // a lot of cdc test framework, use with caution. Driver-agnostic tools don't
 // have clean ways of inspecting incoming notices.
-func expectNotice(t *testing.T, s serverutils.TestServerInterface, sql string, expected string) {
-	url, cleanup := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(username.RootUser))
+func expectNotice(t *testing.T, s serverutils.TestTenantInterface, sql string, expected string) {
+	url, cleanup := sqlutils.PGUrl(t, s.SQLAddr(), t.Name(), url.User(username.RootUser))
 	defer cleanup()
 	base, err := pq.NewConnector(url.String())
 	if err != nil {
@@ -683,6 +585,248 @@ func closeFeed(t testing.TB, f cdctest.TestFeed) {
 	}
 }
 
+// TestServer is a struct to allow tests to operate on a shared API regardless
+// of a test running as the system tenant or a secondary tenant
+type TestServer struct {
+	DB           *gosql.DB
+	Server       serverutils.TestTenantInterface
+	Codec        keys.SQLCodec
+	TestingKnobs base.TestingKnobs
+}
+
+// TestServerWithSystem provides access to the system db and server for a
+// TestServer. This is useful for some tests that explicitly require access to
+// the system tenant, for example if
+// desctestutils.TestingGetPublicTableDescriptor is being called.
+type TestServerWithSystem struct {
+	TestServer
+	SystemDB     *gosql.DB
+	SystemServer serverutils.TestServerInterface
+}
+
+func makeSystemServer(
+	t *testing.T, opts ...feedTestOption,
+) (testServer TestServerWithSystem, cleanup func()) {
+	options := makeOptions(opts...)
+	return makeSystemServerWithOptions(t, options)
+}
+
+var _ = makeSystemServer // silence unused warning
+
+func makeSystemServerWithOptions(
+	t *testing.T, options feedTestOptions,
+) (testServer TestServerWithSystem, cleanup func()) {
+	systemServer, systemDB, clusterCleanup := startTestFullServer(t, options)
+	return TestServerWithSystem{
+			TestServer: TestServer{
+				DB:           systemDB,
+				Server:       systemServer,
+				TestingKnobs: systemServer.(*server.TestServer).Cfg.TestingKnobs,
+				Codec:        keys.SystemSQLCodec,
+			},
+			SystemServer: systemServer,
+			SystemDB:     systemDB,
+		}, func() {
+			clusterCleanup()
+		}
+}
+
+func makeTenantServer(
+	t *testing.T, opts ...feedTestOption,
+) (testServer TestServerWithSystem, cleanup func()) {
+	options := makeOptions(opts...)
+	return makeTenantServerWithOptions(t, options)
+}
+
+func makeTenantServerWithOptions(
+	t *testing.T, options feedTestOptions,
+) (testServer TestServerWithSystem, cleanup func()) {
+	systemServer, systemDB, clusterCleanup := startTestFullServer(t, options)
+	tenantID, tenantServer, tenantDB, tenantCleanup := startTestTenant(t, systemServer, options)
+
+	return TestServerWithSystem{
+			TestServer: TestServer{
+				DB:           tenantDB,
+				Server:       tenantServer,
+				TestingKnobs: tenantServer.(*server.TestTenant).Cfg.TestingKnobs,
+				Codec:        keys.MakeSQLCodec(tenantID),
+			},
+			SystemDB:     systemDB,
+			SystemServer: systemServer,
+		}, func() {
+			tenantCleanup()
+			clusterCleanup()
+		}
+}
+
+func makeServer(
+	t *testing.T, opts ...feedTestOption,
+) (testServer TestServerWithSystem, cleanup func()) {
+	options := makeOptions(opts...)
+	return makeServerWithOptions(t, options)
+}
+
+func makeServerWithOptions(
+	t *testing.T, options feedTestOptions,
+) (server TestServerWithSystem, cleanup func()) {
+	if options.useTenant {
+		t.Logf("making server as secondary tenant")
+		return makeTenantServerWithOptions(t, options)
+	}
+	t.Logf("making server as system tenant")
+	return makeSystemServerWithOptions(t, options)
+}
+
+func randomSinkType(opts ...feedTestOption) string {
+	options := makeOptions(opts...)
+	return randomSinkTypeWithOptions(options)
+}
+
+func randomSinkTypeWithOptions(options feedTestOptions) string {
+	sinkWeights := map[string]int{
+		"kafka":        2, // run kafka a bit more often
+		"enterprise":   1,
+		"webhook":      1,
+		"pubsub":       1,
+		"sinkless":     1,
+		"cloudstorage": 0, // requires externalIODir set
+	}
+	if options.externalIODir != "" {
+		sinkWeights["cloudstorage"] = 1
+	}
+	if options.allowedSinkTypes != nil {
+		sinkWeights = map[string]int{}
+		for _, sinkType := range options.allowedSinkTypes {
+			sinkWeights[sinkType] = 1
+		}
+	}
+	if options.disabledSinkTypes != nil {
+		for _, sinkType := range options.disabledSinkTypes {
+			sinkWeights[sinkType] = 0
+		}
+	}
+	weightTotal := 0
+	for _, weight := range sinkWeights {
+		weightTotal += weight
+	}
+	p := rand.Float32() * float32(weightTotal)
+	var sum float32 = 0
+	for sink, weight := range sinkWeights {
+		sum += float32(weight)
+		if p <= sum {
+			return sink
+		}
+	}
+	return "kafka" // unreachable
+}
+
+// addCloudStorageOptions adds the options necessary to enable a server to run a
+// cloudstorage changefeed on it
+func addCloudStorageOptions(t *testing.T, options *feedTestOptions) (cleanup func()) {
+	dir, dirCleanupFn := testutils.TempDir(t)
+	options.externalIODir = dir
+	oldKnobsFn := options.knobsFn
+	options.knobsFn = func(knobs *base.TestingKnobs) {
+		if oldKnobsFn != nil {
+			oldKnobsFn(knobs)
+		}
+		blobClientFactory := blobs.NewLocalOnlyBlobClientFactory(options.externalIODir)
+		if serverKnobs, ok := knobs.Server.(*server.TestingKnobs); ok {
+			serverKnobs.BlobClientFactory = blobClientFactory
+		} else {
+			knobs.Server = &server.TestingKnobs{
+				BlobClientFactory: blobClientFactory,
+			}
+		}
+	}
+	return dirCleanupFn
+}
+
+func makeFeedFactory(
+	t *testing.T,
+	sinkType string,
+	s serverutils.TestTenantInterface,
+	db *gosql.DB,
+	testOpts ...feedTestOption,
+) (factory cdctest.TestFeedFactory, sinkCleanup func()) {
+	options := makeOptions(testOpts...)
+	return makeFeedFactoryWithOptions(t, sinkType, s, db, options)
+}
+
+func makeFeedFactoryWithOptions(
+	t *testing.T,
+	sinkType string,
+	s serverutils.TestTenantInterface,
+	db *gosql.DB,
+	options feedTestOptions,
+) (factory cdctest.TestFeedFactory, sinkCleanup func()) {
+	t.Logf("making %s feed factory", sinkType)
+	switch sinkType {
+	case "kafka":
+		f := makeKafkaFeedFactory(s, db)
+		return f, func() {}
+	case "cloudstorage":
+		if options.externalIODir == "" {
+			t.Fatalf("expected externalIODir option to be set")
+		}
+		f := makeCloudFeedFactory(s, db, options.externalIODir)
+		return f, func() {}
+	case "enterprise":
+		sink, cleanup := sqlutils.PGUrl(t, s.SQLAddr(), t.Name(), url.User(username.RootUser))
+		f := makeTableFeedFactory(s, db, sink)
+		return f, cleanup
+	case "webhook":
+		f := makeWebhookFeedFactory(s, db)
+		return f, func() {}
+	case "pubsub":
+		f := makePubsubFeedFactory(s, db)
+		return f, func() {}
+	case "sinkless":
+		sink, cleanup := sqlutils.PGUrl(t, s.SQLAddr(), t.Name(), url.User(username.RootUser))
+		f := makeSinklessFeedFactory(s, sink)
+		return f, cleanup
+	}
+	t.Fatalf("unhandled sink type %s", sinkType)
+	return nil, nil
+}
+
+func cdcTest(t *testing.T, testFn cdcTestFn, testOpts ...feedTestOption) {
+	cdcTestNamed(t, "", testFn, testOpts...)
+}
+
+func cdcTestNamed(t *testing.T, name string, testFn cdcTestFn, testOpts ...feedTestOption) {
+	testFnWithSystem := func(t *testing.T, s TestServerWithSystem, f cdctest.TestFeedFactory) {
+		testFn(t, s.TestServer, f)
+	}
+	cdcTestNamedWithSystem(t, name, testFnWithSystem, testOpts...)
+}
+
+func cdcTestWithSystem(t *testing.T, testFn cdcTestWithSystemFn, testOpts ...feedTestOption) {
+	cdcTestNamedWithSystem(t, "", testFn, testOpts...)
+}
+
+func cdcTestNamedWithSystem(
+	t *testing.T, name string, testFn cdcTestWithSystemFn, testOpts ...feedTestOption,
+) {
+	t.Helper()
+	options := makeOptions(testOpts...)
+	cleanupCloudStorage := addCloudStorageOptions(t, &options)
+
+	sinkType := randomSinkTypeWithOptions(options)
+	testLabel := sinkType
+	if name != "" {
+		testLabel = fmt.Sprintf("%s/%s", sinkType, name)
+	}
+	t.Run(testLabel, func(t *testing.T) {
+		testServer, cleanupServer := makeServerWithOptions(t, options)
+		feedFactory, cleanupSink := makeFeedFactoryWithOptions(t, sinkType, testServer.Server, testServer.DB, options)
+		defer cleanupServer()
+		defer cleanupSink()
+		defer cleanupCloudStorage()
+
+		testFn(t, testServer, feedFactory)
+	})
+}
+
 func forceTableGC(
 	t testing.TB,
 	tsi serverutils.TestServerInterface,
diff --git a/pkg/ccl/changefeedccl/nemeses_test.go b/pkg/ccl/changefeedccl/nemeses_test.go
index 0e0bbbaa3f56..907bdd169b85 100644
--- a/pkg/ccl/changefeedccl/nemeses_test.go
+++ b/pkg/ccl/changefeedccl/nemeses_test.go
@@ -9,7 +9,6 @@ package changefeedccl
 import (
-	gosql "database/sql"
 	"math"
 	"regexp"
 	"strings"
@@ -27,13 +26,13 @@ func TestChangefeedNemeses(t *testing.T) {
 	defer log.Scope(t).Close(t)
 	skip.UnderRace(t, "takes >1 min under race")
 
-	testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
-		sqlDB := sqlutils.MakeSQLRunner(db)
+	testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) {
+		sqlDB := sqlutils.MakeSQLRunner(s.DB)
 		disableDeclarativeSchemaChangesForTest(t, sqlDB)
 
 		// TODO(dan): Ugly hack to disable `eventPause` in sinkless feeds. See comment in
 		// `RunNemesis` for details.
 		isSinkless := strings.Contains(t.Name(), "sinkless")
-		v, err := cdctest.RunNemesis(f, db, isSinkless)
+		v, err := cdctest.RunNemesis(f, s.DB, isSinkless)
 		if err != nil {
 			t.Fatalf("%+v", err)
 		}
@@ -47,10 +46,7 @@ func TestChangefeedNemeses(t *testing.T) {
 	//
 	// nemeses_test.go:39: pq: unimplemented: operation is
 	// unsupported in multi-tenancy mode
-	t.Run(`sinkless`, sinklessTest(testFn, feedTestNoTenants))
-	t.Run(`enterprise`, enterpriseTest(testFn, feedTestNoTenants))
-	t.Run(`cloudstorage`, cloudStorageTest(testFn, feedTestNoTenants))
-	t.Run(`webhook`, webhookTest(testFn, feedTestNoTenants))
+	cdcTest(t, testFn, feedTestNoTenants)
 	log.Flush()
 	entries, err := log.FetchEntriesFromFiles(0, math.MaxInt64, 1,
 		regexp.MustCompile("cdc ux violation"), log.WithFlattenedSensitiveData)
diff --git a/pkg/ccl/changefeedccl/show_changefeed_jobs_test.go b/pkg/ccl/changefeedccl/show_changefeed_jobs_test.go
index c2d6ad164906..ba47ce410b55 100644
--- a/pkg/ccl/changefeedccl/show_changefeed_jobs_test.go
+++ b/pkg/ccl/changefeedccl/show_changefeed_jobs_test.go
@@ -10,7 +10,6 @@ package changefeedccl
 import (
 	"context"
-	gosql "database/sql"
 	"fmt"
 	"net/url"
 	"sort"
@@ -53,8 +52,8 @@ func TestShowChangefeedJobsBasic(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	defer log.Scope(t).Close(t)
 
-	testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
-		sqlDB := sqlutils.MakeSQLRunner(db)
+	testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) {
+		sqlDB := sqlutils.MakeSQLRunner(s.DB)
 		sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`)
 
 		foo := feed(t, f, `CREATE CHANGEFEED FOR foo`)
@@ -105,10 +104,9 @@ func TestShowChangefeedJobsBasic(t *testing.T) {
 		require.Equal(t, "json", out.format, "Expected format:%s but found format:%s", "json", out.format)
 	}
 
-	t.Run(`enterprise`, enterpriseTest(testFn))
-	t.Run(`kafka`, kafkaTest(testFn))
-	t.Run(`cloudstorage`, cloudStorageTest(testFn))
-	t.Run(`pubsub`, pubsubTest(testFn))
+	// TODO: Webhook disabled since the query parameters on the sinkURI are
+	// correct but out of order
+	cdcTest(t, testFn, feedTestOmitSinks("webhook", "sinkless"))
 }
 
 func TestShowChangefeedJobs(t *testing.T) {
@@ -338,8 +336,8 @@ func TestShowChangefeedJobsAlterChangefeed(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	defer log.Scope(t).Close(t)
 
-	testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
-		sqlDB := sqlutils.MakeSQLRunner(db)
+	testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) {
+		sqlDB := sqlutils.MakeSQLRunner(s.DB)
 		sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`)
 		sqlDB.Exec(t, `CREATE TABLE bar (a INT PRIMARY KEY)`)
@@ -427,5 +425,6 @@ func TestShowChangefeedJobsAlterChangefeed(t *testing.T) {
 		require.Equal(t, "json", out.format, "Expected format:%s but found format:%s", "json", out.format)
 	}
 
-	t.Run(`kafka`, kafkaTest(testFn))
+	// Force kafka to validate topics
+	cdcTest(t, testFn, feedTestForceSink("kafka"))
 }
diff --git a/pkg/ccl/changefeedccl/testfeed_test.go b/pkg/ccl/changefeedccl/testfeed_test.go
index e4ecabd90774..6bb055244668 100644
--- a/pkg/ccl/changefeedccl/testfeed_test.go
+++ b/pkg/ccl/changefeedccl/testfeed_test.go
@@ -54,14 +54,14 @@ import (
 )
 
 type sinklessFeedFactory struct {
-	s    serverutils.TestServerInterface
+	s    serverutils.TestTenantInterface
 	sink url.URL
 }
 
 // makeSinklessFeedFactory returns a TestFeedFactory implementation using the
 // `experimental-sql` uri.
 func makeSinklessFeedFactory(
-	s serverutils.TestServerInterface, sink url.URL,
+	s serverutils.TestTenantInterface, sink url.URL,
 ) cdctest.TestFeedFactory {
 	return &sinklessFeedFactory{s: s, sink: sink}
 }
@@ -87,7 +87,7 @@ func (f *sinklessFeedFactory) Feed(create string, args ...interface{}) (cdctest.
 }
 
 // Server implements the TestFeedFactory interface.
-func (f *sinklessFeedFactory) Server() serverutils.TestServerInterface {
+func (f *sinklessFeedFactory) Server() serverutils.TestTenantInterface {
 	return f.s
 }
@@ -578,7 +578,7 @@ func (di *depInjector) getJobFeed(jobID jobspb.JobID) *jobFeed {
 }
 
 type enterpriseFeedFactory struct {
-	s  serverutils.TestServerInterface
+	s  serverutils.TestTenantInterface
 	di *depInjector
 	db *gosql.DB
 }
@@ -600,7 +600,7 @@ type tableFeedFactory struct {
 // makeTableFeedFactory returns a TestFeedFactory implementation using the
 // `experimental-sql` uri.
 func makeTableFeedFactory(
-	srv serverutils.TestServerInterface, db *gosql.DB, sink url.URL,
+	srv serverutils.TestTenantInterface, db *gosql.DB, sink url.URL,
 ) cdctest.TestFeedFactory {
 	return &tableFeedFactory{
 		enterpriseFeedFactory: enterpriseFeedFactory{
@@ -664,7 +664,7 @@ func (f *tableFeedFactory) Feed(
 }
 
 // Server implements the TestFeedFactory interface.
-func (f *tableFeedFactory) Server() serverutils.TestServerInterface {
+func (f *tableFeedFactory) Server() serverutils.TestTenantInterface {
 	return f.s
 }
@@ -781,7 +781,7 @@ type cloudFeedFactory struct {
 // makeCloudFeedFactory returns a TestFeedFactory implementation using the cloud
 // storage uri.
 func makeCloudFeedFactory(
-	srv serverutils.TestServerInterface, db *gosql.DB, dir string,
+	srv serverutils.TestTenantInterface, db *gosql.DB, dir string,
 ) cdctest.TestFeedFactory {
 	return &cloudFeedFactory{
 		enterpriseFeedFactory: enterpriseFeedFactory{
@@ -838,7 +838,7 @@ func (f *cloudFeedFactory) Feed(
 }
 
 // Server implements the TestFeedFactory interface.
-func (f *cloudFeedFactory) Server() serverutils.TestServerInterface {
+func (f *cloudFeedFactory) Server() serverutils.TestTenantInterface {
 	return f.s
 }
@@ -889,7 +889,7 @@ func reformatJSON(j interface{}) ([]byte, error) {
 // extractKeyFromJSONValue extracts the `WITH key_in_value` key from a `WITH
 // format=json, envelope=wrapped` value.
 func extractKeyFromJSONValue(wrapped []byte) (key []byte, value []byte, _ error) {
-	parsed := make(map[string]interface{})
+	parsed := make(map[string]gojson.RawMessage)
 	if err := gojson.Unmarshal(wrapped, &parsed); err != nil {
 		return nil, nil, err
 	}
@@ -1136,7 +1136,7 @@ var _ cdctest.TestFeedFactory = (*kafkaFeedFactory)(nil)
 
 // makeKafkaFeedFactory returns a TestFeedFactory implementation using the `kafka` uri.
 func makeKafkaFeedFactory(
-	srv serverutils.TestServerInterface, db *gosql.DB,
+	srv serverutils.TestTenantInterface, db *gosql.DB,
 ) cdctest.TestFeedFactory {
 	return &kafkaFeedFactory{
 		enterpriseFeedFactory: enterpriseFeedFactory{
@@ -1244,7 +1244,7 @@ func (k *kafkaFeedFactory) Feed(create string, args ...interface{}) (cdctest.Tes
 }
 
 // Server implements TestFeedFactory
-func (k *kafkaFeedFactory) Server() serverutils.TestServerInterface {
+func (k *kafkaFeedFactory) Server() serverutils.TestTenantInterface {
 	return k.s
 }
@@ -1343,7 +1343,7 @@ var _ cdctest.TestFeedFactory = (*webhookFeedFactory)(nil)
 
 // makeWebhookFeedFactory returns a TestFeedFactory implementation using the `webhook-webhooks` uri.
 func makeWebhookFeedFactory(
-	srv serverutils.TestServerInterface, db *gosql.DB,
+	srv serverutils.TestTenantInterface, db *gosql.DB,
 ) cdctest.TestFeedFactory {
 	useSecure := rand.Float32() < 0.5
 	return &webhookFeedFactory{
@@ -1418,7 +1418,7 @@ func (f *webhookFeedFactory) Feed(create string, args ...interface{}) (cdctest.T
 	return c, nil
 }
 
-func (f *webhookFeedFactory) Server() serverutils.TestServerInterface {
+func (f *webhookFeedFactory) Server() serverutils.TestTenantInterface {
 	return f.s
 }
@@ -1449,24 +1449,24 @@ func isResolvedTimestamp(message []byte) (bool, error) {
 // extractTopicFromJSONValue extracts the `WITH topic_in_value` topic from a `WITH
 // format=json, envelope=wrapped` value.
 func extractTopicFromJSONValue(wrapped []byte) (topic string, value []byte, _ error) {
-	parsed := make(map[string]interface{})
-	if err := gojson.Unmarshal(wrapped, &parsed); err != nil {
+	parsedValue := make(map[string]gojson.RawMessage)
+	if err := gojson.Unmarshal(wrapped, &parsedValue); err != nil {
 		return "", nil, err
 	}
-	topicParsed := parsed[`topic`]
-	delete(parsed, `topic`)
-
-	topic = fmt.Sprintf("%v", topicParsed)
+	if err := gojson.Unmarshal(parsedValue[`topic`], &topic); err != nil {
+		return "", nil, err
+	}
+	delete(parsedValue, `topic`)
 
 	var err error
-	if value, err = reformatJSON(parsed); err != nil {
+	if value, err = reformatJSON(parsedValue); err != nil {
 		return "", nil, err
 	}
 	return topic, value, nil
 }
 
 type webhookSinkTestfeedPayload struct {
-	Payload []interface{} `json:"payload"`
-	Length  int           `json:"length"`
+	Payload []gojson.RawMessage `json:"payload"`
+	Length  int                 `json:"length"`
 }
 
 // extractValueFromJSONMessage extracts the value of the first element of
@@ -1641,7 +1641,7 @@ var _ cdctest.TestFeedFactory = (*pubsubFeedFactory)(nil)
 
 // makePubsubFeedFactory returns a TestFeedFactory implementation using the `pubsub` uri.
 func makePubsubFeedFactory(
-	srv serverutils.TestServerInterface, db *gosql.DB,
+	srv serverutils.TestTenantInterface, db *gosql.DB,
 ) cdctest.TestFeedFactory {
 	return &pubsubFeedFactory{
 		enterpriseFeedFactory: enterpriseFeedFactory{
@@ -1701,7 +1701,7 @@ func (p *pubsubFeedFactory) Feed(create string, args ...interface{}) (cdctest.Te
 }
 
 // Server implements TestFeedFactory
-func (p *pubsubFeedFactory) Server() serverutils.TestServerInterface {
+func (p *pubsubFeedFactory) Server() serverutils.TestTenantInterface {
 	return p.s
 }
diff --git a/pkg/ccl/changefeedccl/validations_test.go b/pkg/ccl/changefeedccl/validations_test.go
index cf47254cd719..114c96304fcb 100644
--- a/pkg/ccl/changefeedccl/validations_test.go
+++ b/pkg/ccl/changefeedccl/validations_test.go
@@ -30,22 +30,22 @@ func TestCatchupScanOrdering(t *testing.T) {
 	defer log.Scope(t).Close(t)
 	defer utilccl.TestingEnableEnterprise()()
 
-	testFn := func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) {
+	testFn := func(t *testing.T, s TestServer, f cdctest.TestFeedFactory) {
 		t.Run("bank", func(t *testing.T) {
 			ctx := context.Background()
 			const numRows, numRanges, payloadBytes, maxTransfer = 10, 10, 10, 999
 			gen := bank.FromConfig(numRows, numRows, payloadBytes, numRanges)
 			var l workloadsql.InsertsDataLoader
-			if _, err := workloadsql.Setup(ctx, db, gen, l); err != nil {
+			if _, err := workloadsql.Setup(ctx, s.DB, gen, l); err != nil {
 				t.Fatal(err)
 			}
 
 			var nowString string
-			require.NoError(t, db.QueryRow("SELECT cluster_logical_timestamp()").Scan(&nowString))
+			require.NoError(t, s.DB.QueryRow("SELECT cluster_logical_timestamp()").Scan(&nowString))
 			existingChangeCount := 50
 			for i := 0; i < existingChangeCount; i++ {
-				if err := randomBankTransfer(numRows, maxTransfer, db); err != nil {
+				if err := randomBankTransfer(numRows, maxTransfer, s.DB); err != nil {
 					t.Fatal(err)
 				}
 			}
@@ -61,7 +61,7 @@ func TestCatchupScanOrdering(t *testing.T) {
 					return nil
 				}
 
-				if err := randomBankTransfer(numRows, maxTransfer, db); err != nil {
+				if err := randomBankTransfer(numRows, maxTransfer, s.DB); err != nil {
 					return err
 				}
 			}
@@ -102,8 +102,7 @@ func TestCatchupScanOrdering(t *testing.T) {
 	// validations_test.go:40: executing ALTER TABLE bank SPLIT AT
 	// VALUES (5): pq: unimplemented: operation is unsupported in
 	// multi-tenancy mode
-	t.Run(`sinkless`, sinklessTest(testFn, feedTestNoTenants))
-	t.Run(`enterprise`, enterpriseTest(testFn, feedTestNoTenants))
+	cdcTest(t, testFn, feedTestNoTenants)
 }
 
 // TODO(dan): This bit is copied from the bank workload. It's
diff --git a/pkg/server/testserver.go b/pkg/server/testserver.go
index ecf476d3524d..cd8efec2938c 100644
--- a/pkg/server/testserver.go
+++ b/pkg/server/testserver.go
@@ -652,6 +652,11 @@ func (t *TestTenant) DrainClients(ctx context.Context) error {
 	return t.drain.drainClients(ctx, nil /* reporter */)
 }
 
+// MustGetSQLCounter implements TestTenantInterface.
+func (t *TestTenant) MustGetSQLCounter(name string) int64 {
+	return mustGetSQLCounterForRegistry(t.metricsRegistry, name)
+}
+
 // StartTenant starts a SQL tenant communicating with this TestServer.
 func (ts *TestServer) StartTenant(
 	ctx context.Context, params base.TestTenantArgs,
@@ -892,38 +897,9 @@ func (v *v2AuthDecorator) RoundTrip(r *http.Request) (*http.Response, error) {
 	return v.RoundTripper.RoundTrip(r)
 }
 
-// MustGetSQLCounter implements TestServerInterface.
+// MustGetSQLCounter implements TestTenantInterface.
 func (ts *TestServer) MustGetSQLCounter(name string) int64 {
-	var c int64
-	var found bool
-
-	type (
-		int64Valuer  interface{ Value() int64 }
-		int64Counter interface{ Count() int64 }
-	)
-
-	ts.registry.Each(func(n string, v interface{}) {
-		if name == n {
-			switch t := v.(type) {
-			case *metric.Counter:
-				c = t.Count()
-				found = true
-			case *metric.Gauge:
-				c = t.Value()
-				found = true
-			case int64Valuer:
-				c = t.Value()
-				found = true
-			case int64Counter:
-				c = t.Count()
-				found = true
-			}
-		}
-	})
-	if !found {
-		panic(fmt.Sprintf("couldn't find metric %s", name))
-	}
-	return c
+	return mustGetSQLCounterForRegistry(ts.registry, name)
 }
 
 // MustGetSQLNetworkCounter implements TestServerInterface.
@@ -1422,3 +1398,36 @@ func (testServerFactoryImpl) New(params base.TestServerArgs) (interface{}, error
 
 	return ts, nil
 }
+
+func mustGetSQLCounterForRegistry(registry *metric.Registry, name string) int64 {
+	var c int64
+	var found bool
+
+	type (
+		int64Valuer  interface{ Value() int64 }
+		int64Counter interface{ Count() int64 }
+	)
+
+	registry.Each(func(n string, v interface{}) {
+		if name == n {
+			switch t := v.(type) {
+			case *metric.Counter:
+				c = t.Count()
+				found = true
+			case *metric.Gauge:
+				c = t.Value()
+				found = true
+			case int64Valuer:
+				c = t.Value()
+				found = true
+			case int64Counter:
+				c = t.Count()
+				found = true
+			}
+		}
+	})
+	if !found {
+		panic(fmt.Sprintf("couldn't find metric %s", name))
+	}
+	return c
+}
diff --git a/pkg/testutils/serverutils/test_tenant_shim.go b/pkg/testutils/serverutils/test_tenant_shim.go
index 9e724e27755f..4c798df1b470 100644
--- a/pkg/testutils/serverutils/test_tenant_shim.go
+++ b/pkg/testutils/serverutils/test_tenant_shim.go
@@ -136,6 +136,10 @@ type TestTenantInterface interface {
 	// SystemConfigProvider provides access to the system config.
 	SystemConfigProvider() config.SystemConfigProvider
 
+	// MustGetSQLCounter returns the value of a counter metric from the server's
+	// SQL Executor. Runs in O(# of metrics) time, which is fine for test code.
+	MustGetSQLCounter(name string) int64
+
 	// TODO(irfansharif): We'd benefit from an API to construct a *gosql.DB, or
 	// better yet, a *sqlutils.SQLRunner. We use it all the time, constructing
 	// it by hand each time.