diff --git a/pkg/bench/bench_test.go b/pkg/bench/bench_test.go index e51bc84db62e..df98f1594df8 100644 --- a/pkg/bench/bench_test.go +++ b/pkg/bench/bench_test.go @@ -506,7 +506,7 @@ func BenchmarkTracing(b *testing.B) { Tracer: tr, }, }) - sqlRunner := sqlutils.MakeRoundRobinSQLRunner(tc.Conns[0], tc.Conns[1], tc.Conns[2]) + sqlRunner := sqlutils.MakeRoundRobinSQLRunner(tc.ServerConn(0), tc.ServerConn(1), tc.ServerConn(2)) return sqlRunner, tc.Stopper() }, }, diff --git a/pkg/bench/foreachdb.go b/pkg/bench/foreachdb.go index b9617ac8c24d..9fcd4e8d0093 100644 --- a/pkg/bench/foreachdb.go +++ b/pkg/bench/foreachdb.go @@ -145,12 +145,12 @@ func benchmarkMultinodeCockroach(b *testing.B, f BenchmarkFn) { DefaultTestTenant: base.TestIsForStuffThatShouldWorkWithSecondaryTenantsButDoesntYet(83461), }, }) - if _, err := tc.Conns[0].Exec(`CREATE DATABASE bench`); err != nil { + if _, err := tc.ServerConn(0).Exec(`CREATE DATABASE bench`); err != nil { b.Fatal(err) } defer tc.Stopper().Stop(context.TODO()) - f(b, sqlutils.MakeRoundRobinSQLRunner(tc.Conns[0], tc.Conns[1], tc.Conns[2])) + f(b, sqlutils.MakeRoundRobinSQLRunner(tc.ServerConn(0), tc.ServerConn(1), tc.ServerConn(2))) } func benchmarkPostgres(b *testing.B, f BenchmarkFn) { diff --git a/pkg/ccl/backupccl/backup_tenant_test.go b/pkg/ccl/backupccl/backup_tenant_test.go index c727bdb02513..7672929e945c 100644 --- a/pkg/ccl/backupccl/backup_tenant_test.go +++ b/pkg/ccl/backupccl/backup_tenant_test.go @@ -45,7 +45,7 @@ func TestBackupTenantImportingTable(t *testing.T) { }, }) defer tc.Stopper().Stop(ctx) - sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + sqlDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) tenantID := roachpb.MustMakeTenantID(10) tSrv, tSQL := serverutils.StartTenant(t, tc.Server(0), base.TestTenantArgs{ diff --git a/pkg/ccl/backupccl/backup_test.go b/pkg/ccl/backupccl/backup_test.go index 1e131d84f23f..0dfa1e84ba9c 100644 --- a/pkg/ccl/backupccl/backup_test.go +++ b/pkg/ccl/backupccl/backup_test.go @@ -532,7 +532,7 @@ func TestBackupRestoreExecLocality(t *testing.T) { // Job exec relocation will return an error while it waits for resumption on a // matching node, but this makes for a slow test, so just send the job stmt to // the node which will not need to relocate the coordination, i.e. n3 or n4. - n3, n4 := sqlutils.MakeSQLRunner(tc.Conns[2]), sqlutils.MakeSQLRunner(tc.Conns[3]) + n3, n4 := sqlutils.MakeSQLRunner(tc.ServerConn(2)), sqlutils.MakeSQLRunner(tc.ServerConn(3)) t.Run("pin-top", func(t *testing.T) { ensureLeaseholder(t, sqlDB) @@ -742,10 +742,10 @@ func TestBackupRestoreAppend(t *testing.T) { // incremental backups that were appended to that backup. 
store, err := cloud.ExternalStorageFromURI(ctx, "userfile:///0", base.ExternalIODirConfig{}, - tc.Servers[0].ClusterSettings(), + tc.Server(0).ClusterSettings(), blobs.TestEmptyBlobClientFactory, username.RootUserName(), - tc.Servers[0].InternalDB().(isql.DB), + tc.Server(0).InternalDB().(isql.DB), nil, /* limiters */ cloud.NilMetrics, ) @@ -1003,7 +1003,7 @@ func backupAndRestore( numAccounts int, kmsURIs []string, ) { - conn := tc.Conns[0] + conn := tc.ServerConn(0) sqlDB := sqlutils.MakeSQLRunner(conn) storageConn := tc.SystemLayer(0).SQLConn(t, "") storageSQLDB := sqlutils.MakeSQLRunner(storageConn) @@ -1815,7 +1815,7 @@ func TestBackupRestoreResume(t *testing.T) { srv := tc.ApplicationLayer(0) codec := keys.MakeSQLCodec(srv.RPCContext().TenantID) clusterID := srv.RPCContext().LogicalClusterID.Get() - backupTableDesc := desctestutils.TestingGetPublicTableDescriptor(tc.Servers[0].DB(), codec, "data", "bank") + backupTableDesc := desctestutils.TestingGetPublicTableDescriptor(tc.Server(0).DB(), codec, "data", "bank") t.Run("backup", func(t *testing.T) { for _, item := range []struct { @@ -1855,7 +1855,7 @@ func TestBackupRestoreResume(t *testing.T) { createAndWaitForJob( t, sqlDB, []descpb.ID{backupTableDesc.GetID()}, jobspb.BackupDetails{ - EndTime: tc.Servers[0].Clock().Now(), + EndTime: tc.Server(0).Clock().Now(), URI: "nodelocal://1/backup" + "-" + item.testName, }, jobspb.BackupProgress{}, @@ -2950,7 +2950,7 @@ func TestBackupRestoreCrossTableReferences(t *testing.T) { t.Run("restore everything to new cluster", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer tc.Stopper().Stop(context.Background()) - db := sqlutils.MakeSQLRunner(tc.Conns[0]) + db := sqlutils.MakeSQLRunner(tc.ServerConn(0)) db.Exec(t, createStore) db.Exec(t, `RESTORE store.* FROM $1`, localFoo) @@ -2988,7 +2988,7 @@ func TestBackupRestoreCrossTableReferences(t *testing.T) { t.Run("restore customers to new cluster", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer tc.Stopper().Stop(context.Background()) - db := sqlutils.MakeSQLRunner(tc.Conns[0]) + db := sqlutils.MakeSQLRunner(tc.ServerConn(0)) db.Exec(t, createStore) db.Exec(t, `RESTORE store.customers, store.orders FROM $1`, localFoo) // Restore's Validate checks all the tables point to each other correctly. @@ -3006,7 +3006,7 @@ func TestBackupRestoreCrossTableReferences(t *testing.T) { t.Run("restore orders to new cluster", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer tc.Stopper().Stop(context.Background()) - db := sqlutils.MakeSQLRunner(tc.Conns[0]) + db := sqlutils.MakeSQLRunner(tc.ServerConn(0)) db.Exec(t, createStore) // FK validation of self-FK is preserved. @@ -3026,7 +3026,7 @@ func TestBackupRestoreCrossTableReferences(t *testing.T) { t.Run("restore receipts to new cluster", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer tc.Stopper().Stop(context.Background()) - db := sqlutils.MakeSQLRunner(tc.Conns[0]) + db := sqlutils.MakeSQLRunner(tc.ServerConn(0)) db.Exec(t, createStore) db.Exec(t, `RESTORE store.receipts FROM $1 WITH OPTIONS (skip_missing_foreign_keys)`, localFoo) // Restore's Validate checks all the tables point to each other correctly. 
@@ -3044,7 +3044,7 @@ func TestBackupRestoreCrossTableReferences(t *testing.T) { t.Run("restore receipts and customers to new cluster", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer tc.Stopper().Stop(context.Background()) - db := sqlutils.MakeSQLRunner(tc.Conns[0]) + db := sqlutils.MakeSQLRunner(tc.ServerConn(0)) db.Exec(t, createStore) db.Exec(t, `RESTORE store.receipts, store.customers FROM $1 WITH OPTIONS (skip_missing_foreign_keys)`, localFoo) // Restore's Validate checks all the tables point to each other correctly. @@ -3074,7 +3074,7 @@ func TestBackupRestoreCrossTableReferences(t *testing.T) { t.Run("restore simple view", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer tc.Stopper().Stop(context.Background()) - db := sqlutils.MakeSQLRunner(tc.Conns[0]) + db := sqlutils.MakeSQLRunner(tc.ServerConn(0)) db.Exec(t, createStore) db.ExpectErr( t, `cannot restore view "early_customers" without restoring referenced table`, @@ -3105,7 +3105,7 @@ func TestBackupRestoreCrossTableReferences(t *testing.T) { t.Run("restore multi-table view", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer tc.Stopper().Stop(context.Background()) - db := sqlutils.MakeSQLRunner(tc.Conns[0]) + db := sqlutils.MakeSQLRunner(tc.ServerConn(0)) db.ExpectErr( t, `cannot restore view "ordercounts" without restoring referenced table`, @@ -3164,7 +3164,7 @@ func TestBackupRestoreCrossTableReferences(t *testing.T) { t.Run("restore and skip missing views", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer tc.Stopper().Stop(context.Background()) - db := sqlutils.MakeSQLRunner(tc.Conns[0]) + db := sqlutils.MakeSQLRunner(tc.ServerConn(0)) // Test cases where, after filtering out views that can't be restored, there are no other tables to restore @@ -3284,7 +3284,7 @@ func TestBackupRestoreIncremental(t *testing.T) { { restoreTC := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer restoreTC.Stopper().Stop(context.Background()) - sqlDBRestore := sqlutils.MakeSQLRunner(restoreTC.Conns[0]) + sqlDBRestore := sqlutils.MakeSQLRunner(restoreTC.ServerConn(0)) sqlDBRestore.Exec(t, `CREATE DATABASE data`) sqlDBRestore.Exec(t, `CREATE TABLE data.bank (id INT PRIMARY KEY)`) @@ -3295,7 +3295,7 @@ func TestBackupRestoreIncremental(t *testing.T) { // generated by the same cluster. 
sqlDBRestore.ExpectErr( - t, fmt.Sprintf("belongs to cluster %s", tc.Servers[0].RPCContext().LogicalClusterID.Get()), + t, fmt.Sprintf("belongs to cluster %s", tc.Server(0).RPCContext().LogicalClusterID.Get()), `BACKUP TABLE data.bank TO $1 INCREMENTAL FROM $2`, "nodelocal://1/some-other-table", "nodelocal://1/0", ) @@ -3377,7 +3377,7 @@ func TestBackupRestorePartitionedIncremental(t *testing.T) { { restoreTC := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer restoreTC.Stopper().Stop(context.Background()) - sqlDBRestore := sqlutils.MakeSQLRunner(restoreTC.Conns[0]) + sqlDBRestore := sqlutils.MakeSQLRunner(restoreTC.ServerConn(0)) sqlDBRestore.Exec(t, `CREATE DATABASE data`) for i := len(defaultBackupDirs); i > 0; i-- { @@ -3462,7 +3462,7 @@ func TestBackupRestoreWithConcurrentWrites(t *testing.T) { for task := 0; task < numBackgroundTasks; task++ { taskNum := task _ = tc.Stopper().RunAsyncTask(ctx, "bg-task", func(context.Context) { - conn := tc.Conns[taskNum%len(tc.Conns)] + conn := tc.ServerConn(taskNum % tc.NumServers()) // Use different sql gateways to make sure leasing is right. if err := startBackgroundWrites(tc.Stopper(), conn, rows, bgActivity, &allowErrors); err != nil { t.Error(err) @@ -3557,7 +3557,7 @@ func TestBackupTenantsWithRevisionHistory(t *testing.T) { tc, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication) defer cleanupFn() - _, err := tc.Servers[0].StartTenant(ctx, base.TestTenantArgs{TenantID: roachpb.MustMakeTenantID(10)}) + _, err := tc.Server(0).StartTenant(ctx, base.TestTenantArgs{TenantID: roachpb.MustMakeTenantID(10)}) require.NoError(t, err) const msg = "can not backup tenants with revision history" @@ -4678,7 +4678,7 @@ func TestRestoredPrivileges(t *testing.T) { t.Run("into fresh db", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer tc.Stopper().Stop(context.Background()) - sqlDBRestore := sqlutils.MakeSQLRunner(tc.Conns[0]) + sqlDBRestore := sqlutils.MakeSQLRunner(tc.ServerConn(0)) sqlDBRestore.Exec(t, `CREATE DATABASE data`) sqlDBRestore.Exec(t, `RESTORE data.bank FROM $1`, localFoo) sqlDBRestore.CheckQueryResults(t, `SHOW GRANTS ON data.bank`, rootOnly) @@ -4687,7 +4687,7 @@ func TestRestoredPrivileges(t *testing.T) { t.Run("into db with added grants", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer tc.Stopper().Stop(context.Background()) - sqlDBRestore := sqlutils.MakeSQLRunner(tc.Conns[0]) + sqlDBRestore := sqlutils.MakeSQLRunner(tc.ServerConn(0)) sqlDBRestore.Exec(t, `CREATE DATABASE data`) sqlDBRestore.Exec(t, `CREATE USER someone`) sqlDBRestore.Exec(t, `USE data`) @@ -4699,7 +4699,7 @@ func TestRestoredPrivileges(t *testing.T) { t.Run("into db on db grants", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer tc.Stopper().Stop(context.Background()) - sqlDBRestore := sqlutils.MakeSQLRunner(tc.Conns[0]) + sqlDBRestore := sqlutils.MakeSQLRunner(tc.ServerConn(0)) sqlDBRestore.Exec(t, `CREATE USER someone`) sqlDBRestore.Exec(t, `RESTORE DATABASE data2 FROM $1`, localFoo) sqlDBRestore.CheckQueryResults(t, `SHOW GRANTS ON DATABASE data2`, data2Grants) @@ -4734,7 +4734,7 @@ func TestRestoreDatabaseVersusTable(t *testing.T) { const numAccounts = 1 tc, origDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication) defer cleanupFn() - args := 
base.TestServerArgs{ExternalIODir: tc.Servers[0].ClusterSettings().ExternalIODir} + args := base.TestServerArgs{ExternalIODir: tc.Server(0).ClusterSettings().ExternalIODir} for _, q := range []string{ `CREATE DATABASE d2`, @@ -4759,7 +4759,7 @@ func TestRestoreDatabaseVersusTable(t *testing.T) { t.Run("incomplete-db", func(t *testing.T) { tcRestore := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer tcRestore.Stopper().Stop(context.Background()) - sqlDB := sqlutils.MakeSQLRunner(tcRestore.Conns[0]) + sqlDB := sqlutils.MakeSQLRunner(tcRestore.ServerConn(0)) sqlDB.Exec(t, `create database d5`) @@ -4794,14 +4794,14 @@ func TestRestoreDatabaseVersusTable(t *testing.T) { t.Run("db", func(t *testing.T) { tcRestore := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer tcRestore.Stopper().Stop(context.Background()) - sqlDB := sqlutils.MakeSQLRunner(tcRestore.Conns[0]) + sqlDB := sqlutils.MakeSQLRunner(tcRestore.ServerConn(0)) sqlDB.Exec(t, `RESTORE DATABASE data, d2, d3 FROM $1`, localFoo) }) t.Run("db-exists", func(t *testing.T) { tcRestore := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer tcRestore.Stopper().Stop(context.Background()) - sqlDB := sqlutils.MakeSQLRunner(tcRestore.Conns[0]) + sqlDB := sqlutils.MakeSQLRunner(tcRestore.ServerConn(0)) sqlDB.Exec(t, `CREATE DATABASE data`) sqlDB.ExpectErr(t, "already exists", `RESTORE DATABASE data FROM $1`, localFoo) @@ -4810,7 +4810,7 @@ func TestRestoreDatabaseVersusTable(t *testing.T) { t.Run("tables", func(t *testing.T) { tcRestore := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer tcRestore.Stopper().Stop(context.Background()) - sqlDB := sqlutils.MakeSQLRunner(tcRestore.Conns[0]) + sqlDB := sqlutils.MakeSQLRunner(tcRestore.ServerConn(0)) sqlDB.Exec(t, `CREATE DATABASE data`) sqlDB.Exec(t, `RESTORE data.* FROM $1`, localFoo) @@ -4819,7 +4819,7 @@ func TestRestoreDatabaseVersusTable(t *testing.T) { t.Run("tables-needs-db", func(t *testing.T) { tcRestore := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer tcRestore.Stopper().Stop(context.Background()) - sqlDB := sqlutils.MakeSQLRunner(tcRestore.Conns[0]) + sqlDB := sqlutils.MakeSQLRunner(tcRestore.ServerConn(0)) sqlDB.ExpectErr(t, "needs to exist", `RESTORE data.*, d4.* FROM $1`, localFoo) }) @@ -4827,7 +4827,7 @@ func TestRestoreDatabaseVersusTable(t *testing.T) { t.Run("into_db", func(t *testing.T) { tcRestore := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer tcRestore.Stopper().Stop(context.Background()) - sqlDB := sqlutils.MakeSQLRunner(tcRestore.Conns[0]) + sqlDB := sqlutils.MakeSQLRunner(tcRestore.ServerConn(0)) sqlDB.ExpectErr( t, `cannot use "into_db"`, @@ -5228,7 +5228,7 @@ func TestBackupRestoreSequence(t *testing.T) { t.Run("restore both table & sequence to a new cluster", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer tc.Stopper().Stop(context.Background()) - newDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + newDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) newDB.Exec(t, `RESTORE DATABASE data FROM $1`, backupLoc) newDB.Exec(t, `USE data`) @@ -5274,7 +5274,7 @@ func TestBackupRestoreSequence(t *testing.T) { t.Run("restore just the table to a new cluster", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer 
tc.Stopper().Stop(context.Background()) - newDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + newDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) newDB.Exec(t, `CREATE DATABASE data`) newDB.Exec(t, `USE data`) @@ -5307,7 +5307,7 @@ func TestBackupRestoreSequence(t *testing.T) { t.Run("restore just the sequence to a new cluster", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer tc.Stopper().Stop(context.Background()) - newDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + newDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) newDB.Exec(t, `CREATE DATABASE data`) newDB.Exec(t, `USE data`) @@ -5450,7 +5450,7 @@ func TestBackupRestoreSequenceOwnership(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer tc.Stopper().Stop(context.Background()) - newDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + newDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) newDB.Exec(t, `RESTORE DATABASE d FROM $1`, backupLoc) @@ -5479,7 +5479,7 @@ func TestBackupRestoreSequenceOwnership(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer tc.Stopper().Stop(context.Background()) - newDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + newDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) newDB.Exec(t, `CREATE DATABASE d`) newDB.Exec(t, `USE d`) @@ -5501,7 +5501,7 @@ func TestBackupRestoreSequenceOwnership(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer tc.Stopper().Stop(context.Background()) - newDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + newDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) newDB.Exec(t, `CREATE DATABASE d`) newDB.Exec(t, `USE d`) @@ -5532,7 +5532,7 @@ func TestBackupRestoreSequenceOwnership(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer tc.Stopper().Stop(context.Background()) - newDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + newDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) newDB.Exec(t, `CREATE DATABASE restore_db`) newDB.Exec(t, `RESTORE d.* FROM $1 WITH into_db='restore_db'`, backupLoc) @@ -5580,7 +5580,7 @@ func TestBackupRestoreSequenceOwnership(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer tc.Stopper().Stop(context.Background()) - newDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + newDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) newDB.ExpectErr(t, "pq: cannot restore sequence \"seq\" without referenced owner|"+ "pq: cannot restore table \"t\" without referenced sequence", @@ -5624,7 +5624,7 @@ func TestBackupRestoreSequenceOwnership(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) defer tc.Stopper().Stop(context.Background()) - newDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + newDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) newDB.Exec(t, `RESTORE DATABASE d2, d3 FROM $1`, backupLocD2D3) @@ -7043,7 +7043,7 @@ func TestBackupRestoreTenant(t *testing.T) { }}, ) defer restoreTC.Stopper().Stop(ctx) - restoreDB := sqlutils.MakeSQLRunner(restoreTC.Conns[0]) + restoreDB := sqlutils.MakeSQLRunner(restoreTC.ServerConn(0)) restoreDB.CheckQueryResults(t, `select id, active, name, data_state, service_mode, crdb_internal.pb_to_json('cockroach.multitenant.ProtoInfo', info) from system.tenants`, [][]string{ { @@ -7209,7 +7209,7 @@ func TestBackupRestoreTenant(t *testing.T) { }}, ) defer restoreTC.Stopper().Stop(ctx) - 
restoreDB := sqlutils.MakeSQLRunner(restoreTC.Conns[0]) + restoreDB := sqlutils.MakeSQLRunner(restoreTC.ServerConn(0)) restoreDB.CheckQueryResults(t, `select id, active, name, data_state, service_mode, crdb_internal.pb_to_json('cockroach.multitenant.ProtoInfo', info) from system.tenants`, @@ -7271,7 +7271,7 @@ func TestBackupRestoreTenant(t *testing.T) { ) defer restoreTC.Stopper().Stop(ctx) - restoreDB := sqlutils.MakeSQLRunner(restoreTC.Conns[0]) + restoreDB := sqlutils.MakeSQLRunner(restoreTC.ServerConn(0)) restoreDB.CheckQueryResults(t, `select id, active, name, data_state, service_mode, crdb_internal.pb_to_json('cockroach.multitenant.ProtoInfo', info) from system.tenants`, @@ -7386,7 +7386,7 @@ func TestBackupRestoreTenant(t *testing.T) { }}, ) defer restoreTC.Stopper().Stop(ctx) - restoreDB := sqlutils.MakeSQLRunner(restoreTC.Conns[0]) + restoreDB := sqlutils.MakeSQLRunner(restoreTC.ServerConn(0)) restoreDB.Exec(t, `RESTORE TENANT 10 FROM 'nodelocal://1/t10' AS OF SYSTEM TIME `+ts1) @@ -7412,7 +7412,7 @@ func TestBackupRestoreTenant(t *testing.T) { }}, ) defer restoreTC.Stopper().Stop(ctx) - restoreDB := sqlutils.MakeSQLRunner(restoreTC.Conns[0]) + restoreDB := sqlutils.MakeSQLRunner(restoreTC.ServerConn(0)) restoreDB.Exec(t, `RESTORE TENANT 20 FROM 'nodelocal://1/t20'`) @@ -7652,7 +7652,8 @@ func TestRestoreTypeDescriptorsRollBack(t *testing.T) { tc, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, 0, InitManualReplication) defer cleanupFn() - for _, server := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + server := tc.Server(serverIdx) registry := server.JobRegistry().(*jobs.Registry) registry.TestingWrapResumerConstructor(jobspb.TypeRestore, func(raw jobs.Resumer) jobs.Resumer { @@ -7782,7 +7783,8 @@ func TestOfflineDescriptorsDuringRestore(t *testing.T) { defer cancel() kvDB := tc.Server(0).DB() - for _, server := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + server := tc.Server(serverIdx) registry := server.JobRegistry().(*jobs.Registry) registry.TestingWrapResumerConstructor( jobspb.TypeRestore, func(raw jobs.Resumer) jobs.Resumer { @@ -7877,7 +7879,8 @@ CREATE TYPE sc.typ AS ENUM ('hello'); defer cancel() kvDB := tc.Server(0).DB() - for _, server := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + server := tc.Server(serverIdx) registry := server.JobRegistry().(*jobs.Registry) registry.TestingWrapResumerConstructor(jobspb.TypeRestore, func(raw jobs.Resumer) jobs.Resumer { @@ -7985,7 +7988,8 @@ CREATE TYPE sc.typ AS ENUM ('hello'); defer cancel() kvDB := tc.Server(0).DB() - for _, server := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + server := tc.Server(serverIdx) registry := server.JobRegistry().(*jobs.Registry) registry.TestingWrapResumerConstructor(jobspb.TypeRestore, func(raw jobs.Resumer) jobs.Resumer { @@ -8118,7 +8122,8 @@ func TestCleanupDoesNotDeleteParentsWithChildObjects(t *testing.T) { ctx, cancel := context.WithCancel(ctx) defer cancel() - for _, server := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + server := tc.Server(serverIdx) registry := server.JobRegistry().(*jobs.Registry) registry.TestingWrapResumerConstructor(jobspb.TypeRestore, func(raw jobs.Resumer) jobs.Resumer { @@ -8179,7 +8184,8 @@ func TestCleanupDoesNotDeleteParentsWithChildObjects(t *testing.T) { ctx, cancel := context.WithCancel(ctx) defer cancel() - for _, server := range tc.Servers { + 
for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + server := tc.Server(serverIdx) registry := server.JobRegistry().(*jobs.Registry) registry.TestingWrapResumerConstructor(jobspb.TypeRestore, func(raw jobs.Resumer) jobs.Resumer { @@ -8242,7 +8248,8 @@ func TestCleanupDoesNotDeleteParentsWithChildObjects(t *testing.T) { ctx, cancel := context.WithCancel(ctx) defer cancel() - for _, server := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + server := tc.Server(serverIdx) registry := server.JobRegistry().(*jobs.Registry) registry.TestingWrapResumerConstructor(jobspb.TypeRestore, func(raw jobs.Resumer) jobs.Resumer { @@ -8257,7 +8264,7 @@ func TestCleanupDoesNotDeleteParentsWithChildObjects(t *testing.T) { // Use the same connection throughout (except for the concurrent RESTORE) to // preserve session variables. - conn, err := tc.Conns[0].Conn(ctx) + conn, err := tc.ServerConn(0).Conn(ctx) require.NoError(t, err) sqlDB := sqlutils.MakeSQLRunner(conn) @@ -8280,7 +8287,7 @@ func TestCleanupDoesNotDeleteParentsWithChildObjects(t *testing.T) { afterPublishNotif, continueNotif := notifyAfterPublishing() g := ctxgroup.WithContext(ctx) g.GoCtx(func(ctx context.Context) error { - conn, err := tc.Conns[0].Conn(ctx) + conn, err := tc.ServerConn(0).Conn(ctx) require.NoError(t, err) _, err = conn.ExecContext(ctx, `RESTORE olddb.* FROM 'nodelocal://1/test/' WITH into_db='newdb'`) require.Regexp(t, "injected error", err) @@ -8310,7 +8317,8 @@ func TestCleanupDoesNotDeleteParentsWithChildObjects(t *testing.T) { ctx, cancel := context.WithCancel(ctx) defer cancel() - for _, server := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + server := tc.Server(serverIdx) registry := server.JobRegistry().(*jobs.Registry) registry.TestingWrapResumerConstructor(jobspb.TypeRestore, func(raw jobs.Resumer) jobs.Resumer { @@ -8617,7 +8625,7 @@ func TestRestoreJobEventLogging(t *testing.T) { defer cleanupFn() var forceFailure bool - for i := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { tc.ApplicationLayer(i).JobRegistry().(*jobs.Registry).TestingWrapResumerConstructor( jobspb.TypeRestore, func(raw jobs.Resumer) jobs.Resumer { @@ -8923,7 +8931,7 @@ func TestBackupWorkerFailure(t *testing.T) { tc, _, _, cleanup := backupRestoreTestSetupWithParams(t, multiNode, numAccounts, InitManualReplication, params) - conn := tc.Conns[0] + conn := tc.ServerConn(0) sqlDB := sqlutils.MakeSQLRunner(conn) defer cleanup() @@ -9074,10 +9082,10 @@ func TestRestorePauseOnError(t *testing.T) { defer cleanupFn() var forceFailure bool - for i := range tc.Servers { - jobRegistry := tc.Servers[i].JobRegistry() + for i := 0; i < tc.NumServers(); i++ { + jobRegistry := tc.Server(i).JobRegistry() if tc.StartedDefaultTestTenant() { - jobRegistry = tc.Servers[i].TestTenants()[0].JobRegistry() + jobRegistry = tc.Server(i).TestTenants()[0].JobRegistry() } jobRegistry.(*jobs.Registry).TestingWrapResumerConstructor( @@ -9286,7 +9294,7 @@ func TestGCDropIndexSpanExpansion(t *testing.T) { }, }}) defer tc.Stopper().Stop(ctx) - conn := tc.Conns[0] + conn := tc.ServerConn(0) sqlRunner := sqlutils.MakeSQLRunner(conn) sqlRunner.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.target_duration = '100ms'`) // speeds up the test @@ -9333,7 +9341,8 @@ func TestRestoreSchemaDescriptorsRollBack(t *testing.T) { tc, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, 0, InitManualReplication) defer cleanupFn() - for _, server := range tc.Servers { + for serverIdx := 0; 
serverIdx < tc.NumServers(); serverIdx++ { + server := tc.Server(serverIdx) registry := server.JobRegistry().(*jobs.Registry) registry.TestingWrapResumerConstructor(jobspb.TypeRestore, func(raw jobs.Resumer) jobs.Resumer { @@ -9466,7 +9475,7 @@ func TestExcludeDataFromBackupAndRestore(t *testing.T) { sqlDB.Exec(t, `SET CLUSTER SETTING kv.rangefeed.enabled = true`) sqlDB.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.target_duration = '100ms'`) - conn := tc.Conns[0] + conn := tc.ServerConn(0) sqlDB.Exec(t, `CREATE TABLE data.foo (id INT, INDEX bar(id))`) sqlDB.Exec(t, `INSERT INTO data.foo select * from generate_series(1,10)`) @@ -9528,7 +9537,8 @@ func TestExportRequestBelowGCThresholdOnDataExcludedFromBackup(t *testing.T) { tc.WaitForNodeLiveness(t) require.NoError(t, tc.WaitForFullReplication()) - for _, server := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + server := tc.Server(serverIdx) registry := server.JobRegistry().(*jobs.Registry) registry.TestingWrapResumerConstructor(jobspb.TypeBackup, func(raw jobs.Resumer) jobs.Resumer { @@ -9721,7 +9731,7 @@ func TestProtectRestoreTargets(t *testing.T) { ctx := context.Background() if !tc.StartedDefaultTestTenant() { - _, err := tc.Servers[0].StartTenant(ctx, base.TestTenantArgs{TenantID: roachpb. + _, err := tc.Server(0).StartTenant(ctx, base.TestTenantArgs{TenantID: roachpb. MustMakeTenantID(10)}) require.NoError(t, err) } @@ -10451,7 +10461,7 @@ func TestBackupRestoreTelemetryEvents(t *testing.T) { defer cleanupFn() var forceFailure bool - for i := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { tc.ApplicationLayer(i).JobRegistry().(*jobs.Registry).TestingWrapResumerConstructor( jobspb.TypeRestore, func(raw jobs.Resumer) jobs.Resumer { @@ -10652,7 +10662,7 @@ func TestBackupDBWithViewOnAdjacentDBRange(t *testing.T) { tc, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, 0, InitManualReplication) defer cleanupFn() - s0 := tc.Servers[0] + s0 := tc.Server(0) // Speeds up the test. sqlDB.Exec(t, `SET CLUSTER SETTING kv.rangefeed.enabled = true`) @@ -10675,7 +10685,7 @@ func TestBackupDBWithViewOnAdjacentDBRange(t *testing.T) { `) // Wait for splits to be created on the new tables. - waitForTableSplit(t, tc.Conns[0], "t2", "da") + waitForTableSplit(t, tc.ServerConn(0), "t2", "da") sqlDB.Exec(t, `BACKUP DATABASE db INTO 'userfile:///a' WITH revision_history;`) diff --git a/pkg/ccl/backupccl/backupinfo/backup_metadata_test.go b/pkg/ccl/backupccl/backupinfo/backup_metadata_test.go index 4f8112b2d523..bc7c777289ad 100644 --- a/pkg/ccl/backupccl/backupinfo/backup_metadata_test.go +++ b/pkg/ccl/backupccl/backupinfo/backup_metadata_test.go @@ -77,7 +77,7 @@ func TestMetadataSST(t *testing.T) { // Check for correct backup metadata on tenant backups. 
userfile2 := "userfile:///2" - _, err := tc.Servers[0].StartTenant(ctx, base.TestTenantArgs{TenantID: roachpb.MustMakeTenantID(10)}) + _, err := tc.Server(0).StartTenant(ctx, base.TestTenantArgs{TenantID: roachpb.MustMakeTenantID(10)}) require.NoError(t, err) sqlDB.Exec(t, `BACKUP TENANT 10 TO $1`, userfile2) checkMetadata(ctx, t, tc, userfile2) @@ -90,10 +90,10 @@ func checkMetadata( ctx, backupLoc, base.ExternalIODirConfig{}, - tc.Servers[0].ClusterSettings(), + tc.Server(0).ClusterSettings(), blobs.TestEmptyBlobClientFactory, username.RootUserName(), - tc.Servers[0].InternalDB().(isql.DB), + tc.Server(0).InternalDB().(isql.DB), nil, /* limiters */ cloud.NilMetrics, ) @@ -105,7 +105,7 @@ func checkMetadata( t.Fatal(err) } - srv := tc.Servers[0] + srv := tc.Server(0) execCfg := srv.ExecutorConfig().(sql.ExecutorConfig) kmsEnv := backupencryption.MakeBackupKMSEnv(srv.ClusterSettings(), &base.ExternalIODirConfig{}, execCfg.InternalDB, username.RootUserName()) diff --git a/pkg/ccl/backupccl/backuprand/backup_rand_test.go b/pkg/ccl/backupccl/backuprand/backup_rand_test.go index 9ceefea4920a..8aa5aacec1ad 100644 --- a/pkg/ccl/backupccl/backuprand/backup_rand_test.go +++ b/pkg/ccl/backupccl/backuprand/backup_rand_test.go @@ -61,12 +61,12 @@ func TestBackupRestoreRandomDataRoundtrips(t *testing.T) { tc := testcluster.StartTestCluster(t, 1, params) defer tc.Stopper().Stop(ctx) tc.ToggleReplicateQueues(false) - sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + sqlDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) sqlDB.Exec(t, "CREATE DATABASE rand") setup := sqlsmith.Setups[sqlsmith.RandTableSetupName](rng) for _, stmt := range setup { - if _, err := tc.Conns[0].Exec(stmt); err != nil { + if _, err := tc.ServerConn(0).Exec(stmt); err != nil { t.Fatal(err) } } @@ -91,7 +91,7 @@ database_name = 'rand' AND schema_name = 'public'`) } // Note: we do not care how many rows successfully populate // the given table - if _, err := randgen.PopulateTableWithRandData(rng, tc.Conns[0], tableName, + if _, err := randgen.PopulateTableWithRandData(rng, tc.ServerConn(0), tableName, numInserts); err != nil { t.Fatal(err) } @@ -106,7 +106,7 @@ database_name = 'rand' AND schema_name = 'public'`) if runSchemaOnlyExtension == "" { var err error tableID := sqlutils.QueryTableID(t, sqlDB.DB, "rand", "public", tableName) - expectedData[tableName], err = fingerprintutils.FingerprintTable(ctx, tc.Conns[0], tableID, + expectedData[tableName], err = fingerprintutils.FingerprintTable(ctx, tc.ServerConn(0), tableID, fingerprintutils.Stripped()) require.NoError(t, err) } @@ -142,7 +142,7 @@ database_name = 'rand' AND schema_name = 'public'`) "SHOW CREATE %s not equal after RESTORE", tableName) if runSchemaOnlyExtension == "" { tableID := sqlutils.QueryTableID(t, sqlDB.DB, "restoredb", "public", tableName) - fingerpint, err := fingerprintutils.FingerprintTable(ctx, tc.Conns[0], tableID, + fingerpint, err := fingerprintutils.FingerprintTable(ctx, tc.ServerConn(0), tableID, fingerprintutils.Stripped()) require.NoError(t, err) require.Equal(t, expectedData[tableName], fingerpint) @@ -194,7 +194,7 @@ database_name = 'rand' AND schema_name = 'public'`) tables := buf.String() t.Logf("Testing subset backup/restore %s", tables) sqlDB.Exec(t, fmt.Sprintf(`BACKUP TABLE %s INTO $1`, tables), backupTarget) - _, err := tc.Conns[0].Exec( + _, err := tc.ServerConn(0).Exec( fmt.Sprintf("RESTORE TABLE %s FROM LATEST IN $1 WITH OPTIONS (into_db='restoredb' %s)", tables, runSchemaOnlyExtension), backupTarget) if err != nil { diff --git 
a/pkg/ccl/backupccl/backuptestutils/testutils.go b/pkg/ccl/backupccl/backuptestutils/testutils.go index 70a695f24576..2096e7f89db8 100644 --- a/pkg/ccl/backupccl/backuptestutils/testutils.go +++ b/pkg/ccl/backupccl/backuptestutils/testutils.go @@ -122,7 +122,7 @@ func StartBackupRestoreTestCluster( tc := testcluster.StartTestCluster(t, clusterSize, opts.testClusterArgs) opts.initFunc(tc) - sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + sqlDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) if opts.bankArgs != nil { const payloadSize = 100 @@ -157,7 +157,7 @@ func StartBackupRestoreTestCluster( } return tc, sqlDB, opts.dataDir, func() { - CheckForInvalidDescriptors(t, tc.Conns[0]) + CheckForInvalidDescriptors(t, tc.ServerConn(0)) tc.Stopper().Stop(ctx) // cleans up in memory storage's auxiliary dirs dirCleanupFunc() } diff --git a/pkg/ccl/backupccl/bench_test.go b/pkg/ccl/backupccl/bench_test.go index d917193d1a2e..18a2cd4e834b 100644 --- a/pkg/ccl/backupccl/bench_test.go +++ b/pkg/ccl/backupccl/bench_test.go @@ -127,10 +127,10 @@ func BenchmarkIteratorMemory(b *testing.B) { ctx, storeURI, base.ExternalIODirConfig{}, - tc.Servers[0].ClusterSettings(), + tc.Server(0).ClusterSettings(), blobs.TestEmptyBlobClientFactory, username.RootUserName(), - tc.Servers[0].InternalDB().(isql.DB), + tc.Server(0).InternalDB().(isql.DB), nil, /* limiters */ cloud.NilMetrics, ) diff --git a/pkg/ccl/backupccl/file_sst_sink_test.go b/pkg/ccl/backupccl/file_sst_sink_test.go index c4abb644ef32..45fdc34f87a0 100644 --- a/pkg/ccl/backupccl/file_sst_sink_test.go +++ b/pkg/ccl/backupccl/file_sst_sink_test.go @@ -860,10 +860,10 @@ func fileSSTSinkTestSetUp( ) (*fileSSTSink, cloud.ExternalStorage) { store, err := cloud.ExternalStorageFromURI(ctx, "userfile:///0", base.ExternalIODirConfig{}, - tc.Servers[0].ClusterSettings(), + tc.Server(0).ClusterSettings(), blobs.TestEmptyBlobClientFactory, username.RootUserName(), - tc.Servers[0].InternalDB().(isql.DB), + tc.Server(0).InternalDB().(isql.DB), nil, /* limiters */ cloud.NilMetrics, ) @@ -876,7 +876,7 @@ func fileSSTSinkTestSetUp( id: 1, enc: nil, progCh: progCh, - settings: &tc.Servers[0].ClusterSettings().SV, + settings: &tc.Server(0).ClusterSettings().SV, } sink := makeFileSSTSink(sinkConf, store) diff --git a/pkg/ccl/backupccl/full_cluster_backup_restore_test.go b/pkg/ccl/backupccl/full_cluster_backup_restore_test.go index c01d1a92cd54..a77df7b8fadf 100644 --- a/pkg/ccl/backupccl/full_cluster_backup_restore_test.go +++ b/pkg/ccl/backupccl/full_cluster_backup_restore_test.go @@ -87,7 +87,8 @@ func TestFullClusterBackup(t *testing.T) { allowProgressAfterPreRestore := make(chan struct{}) // Closed to signal the zones have been restored. restoredZones := make(chan struct{}) - for _, server := range tcRestore.Servers { + for i := 0; i < tcRestore.NumServers(); i++ { + server := tcRestore.Server(i) registry := server.JobRegistry().(*jobs.Registry) registry.TestingWrapResumerConstructor(jobspb.TypeRestore, func(raw jobs.Resumer) jobs.Resumer { @@ -495,7 +496,8 @@ func TestClusterRestoreSystemTableOrdering(t *testing.T) { defer cleanupEmptyCluster() restoredSystemTables := make([]string, 0) - for _, server := range tcRestore.Servers { + for i := 0; i < tcRestore.NumServers(); i++ { + server := tcRestore.Server(i) registry := server.JobRegistry().(*jobs.Registry) registry.TestingWrapResumerConstructor(jobspb.TypeRestore, func(raw jobs.Resumer) jobs.Resumer { @@ -712,7 +714,8 @@ func TestClusterRestoreFailCleanup(t *testing.T) { // Inject a retry error, that returns once. 
alreadyErrored := false - for _, server := range tcRestore.Servers { + for i := 0; i < tcRestore.NumServers(); i++ { + server := tcRestore.Server(i) registry := server.JobRegistry().(*jobs.Registry) registry.TestingWrapResumerConstructor(jobspb.TypeRestore, func(raw jobs.Resumer) jobs.Resumer { @@ -744,7 +747,8 @@ func TestClusterRestoreFailCleanup(t *testing.T) { defer cleanupEmptyCluster() // Bugger the backup by injecting a failure while restoring the system data. - for _, server := range tcRestore.Servers { + for i := 0; i < tcRestore.NumServers(); i++ { + server := tcRestore.Server(i) registry := server.JobRegistry().(*jobs.Registry) registry.TestingWrapResumerConstructor(jobspb.TypeRestore, func(raw jobs.Resumer) jobs.Resumer { @@ -787,7 +791,8 @@ func TestClusterRestoreFailCleanup(t *testing.T) { defer cleanupEmptyCluster() // Bugger the backup by injecting a failure while restoring the system data. - for _, server := range tcRestore.Servers { + for i := 0; i < tcRestore.NumServers(); i++ { + server := tcRestore.Server(i) registry := server.JobRegistry().(*jobs.Registry) registry.TestingWrapResumerConstructor(jobspb.TypeRestore, func(raw jobs.Resumer) jobs.Resumer { @@ -1065,7 +1070,7 @@ func TestClusterRevisionDoesNotBackupOptOutSystemTables(t *testing.T) { defer log.Scope(t).Close(t) tc, _, _, cleanup := backupRestoreTestSetup(t, singleNode, 10, InitManualReplication) - conn := tc.Conns[0] + conn := tc.ServerConn(0) sqlDB := sqlutils.MakeSQLRunner(conn) defer cleanup() diff --git a/pkg/ccl/backupccl/generative_split_and_scatter_processor_test.go b/pkg/ccl/backupccl/generative_split_and_scatter_processor_test.go index ad35d1ee9735..1a5c0c7ed774 100644 --- a/pkg/ccl/backupccl/generative_split_and_scatter_processor_test.go +++ b/pkg/ccl/backupccl/generative_split_and_scatter_processor_test.go @@ -87,7 +87,7 @@ func TestRunGenerativeSplitAndScatterContextCancel(t *testing.T) { uri := localFoo + "/" + backups[0][0] codec := keys.MakeSQLCodec(s0.RPCContext().TenantID) - backupTableDesc := desctestutils.TestingGetPublicTableDescriptor(tc.Servers[0].DB(), codec, "data", "bank") + backupTableDesc := desctestutils.TestingGetPublicTableDescriptor(tc.Server(0).DB(), codec, "data", "bank") backupStartKey := backupTableDesc.PrimaryIndexSpan(codec).Key spec := makeTestingGenerativeSplitAndScatterSpec( @@ -171,7 +171,7 @@ func TestRunGenerativeSplitAndScatterRandomizedDestOnFailScatter(t *testing.T) { uri := localFoo + "/" + backups[0][0] codec := keys.MakeSQLCodec(s0.RPCContext().TenantID) - backupTableDesc := desctestutils.TestingGetPublicTableDescriptor(tc.Servers[0].DB(), codec, "data", "bank") + backupTableDesc := desctestutils.TestingGetPublicTableDescriptor(tc.Server(0).DB(), codec, "data", "bank") backupStartKey := backupTableDesc.PrimaryIndexSpan(codec).Key spec := makeTestingGenerativeSplitAndScatterSpec( diff --git a/pkg/ccl/backupccl/restore_mid_schema_change_test.go b/pkg/ccl/backupccl/restore_mid_schema_change_test.go index 85b36f308434..4e20f922b370 100644 --- a/pkg/ccl/backupccl/restore_mid_schema_change_test.go +++ b/pkg/ccl/backupccl/restore_mid_schema_change_test.go @@ -220,7 +220,7 @@ func restoreMidSchemaChange( tc.Stopper().Stop(ctx) dirCleanupFn() }() - sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + sqlDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) symlink := filepath.Join(dir, "foo") err := os.Symlink(backupDir, symlink) diff --git a/pkg/ccl/backupccl/restore_old_versions_test.go b/pkg/ccl/backupccl/restore_old_versions_test.go index 2f4d1511c9f2..ff5821fabd0e 100644 --- 
a/pkg/ccl/backupccl/restore_old_versions_test.go +++ b/pkg/ccl/backupccl/restore_old_versions_test.go @@ -148,7 +148,7 @@ func restoreOldVersionClusterTest(exportDir string) func(t *testing.T) { ExternalIODir: externalDir, }, }) - sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + sqlDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) defer func() { tc.Stopper().Stop(ctx) dirCleanup() diff --git a/pkg/ccl/backupccl/system_schema_test.go b/pkg/ccl/backupccl/system_schema_test.go index 3dd21579db35..c9b2afb30ac4 100644 --- a/pkg/ccl/backupccl/system_schema_test.go +++ b/pkg/ccl/backupccl/system_schema_test.go @@ -35,7 +35,7 @@ func TestAllSystemTablesHaveBackupConfig(t *testing.T) { DefaultTestTenant: base.TestControlsTenantsExplicitly, }}) defer tc.Stopper().Stop(ctx) - systemSQL := sqlutils.MakeSQLRunner(tc.Conns[0]) + systemSQL := sqlutils.MakeSQLRunner(tc.ServerConn(0)) _, tSQL := serverutils.StartTenant(t, tc.Server(0), base.TestTenantArgs{ TenantID: roachpb.MustMakeTenantID(10), diff --git a/pkg/ccl/backupccl/tenant_backup_nemesis_test.go b/pkg/ccl/backupccl/tenant_backup_nemesis_test.go index a66eb817d145..6d2861d7846c 100644 --- a/pkg/ccl/backupccl/tenant_backup_nemesis_test.go +++ b/pkg/ccl/backupccl/tenant_backup_nemesis_test.go @@ -73,7 +73,7 @@ func TestTenantBackupWithCanceledImport(t *testing.T) { hostSQLDB.Exec(t, "SET CLUSTER SETTING storage.mvcc.range_tombstones.enabled = true") hostSQLDB.Exec(t, "ALTER TENANT ALL SET CLUSTER SETTING storage.mvcc.range_tombstones.enabled = true") - tenant10, err := tc.Servers[0].StartTenant(ctx, base.TestTenantArgs{ + tenant10, err := tc.Server(0).StartTenant(ctx, base.TestTenantArgs{ TenantID: roachpb.MustMakeTenantID(10), TestingKnobs: base.TestingKnobs{ JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(), @@ -102,7 +102,7 @@ func TestTenantBackupWithCanceledImport(t *testing.T) { hostSQLDB.Exec(t, "BACKUP TENANT 10 INTO LATEST IN 'nodelocal://1/tenant-backup'") hostSQLDB.Exec(t, "RESTORE TENANT 10 FROM LATEST IN 'nodelocal://1/tenant-backup' WITH virtual_cluster_name = 'tenant-11'") - tenant11, err := tc.Servers[0].StartTenant(ctx, base.TestTenantArgs{ + tenant11, err := tc.Server(0).StartTenant(ctx, base.TestTenantArgs{ TenantName: "tenant-11", DisableCreateTenant: true, }) @@ -147,7 +147,7 @@ func TestTenantBackupNemesis(t *testing.T) { hostSQLDB.Exec(t, "SET CLUSTER SETTING storage.mvcc.range_tombstones.enabled = true") hostSQLDB.Exec(t, "ALTER TENANT ALL SET CLUSTER SETTING storage.mvcc.range_tombstones.enabled = true") - tenant10, err := tc.Servers[0].StartTenant(ctx, base.TestTenantArgs{ + tenant10, err := tc.Server(0).StartTenant(ctx, base.TestTenantArgs{ TenantID: roachpb.MustMakeTenantID(10), TestingKnobs: base.TestingKnobs{ JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals(), @@ -248,7 +248,7 @@ func TestTenantBackupNemesis(t *testing.T) { // // We check bank.bank which has had the workload running against it // and any table from a completed nemesis. 
- tenant11, err := tc.Servers[0].StartTenant(ctx, base.TestTenantArgs{ + tenant11, err := tc.Server(0).StartTenant(ctx, base.TestTenantArgs{ TenantName: "tenant-11", DisableCreateTenant: true, }) diff --git a/pkg/ccl/changefeedccl/changefeed_test.go b/pkg/ccl/changefeedccl/changefeed_test.go index 26b815d0c999..d3e052232fbb 100644 --- a/pkg/ccl/changefeedccl/changefeed_test.go +++ b/pkg/ccl/changefeedccl/changefeed_test.go @@ -8000,7 +8000,7 @@ func TestChangefeedExecLocality(t *testing.T) { defer tc.Stopper().Stop(ctx) tc.ToggleReplicateQueues(false) - n2 := sqlutils.MakeSQLRunner(tc.Conns[1]) + n2 := sqlutils.MakeSQLRunner(tc.ServerConn(1)) // Setup a table with at least one range on each node to be sure we will see a // file from that node if it isn't excluded by filter. Relocate can fail with diff --git a/pkg/ccl/cloudccl/amazon/s3_connection_test.go b/pkg/ccl/cloudccl/amazon/s3_connection_test.go index 1c52461c0eb3..603174ba6fd0 100644 --- a/pkg/ccl/cloudccl/amazon/s3_connection_test.go +++ b/pkg/ccl/cloudccl/amazon/s3_connection_test.go @@ -45,7 +45,7 @@ func TestS3ExternalConnection(t *testing.T) { defer tc.Stopper().Stop(context.Background()) tc.WaitForNodeLiveness(t) - sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + sqlDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) // Setup some dummy data. sqlDB.Exec(t, `CREATE DATABASE foo`) @@ -198,7 +198,7 @@ func TestAWSKMSExternalConnection(t *testing.T) { defer tc.Stopper().Stop(context.Background()) tc.WaitForNodeLiveness(t) - sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + sqlDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) // Setup some dummy data. sqlDB.Exec(t, `CREATE DATABASE foo`) @@ -307,7 +307,7 @@ func TestAWSKMSExternalConnectionAssumeRole(t *testing.T) { defer tc.Stopper().Stop(context.Background()) tc.WaitForNodeLiveness(t) - sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + sqlDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) // Setup some dummy data. sqlDB.Exec(t, `CREATE DATABASE foo`) diff --git a/pkg/ccl/cloudccl/azure/azure_connection_test.go b/pkg/ccl/cloudccl/azure/azure_connection_test.go index fbf7887d93dd..651a739e3c49 100644 --- a/pkg/ccl/cloudccl/azure/azure_connection_test.go +++ b/pkg/ccl/cloudccl/azure/azure_connection_test.go @@ -71,7 +71,7 @@ func TestExternalConnections(t *testing.T) { defer tc.Stopper().Stop(context.Background()) tc.WaitForNodeLiveness(t) - sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + sqlDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) // Setup some dummy data. sqlDB.Exec(t, `CREATE DATABASE foo`) diff --git a/pkg/ccl/cloudccl/gcp/gcp_connection_test.go b/pkg/ccl/cloudccl/gcp/gcp_connection_test.go index 705af47b43e2..433cb7a08016 100644 --- a/pkg/ccl/cloudccl/gcp/gcp_connection_test.go +++ b/pkg/ccl/cloudccl/gcp/gcp_connection_test.go @@ -50,7 +50,7 @@ func TestGCPKMSExternalConnection(t *testing.T) { defer tc.Stopper().Stop(context.Background()) tc.WaitForNodeLiveness(t) - sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + sqlDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) // Setup some dummy data. sqlDB.Exec(t, `CREATE DATABASE foo`) @@ -178,7 +178,7 @@ func TestGCPKMSExternalConnectionAssumeRole(t *testing.T) { defer tc.Stopper().Stop(context.Background()) tc.WaitForNodeLiveness(t) - sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + sqlDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) // Setup some dummy data. 
sqlDB.Exec(t, `CREATE DATABASE foo`) @@ -305,7 +305,7 @@ func TestGCPAssumeRoleExternalConnection(t *testing.T) { defer tc.Stopper().Stop(context.Background()) tc.WaitForNodeLiveness(t) - sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + sqlDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) // Setup some dummy data. sqlDB.Exec(t, `CREATE DATABASE foo`) @@ -452,7 +452,7 @@ func TestGCPExternalConnection(t *testing.T) { defer tc.Stopper().Stop(context.Background()) tc.WaitForNodeLiveness(t) - sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + sqlDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) // Setup some dummy data. sqlDB.Exec(t, `CREATE DATABASE foo`) diff --git a/pkg/ccl/importerccl/ccl_test.go b/pkg/ccl/importerccl/ccl_test.go index 4eee609f25eb..8ad018fc2fd2 100644 --- a/pkg/ccl/importerccl/ccl_test.go +++ b/pkg/ccl/importerccl/ccl_test.go @@ -424,7 +424,7 @@ func TestImportInTenant(t *testing.T) { } tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{ServerArgs: args}) defer tc.Stopper().Stop(ctx) - conn := tc.Conns[0] + conn := tc.ServerConn(0) sqlDB := sqlutils.MakeSQLRunner(conn) // Setup a few tenants, each with a different table. diff --git a/pkg/ccl/kvccl/kvfollowerreadsccl/boundedstaleness_test.go b/pkg/ccl/kvccl/kvfollowerreadsccl/boundedstaleness_test.go index b0a06668a1a8..078443cc8bac 100644 --- a/pkg/ccl/kvccl/kvfollowerreadsccl/boundedstaleness_test.go +++ b/pkg/ccl/kvccl/kvfollowerreadsccl/boundedstaleness_test.go @@ -80,7 +80,7 @@ func TestBoundedStalenessEnterpriseLicense(t *testing.T) { t.Run("disabled", func(t *testing.T) { for _, testCase := range testCases { t.Run(testCase.query, func(t *testing.T) { - _, err := tc.Conns[0].QueryContext(ctx, testCase.query, testCase.args...) + _, err := tc.ServerConn(0).QueryContext(ctx, testCase.query, testCase.args...) require.Error(t, err) require.Contains(t, err.Error(), "use of bounded staleness requires an enterprise license") }) @@ -91,7 +91,7 @@ func TestBoundedStalenessEnterpriseLicense(t *testing.T) { defer ccl.TestingEnableEnterprise()() for _, testCase := range testCases { t.Run(testCase.query, func(t *testing.T) { - r, err := tc.Conns[0].QueryContext(ctx, testCase.query, testCase.args...) + r, err := tc.ServerConn(0).QueryContext(ctx, testCase.query, testCase.args...) require.NoError(t, err) require.NoError(t, r.Close()) }) diff --git a/pkg/ccl/kvccl/kvfollowerreadsccl/followerreads_test.go b/pkg/ccl/kvccl/kvfollowerreadsccl/followerreads_test.go index fac1d07a97ad..2bc91046e711 100644 --- a/pkg/ccl/kvccl/kvfollowerreadsccl/followerreads_test.go +++ b/pkg/ccl/kvccl/kvfollowerreadsccl/followerreads_test.go @@ -743,7 +743,7 @@ func TestFollowerReadsWithStaleDescriptor(t *testing.T) { }) defer tc.Stopper().Stop(ctx) - n1 := sqlutils.MakeSQLRunner(tc.Conns[0]) + n1 := sqlutils.MakeSQLRunner(tc.ServerConn(0)) n1.Exec(t, `CREATE DATABASE t`) n1.Exec(t, `CREATE TABLE test (k INT PRIMARY KEY)`) n1.Exec(t, `ALTER TABLE test EXPERIMENTAL_RELOCATE VOTERS VALUES (ARRAY[1,2], 1)`) @@ -761,7 +761,7 @@ func TestFollowerReadsWithStaleDescriptor(t *testing.T) { log.Infof(ctx, "test sleeping... done") // Run a query on n4 to populate its cache. - n4 := sqlutils.MakeSQLRunner(tc.Conns[3]) + n4 := sqlutils.MakeSQLRunner(tc.ServerConn(3)) n4.Exec(t, "SELECT * from test WHERE k=1") // Check that the cache was indeed populated. var tableID uint32 @@ -803,7 +803,7 @@ func TestFollowerReadsWithStaleDescriptor(t *testing.T) { // Make a note of the follower reads metric on n3. We'll check that it was // incremented. 
var followerReadsCountBefore int64 - err := tc.Servers[2].GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { + err := tc.Server(2).GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { followerReadsCountBefore = s.Metrics().FollowerReadsCount.Count() return nil }) @@ -820,7 +820,7 @@ func TestFollowerReadsWithStaleDescriptor(t *testing.T) { // Check that the follower read metric was incremented. var followerReadsCountAfter int64 - err = tc.Servers[2].GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { + err = tc.Server(2).GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { followerReadsCountAfter = s.Metrics().FollowerReadsCount.Count() return nil }) @@ -831,7 +831,7 @@ func TestFollowerReadsWithStaleDescriptor(t *testing.T) { // ranges" (#61313). // First, run a query on n3 to populate its cache. - n3 := sqlutils.MakeSQLRunner(tc.Conns[2]) + n3 := sqlutils.MakeSQLRunner(tc.ServerConn(2)) n3.Exec(t, "SELECT * from test WHERE k=1") n3Cache := tc.Server(2).DistSenderI().(*kvcoord.DistSender).RangeDescriptorCache() entry = n3Cache.GetCached(ctx, tablePrefix, false /* inverted */) @@ -958,7 +958,7 @@ func TestSecondaryTenantFollowerReadsRouting(t *testing.T) { // Speed up closing of timestamps in order to sleep less below before we can // use follower_read_timestamp(). Note that we need to override the setting // for the tenant as well, because the builtin is run in the tenant's sql pod. - systemSQL := sqlutils.MakeSQLRunner(tc.Conns[0]) + systemSQL := sqlutils.MakeSQLRunner(tc.ServerConn(0)) systemSQL.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.target_duration = '0.1s'`) systemSQL.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.side_transport_interval = '0.1s'`) systemSQL.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.propagation_slack = '0.1s'`) @@ -1024,8 +1024,8 @@ func TestSecondaryTenantFollowerReadsRouting(t *testing.T) { getFollowerReadCounts := func() [numNodes]int64 { var counts [numNodes]int64 - for i := range tc.Servers { - err := tc.Servers[i].GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { + for i := 0; i < tc.NumServers(); i++ { + err := tc.Server(i).GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { counts[i] = s.Metrics().FollowerReadsCount.Count() return nil }) diff --git a/pkg/ccl/multiregionccl/cold_start_latency_test.go b/pkg/ccl/multiregionccl/cold_start_latency_test.go index 413602b73266..11de3a34b088 100644 --- a/pkg/ccl/multiregionccl/cold_start_latency_test.go +++ b/pkg/ccl/multiregionccl/cold_start_latency_test.go @@ -288,8 +288,9 @@ SELECT checkpoint > extract(epoch from after) // Wait for the configs to be applied. 
testutils.SucceedsWithin(t, func() error { - for _, server := range tc.Servers { - reporter := server.SpanConfigReporter().(spanconfig.Reporter) + for i := 0; i < tc.NumServers(); i++ { + s := tc.Server(i) + reporter := s.SpanConfigReporter().(spanconfig.Reporter) report, err := reporter.SpanConfigConformance(ctx, []roachpb.Span{ {Key: keys.TableDataMin, EndKey: keys.TenantTableDataMax}, }) diff --git a/pkg/ccl/multiregionccl/multiregion_system_table_test.go b/pkg/ccl/multiregionccl/multiregion_system_table_test.go index 943260386a7f..5d08499afab1 100644 --- a/pkg/ccl/multiregionccl/multiregion_system_table_test.go +++ b/pkg/ccl/multiregionccl/multiregion_system_table_test.go @@ -55,9 +55,9 @@ func TestMrSystemDatabase(t *testing.T) { tenantArgs := base.TestTenantArgs{ Settings: cs, TenantID: id, - Locality: *cluster.Servers[0].Locality(), + Locality: *cluster.Server(0).Locality(), } - _, tenantSQL := serverutils.StartTenant(t, cluster.Servers[0], tenantArgs) + _, tenantSQL := serverutils.StartTenant(t, cluster.Server(0), tenantArgs) tDB := sqlutils.MakeSQLRunner(tenantSQL) diff --git a/pkg/ccl/multiregionccl/regional_by_row_test.go b/pkg/ccl/multiregionccl/regional_by_row_test.go index de62460db00f..94483f23635c 100644 --- a/pkg/ccl/multiregionccl/regional_by_row_test.go +++ b/pkg/ccl/multiregionccl/regional_by_row_test.go @@ -922,7 +922,7 @@ func TestIndexDescriptorUpdateForImplicitColumns(t *testing.T) { tdb.Exec(t, `CREATE DATABASE test PRIMARY REGION "us-east1" REGIONS "us-east2"`) fetchIndexes := func(tableName string) []catalog.Index { - kvDB := c.Servers[0].DB() + kvDB := c.Server(0).DB() desc := desctestutils.TestingGetPublicTableDescriptor(kvDB, keys.SystemSQLCodec, "test", tableName) return desc.NonDropIndexes() } diff --git a/pkg/ccl/partitionccl/partition_test.go b/pkg/ccl/partitionccl/partition_test.go index aeb566c4b253..3f4030261a89 100644 --- a/pkg/ccl/partitionccl/partition_test.go +++ b/pkg/ccl/partitionccl/partition_test.go @@ -1191,14 +1191,14 @@ func setupPartitioningTestCluster( }} tc := testcluster.StartTestCluster(t, 3, tcArgs) - sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + sqlDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) sqlDB.Exec(t, `CREATE DATABASE data`) // Disabling store throttling vastly speeds up rebalancing. 
sqlDB.Exec(t, `SET CLUSTER SETTING server.declined_reservation_timeout = '0s'`) sqlDB.Exec(t, `SET CLUSTER SETTING server.failed_reservation_timeout = '0s'`) - return tc.Conns[0], sqlDB, func() { + return tc.ServerConn(0), sqlDB, func() { tc.Stopper().Stop(context.Background()) } } diff --git a/pkg/ccl/storageccl/external_sst_reader_test.go b/pkg/ccl/storageccl/external_sst_reader_test.go index d421d80a88fb..9a3d4134735c 100644 --- a/pkg/ccl/storageccl/external_sst_reader_test.go +++ b/pkg/ccl/storageccl/external_sst_reader_test.go @@ -122,7 +122,7 @@ func TestNewExternalSSTReader(t *testing.T) { clusterSettings, blobs.TestBlobServiceClient(tempDir), username.RootUserName(), - tc.Servers[0].InternalDB().(isql.DB), + tc.Server(0).InternalDB().(isql.DB), nil, /* limiters */ cloud.NilMetrics, ) diff --git a/pkg/ccl/streamingccl/streamingest/replication_random_client_test.go b/pkg/ccl/streamingccl/streamingest/replication_random_client_test.go index 734e399a573e..285ced1187ea 100644 --- a/pkg/ccl/streamingccl/streamingest/replication_random_client_test.go +++ b/pkg/ccl/streamingccl/streamingest/replication_random_client_test.go @@ -204,7 +204,7 @@ func TestStreamIngestionJobWithRandomClient(t *testing.T) { tc := testcluster.StartTestCluster(t, numNodes, params) defer tc.Stopper().Stop(ctx) sqlDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) - conn := tc.Conns[0] + conn := tc.ServerConn(0) allowResponse = make(chan struct{}) receivedRevertRequest = make(chan struct{}) diff --git a/pkg/ccl/testccl/sqlstatsccl/sql_stats_test.go b/pkg/ccl/testccl/sqlstatsccl/sql_stats_test.go index 2eda6971ddbf..6625661e1b29 100644 --- a/pkg/ccl/testccl/sqlstatsccl/sql_stats_test.go +++ b/pkg/ccl/testccl/sqlstatsccl/sql_stats_test.go @@ -80,11 +80,11 @@ func TestSQLStatsRegions(t *testing.T) { serverArgs[i] = args } - host := testcluster.StartTestCluster(t, numServers, base.TestClusterArgs{ + tc := testcluster.StartTestCluster(t, numServers, base.TestClusterArgs{ ServerArgsPerNode: serverArgs, ParallelStart: true, }) - defer host.Stopper().Stop(ctx) + defer tc.Stopper().Stop(ctx) go func() { for _, c := range signalAfter { @@ -92,7 +92,7 @@ func TestSQLStatsRegions(t *testing.T) { } }() - tdb := sqlutils.MakeSQLRunner(host.ServerConn(1)) + tdb := sqlutils.MakeSQLRunner(tc.ServerConn(1)) // Shorten the closed timestamp target duration so that span configs // propagate more rapidly. @@ -116,7 +116,8 @@ func TestSQLStatsRegions(t *testing.T) { // Create secondary tenants var tenantDbs []*gosql.DB - for _, server := range host.Servers { + for i := 0; i < numServers; i++ { + server := tc.Server(i) _, tenantDb := serverutils.StartTenant(t, server, base.TestTenantArgs{ Settings: st, TenantID: roachpb.MustMakeTenantID(11), @@ -131,19 +132,19 @@ func TestSQLStatsRegions(t *testing.T) { tenantDbName := "testDbTenant" createMultiRegionDbAndTable(t, tenantRunner, regionNames, tenantDbName) - require.NoError(t, host.WaitForFullReplication()) + require.NoError(t, tc.WaitForFullReplication()) testCases := []struct { name string dbName string - db func(t *testing.T, host *testcluster.TestCluster, st *cluster.Settings) *sqlutils.SQLRunner + db func(t *testing.T, tc *testcluster.TestCluster, st *cluster.Settings) *sqlutils.SQLRunner }{{ // This test runs against the system tenant, opening a database // connection to the first node in the cluster. 
name: "system tenant", dbName: systemDbName, - db: func(t *testing.T, host *testcluster.TestCluster, _ *cluster.Settings) *sqlutils.SQLRunner { - return sqlutils.MakeSQLRunner(host.ServerConn(0)) + db: func(t *testing.T, tc *testcluster.TestCluster, _ *cluster.Settings) *sqlutils.SQLRunner { + return sqlutils.MakeSQLRunner(tc.ServerConn(0)) }, }, { // This test runs against a secondary tenant, launching a SQL instance @@ -151,13 +152,13 @@ func TestSQLStatsRegions(t *testing.T) { // connection to the first one. name: "secondary tenant", dbName: tenantDbName, - db: func(t *testing.T, host *testcluster.TestCluster, st *cluster.Settings) *sqlutils.SQLRunner { + db: func(t *testing.T, tc *testcluster.TestCluster, st *cluster.Settings) *sqlutils.SQLRunner { return tenantRunner }, }} - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - db := tc.db(t, host, st) + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + db := testCase.db(t, tc, st) db.Exec(t, `SET CLUSTER SETTING sql.txn_stats.sample_rate = 1;`) @@ -165,7 +166,7 @@ func TestSQLStatsRegions(t *testing.T) { testutils.SucceedsWithin(t, func() error { var expectedNodes []int64 var expectedRegions []string - _, err := db.DB.ExecContext(ctx, fmt.Sprintf(`USE %s`, tc.dbName)) + _, err := db.DB.ExecContext(ctx, fmt.Sprintf(`USE %s`, testCase.dbName)) if err != nil { return err } diff --git a/pkg/ccl/workloadccl/fixture_test.go b/pkg/ccl/workloadccl/fixture_test.go index 51faf9af9a4c..dfbd9e1457bf 100644 --- a/pkg/ccl/workloadccl/fixture_test.go +++ b/pkg/ccl/workloadccl/fixture_test.go @@ -267,7 +267,7 @@ func TestImportFixtureNodeCount(t *testing.T) { tc := testcluster.StartTestCluster(t, nodes, base.TestClusterArgs{}) defer tc.Stopper().Stop(ctx) - db := tc.Conns[0] + db := tc.ServerConn(0) sqlDB := sqlutils.MakeSQLRunner(db) gen := makeTestWorkload() diff --git a/pkg/cli/debug_recover_loss_of_quorum_test.go b/pkg/cli/debug_recover_loss_of_quorum_test.go index f5492ec2de8f..46f8605e754f 100644 --- a/pkg/cli/debug_recover_loss_of_quorum_test.go +++ b/pkg/cli/debug_recover_loss_of_quorum_test.go @@ -185,7 +185,7 @@ func TestLossOfQuorumRecovery(t *testing.T) { }, }) tcBefore.Start(t) - s := sqlutils.MakeSQLRunner(tcBefore.Conns[0]) + s := sqlutils.MakeSQLRunner(tcBefore.ServerConn(0)) s.Exec(t, "set cluster setting cluster.organization='remove dead replicas test'") defer tcBefore.Stopper().Stop(ctx) @@ -201,7 +201,7 @@ func TestLossOfQuorumRecovery(t *testing.T) { createIntentOnRangeDescriptor(ctx, t, tcBefore, sk) - node1ID := tcBefore.Servers[0].NodeID() + node1ID := tcBefore.Server(0).NodeID() // Now that stores are prepared and replicated we can shut down cluster // and perform store manipulations. 
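The sqlstatsccl hunk above also untangles two shadowed names, renaming the cluster from host to tc and the table-driven loop variable from tc to testCase, and its per-node tenant startup becomes an index loop. A sketch of that loop shape, assuming the imports from the earlier sketch plus gosql "database/sql", serverutils, and roachpb, and a hypothetical tenant ID:

// startTenantPerNode (hypothetical) starts one SQL instance of the same tenant
// on every node, walking server indexes instead of ranging over tc.Servers.
func startTenantPerNode(t *testing.T, tc *testcluster.TestCluster) []*gosql.DB {
	var tenantDBs []*gosql.DB
	for i := 0; i < tc.NumServers(); i++ {
		s := tc.Server(i)
		_, db := serverutils.StartTenant(t, s, base.TestTenantArgs{
			TenantID: roachpb.MustMakeTenantID(11), // hypothetical tenant ID
		})
		tenantDBs = append(tenantDBs, db)
	}
	return tenantDBs
}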
tcBefore.Stopper().Stop(ctx) @@ -264,8 +264,8 @@ func TestLossOfQuorumRecovery(t *testing.T) { []roachpb.NodeID{roachpb.NodeID(2), roachpb.NodeID(3)}, tcAfter.Server(0).NodeID()), "Failed to decommission removed nodes") - for i := 0; i < len(tcAfter.Servers); i++ { - require.NoError(t, tcAfter.Servers[i].GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { + for i := 0; i < tcAfter.NumServers(); i++ { + require.NoError(t, tcAfter.Server(i).GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { store.SetReplicateQueueActive(true) return nil }), "Failed to activate replication queue") @@ -274,15 +274,15 @@ func TestLossOfQuorumRecovery(t *testing.T) { "Failed to ensure zone configs are propagated") require.NoError(t, tcAfter.WaitForFullReplication(), "Failed to perform full replication") - for i := 0; i < len(tcAfter.Servers); i++ { - require.NoError(t, tcAfter.Servers[i].GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { + for i := 0; i < tcAfter.NumServers(); i++ { + require.NoError(t, tcAfter.Server(i).GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { return store.ForceConsistencyQueueProcess() }), "Failed to force replicas to consistency queue") } // As a validation step we will just pick one range and get its replicas to see // if they were up-replicated to the new nodes. - s = sqlutils.MakeSQLRunner(tcAfter.Conns[0]) + s = sqlutils.MakeSQLRunner(tcAfter.ServerConn(0)) r := s.QueryRow(t, "select replicas from crdb_internal.ranges limit 1") var replicas string r.Scan(&replicas) @@ -391,7 +391,7 @@ func TestStageVersionCheck(t *testing.T) { func createIntentOnRangeDescriptor( ctx context.Context, t *testing.T, tcBefore *testcluster.TestCluster, sk roachpb.Key, ) { - txn := kv.NewTxn(ctx, tcBefore.Servers[0].DB(), 1) + txn := kv.NewTxn(ctx, tcBefore.Server(0).DB(), 1) var desc roachpb.RangeDescriptor // Pick one of the predefined split points. rdKey := keys.RangeDescriptorKey(roachpb.RKey(sk)) @@ -467,7 +467,7 @@ func TestHalfOnlineLossOfQuorumRecovery(t *testing.T) { ServerArgsPerNode: sa, }) tc.Start(t) - s := sqlutils.MakeSQLRunner(tc.Conns[0]) + s := sqlutils.MakeSQLRunner(tc.ServerConn(0)) s.Exec(t, "set cluster setting cluster.organization='remove dead replicas test'") defer tc.Stopper().Stop(ctx) @@ -483,7 +483,7 @@ func TestHalfOnlineLossOfQuorumRecovery(t *testing.T) { createIntentOnRangeDescriptor(ctx, t, tc, sk) - node1ID := tc.Servers[0].NodeID() + node1ID := tc.Server(0).NodeID() // Now that stores are prepared and replicated we can shut down cluster // and perform store manipulations. @@ -548,7 +548,7 @@ func TestHalfOnlineLossOfQuorumRecovery(t *testing.T) { // This is caused by liveness range becoming unavailable and preventing any // progress. So it is likely that test will timeout if basic workflow fails. require.NoError(t, tc.RestartServer(0), "restart failed") - s = sqlutils.MakeSQLRunner(tc.Conns[0]) + s = sqlutils.MakeSQLRunner(tc.ServerConn(0)) // Verifying that post start cleanup performed node decommissioning that // prevents old nodes from rejoining. 
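The loss-of-quorum hunks above repeat one more idiom that recurs in the rest of this patch: loop bounds come from NumServers() and each node's stores are reached through GetStores(). A sketch of that traversal, assuming the scaffolding from the first sketch plus the kvserver and testify require imports these tests already use:

// forEachStore (hypothetical) applies fn to every store on every server.
func forEachStore(t *testing.T, tc *testcluster.TestCluster, fn func(*kvserver.Store) error) {
	for i := 0; i < tc.NumServers(); i++ {
		stores := tc.Server(i).GetStores().(*kvserver.Stores)
		require.NoError(t, stores.VisitStores(fn))
	}
}

// Example use, mirroring the hunk above: re-enable the replicate queue everywhere.
//
//	forEachStore(t, tc, func(s *kvserver.Store) error {
//		s.SetReplicateQueueActive(true)
//		return nil
//	})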
diff --git a/pkg/kv/kvserver/batcheval/cmd_export_test.go b/pkg/kv/kvserver/batcheval/cmd_export_test.go index cb678f0ca05c..24c54acde16b 100644 --- a/pkg/kv/kvserver/batcheval/cmd_export_test.go +++ b/pkg/kv/kvserver/batcheval/cmd_export_test.go @@ -172,7 +172,7 @@ func TestExportCmd(t *testing.T) { "unexpected ResumeReason in latest export") } - sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + sqlDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) sqlDB.Exec(t, `CREATE DATABASE mvcclatest`) sqlDB.Exec(t, `CREATE TABLE mvcclatest.export (id INT PRIMARY KEY, value INT)`) tableID := descpb.ID(sqlutils.QueryTableID( @@ -455,7 +455,7 @@ func TestExportRequestWithCPULimitResumeSpans(t *testing.T) { defer tc.Stopper().Stop(context.Background()) s := tc.ApplicationLayer(0) - sqlDB := tc.Conns[0] + sqlDB := tc.ServerConn(0) db := sqlutils.MakeSQLRunner(sqlDB) execCfg := s.ExecutorConfig().(sql.ExecutorConfig) kvDB := s.DB() diff --git a/pkg/kv/kvserver/client_atomic_membership_change_test.go b/pkg/kv/kvserver/client_atomic_membership_change_test.go index c68dba366711..b90aae377332 100644 --- a/pkg/kv/kvserver/client_atomic_membership_change_test.go +++ b/pkg/kv/kvserver/client_atomic_membership_change_test.go @@ -59,7 +59,7 @@ func TestAtomicReplicationChange(t *testing.T) { runChange := func(expDesc roachpb.RangeDescriptor, chgs []kvpb.ReplicationChange) roachpb.RangeDescriptor { t.Helper() - desc, err := tc.Servers[0].DB().AdminChangeReplicas(ctx, k, expDesc, chgs) + desc, err := tc.Server(0).DB().AdminChangeReplicas(ctx, k, expDesc, chgs) require.NoError(t, err) return *desc @@ -68,7 +68,8 @@ func TestAtomicReplicationChange(t *testing.T) { checkDesc := func(desc roachpb.RangeDescriptor, expStores ...roachpb.StoreID) { testutils.SucceedsSoon(t, func() error { var sawStores []roachpb.StoreID - for _, s := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + s := tc.Server(serverIdx) r, _, _ := s.GetStores().(*kvserver.Stores).GetReplicaForRangeID(ctx, desc.RangeID) if r == nil { continue diff --git a/pkg/kv/kvserver/client_decommission_test.go b/pkg/kv/kvserver/client_decommission_test.go index f350e39f4d0c..a3aebfb8acdf 100644 --- a/pkg/kv/kvserver/client_decommission_test.go +++ b/pkg/kv/kvserver/client_decommission_test.go @@ -71,7 +71,7 @@ func TestDecommission(t *testing.T) { attempt++ desc := tc.LookupRangeOrFatal(t, k) for _, rDesc := range desc.Replicas().VoterDescriptors() { - store, err := tc.Servers[int(rDesc.NodeID-1)].GetStores().(*kvserver.Stores).GetStore(rDesc.StoreID) + store, err := tc.Server(int(rDesc.NodeID-1)).GetStores().(*kvserver.Stores).GetStore(rDesc.StoreID) require.NoError(t, err) if err := store.ForceReplicationScanAndProcess(); err != nil { return err diff --git a/pkg/kv/kvserver/client_lease_test.go b/pkg/kv/kvserver/client_lease_test.go index 910a0288d11d..bcad32e15461 100644 --- a/pkg/kv/kvserver/client_lease_test.go +++ b/pkg/kv/kvserver/client_lease_test.go @@ -128,7 +128,8 @@ func TestGossipNodeLivenessOnLeaseChange(t *testing.T) { // Turn off liveness heartbeats on all nodes to ensure that updates to node // liveness are not triggering gossiping. 
- for _, s := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + s := tc.Server(serverIdx) pErr := s.GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { store.GetStoreConfig().NodeLiveness.PauseHeartbeatLoopForTest() return nil @@ -141,7 +142,8 @@ func TestGossipNodeLivenessOnLeaseChange(t *testing.T) { nodeLivenessKey := gossip.MakeNodeLivenessKey(1) initialServerId := -1 - for i, s := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { + s := tc.Server(i) pErr := s.GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { if store.Gossip().InfoOriginatedHere(nodeLivenessKey) { initialServerId = i @@ -617,11 +619,11 @@ func TestStoreLeaseTransferTimestampCacheRead(t *testing.T) { manualClock.Pause() // Write a key. - _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSenderI().(kv.Sender), incrementArgs(key, 1)) + _, pErr := kv.SendWrapped(ctx, tc.Server(0).DistSenderI().(kv.Sender), incrementArgs(key, 1)) require.Nil(t, pErr) // Determine when to read. - readTS := tc.Servers[0].Clock().Now() + readTS := tc.Server(0).Clock().Now() if futureRead { readTS = readTS.Add(500*time.Millisecond.Nanoseconds(), 0).WithSynthetic(true) } @@ -631,7 +633,7 @@ func TestStoreLeaseTransferTimestampCacheRead(t *testing.T) { ba := &kvpb.BatchRequest{} ba.Timestamp = readTS ba.Add(getArgs(key)) - br, pErr := tc.Servers[0].DistSenderI().(kv.Sender).Send(ctx, ba) + br, pErr := tc.Server(0).DistSenderI().(kv.Sender).Send(ctx, ba) require.Nil(t, pErr) require.Equal(t, readTS, br.Timestamp) v, err := br.Responses[0].GetGet().Value.GetInt() @@ -649,7 +651,7 @@ func TestStoreLeaseTransferTimestampCacheRead(t *testing.T) { ba = &kvpb.BatchRequest{} ba.Timestamp = readTS ba.Add(incrementArgs(key, 1)) - br, pErr = tc.Servers[0].DistSenderI().(kv.Sender).Send(ctx, ba) + br, pErr = tc.Server(0).DistSenderI().(kv.Sender).Send(ctx, ba) require.Nil(t, pErr) require.NotEqual(t, readTS, br.Timestamp) require.True(t, readTS.Less(br.Timestamp)) @@ -676,7 +678,7 @@ func TestStoreLeaseTransferTimestampCacheTxnRecord(t *testing.T) { // Start a txn and perform a write, so that a txn record has to be created by // the EndTxn. - txn := tc.Servers[0].DB().NewTxn(ctx, "test") + txn := tc.Server(0).DB().NewTxn(ctx, "test") require.NoError(t, txn.Put(ctx, "a", "val")) // After starting the transaction, transfer the lease. This will wipe the // timestamp cache, which means that the txn record will not be able to be @@ -845,7 +847,7 @@ func TestLeaseholderRelocate(t *testing.T) { testutils.SucceedsSoon(t, func() error { // Relocate range 3 -> 4. - err = tc.Servers[2].DB(). + err = tc.Server(2).DB(). AdminRelocateRange( context.Background(), rhsDesc.StartKey.AsRawKey(), tc.Targets(0, 1, 3), nil, false) @@ -888,13 +890,13 @@ func TestLeaseholderRelocate(t *testing.T) { } func gossipLiveness(t *testing.T, tc *testcluster.TestCluster) { - for i := range tc.Servers { - testutils.SucceedsSoon(t, tc.Servers[i].HeartbeatNodeLiveness) + for i := 0; i < tc.NumServers(); i++ { + testutils.SucceedsSoon(t, tc.Server(i).HeartbeatNodeLiveness) } // Make sure that all store pools have seen liveness heartbeats from everyone. testutils.SucceedsSoon(t, func() error { - for i := range tc.Servers { - for j := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { + for j := 0; j < tc.NumServers(); j++ { live, err := tc.GetFirstStoreFromServer(t, i).GetStoreConfig(). 
StorePool.IsLive(tc.Target(j).StoreID) if err != nil { @@ -997,7 +999,7 @@ func TestLeasePreferencesDuringOutage(t *testing.T) { // allocator on server 0 may see everyone as temporarily dead due to the // clock move above. for _, i := range []int{0, 3, 4} { - require.NoError(t, tc.Servers[i].HeartbeatNodeLiveness()) + require.NoError(t, tc.Server(i).HeartbeatNodeLiveness()) require.NoError(t, tc.GetFirstStoreFromServer(t, i).GossipStore(ctx, true)) } } @@ -1180,7 +1182,7 @@ func TestLeasesDontThrashWhenNodeBecomesSuspect(t *testing.T) { var repl *kvserver.Replica for _, i := range []int{2, 3} { repl = tc.GetFirstStoreFromServer(t, i).LookupReplica(roachpb.RKey(key)) - if repl.OwnsValidLease(ctx, tc.Servers[i].Clock().NowAsClockTimestamp()) { + if repl.OwnsValidLease(ctx, tc.Server(i).Clock().NowAsClockTimestamp()) { return nil } } @@ -1198,8 +1200,8 @@ func TestLeasesDontThrashWhenNodeBecomesSuspect(t *testing.T) { // Make sure that all store pools have seen liveness heartbeats from everyone. testutils.SucceedsSoon(t, func() error { - for i := range tc.Servers { - for j := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { + for j := 0; j < tc.NumServers(); j++ { live, err := tc.GetFirstStoreFromServer(t, i).GetStoreConfig().StorePool.IsLive(tc.Target(j).StoreID) if err != nil { return err @@ -1216,7 +1218,7 @@ func TestLeasesDontThrashWhenNodeBecomesSuspect(t *testing.T) { repl := tc.GetFirstStoreFromServer(t, 1).LookupReplica(roachpb.RKey(key)) tc.TransferRangeLeaseOrFatal(t, *repl.Desc(), tc.Target(1)) testutils.SucceedsSoon(t, func() error { - if !repl.OwnsValidLease(ctx, tc.Servers[1].Clock().NowAsClockTimestamp()) { + if !repl.OwnsValidLease(ctx, tc.Server(1).Clock().NowAsClockTimestamp()) { return errors.Errorf("Expected lease to transfer to server 1 for replica %s", repl) } return nil @@ -1225,7 +1227,7 @@ func TestLeasesDontThrashWhenNodeBecomesSuspect(t *testing.T) { heartbeat := func(servers ...int) { for _, i := range servers { - testutils.SucceedsSoon(t, tc.Servers[i].HeartbeatNodeLiveness) + testutils.SucceedsSoon(t, tc.Server(i).HeartbeatNodeLiveness) } } @@ -1297,7 +1299,7 @@ func TestLeasesDontThrashWhenNodeBecomesSuspect(t *testing.T) { } for _, key := range startKeys { repl := tc.GetFirstStoreFromServer(t, 1).LookupReplica(roachpb.RKey(key)) - if repl.OwnsValidLease(ctx, tc.Servers[1].Clock().NowAsClockTimestamp()) { + if repl.OwnsValidLease(ctx, tc.Server(1).Clock().NowAsClockTimestamp()) { return nil } } @@ -1333,7 +1335,7 @@ func TestAlterRangeRelocate(t *testing.T) { require.NoError(t, err) testutils.SucceedsSoon(t, func() error { repl := tc.GetFirstStoreFromServer(t, 3).LookupReplica(rhsDesc.StartKey) - if !repl.OwnsValidLease(ctx, tc.Servers[0].Clock().NowAsClockTimestamp()) { + if !repl.OwnsValidLease(ctx, tc.Server(0).Clock().NowAsClockTimestamp()) { return errors.Errorf("Expected lease to transfer to node 4") } // Do this to avoid snapshot problems below when we do another replica move. @@ -1585,7 +1587,7 @@ func TestLeaseUpgradeVersionGate(t *testing.T) { }) // Enable the version gate. 
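The client_lease hunks above show the single-server accessors in one place: Clock(), DB(), and DistSenderI() all hang off tc.Server(i) now. A sketch of taking a clock reading and writing through the KV client the same way, assuming the hlc and require imports in addition to the earlier ones; the helper and key names are made up:

// writeAndStamp (hypothetical) reads node 0's HLC clock, then writes a key
// through node 0's *kv.DB in a transaction.
func writeAndStamp(ctx context.Context, t *testing.T, tc *testcluster.TestCluster) hlc.Timestamp {
	ts := tc.Server(0).Clock().Now() // per-node clock via the accessor

	txn := tc.Server(0).DB().NewTxn(ctx, "sketch")
	require.NoError(t, txn.Put(ctx, "sketch-key", "sketch-value"))
	require.NoError(t, txn.Commit(ctx))
	return ts
}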
- _, err := tc.Conns[0].ExecContext(ctx, `SET CLUSTER SETTING version = $1`, + _, err := tc.ServerConn(0).ExecContext(ctx, `SET CLUSTER SETTING version = $1`, clusterversion.ByKey(clusterversion.TODODelete_V22_2EnableLeaseUpgrade).String()) require.NoError(t, err) diff --git a/pkg/kv/kvserver/client_merge_test.go b/pkg/kv/kvserver/client_merge_test.go index 85f9044aad33..114208541354 100644 --- a/pkg/kv/kvserver/client_merge_test.go +++ b/pkg/kv/kvserver/client_merge_test.go @@ -546,7 +546,7 @@ func mergeCheckingTimestampCaches( if err != nil { return err } - if !rhsRepl.OwnsValidLease(ctx, tc.Servers[1].Clock().NowAsClockTimestamp()) { + if !rhsRepl.OwnsValidLease(ctx, tc.Server(1).Clock().NowAsClockTimestamp()) { return errors.New("rhs store does not own valid lease for rhs range") } if rhsRepl.CurrentLeaseStatus(ctx).Lease.Type() != roachpb.LeaseEpoch { @@ -564,7 +564,7 @@ func mergeCheckingTimestampCaches( t.Fatal(pErr) } - readTS := tc.Servers[0].Clock().Now() + readTS := tc.Server(0).Clock().Now() if futureRead { readTS = readTS.Add(500*time.Millisecond.Nanoseconds(), 0).WithSynthetic(true) } @@ -636,7 +636,7 @@ func mergeCheckingTimestampCaches( lhsKey := scratchKey("a") var lhsStores []*kvserver.Store var lhsRepls []*kvserver.Replica - for i := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { s := tc.GetFirstStoreFromServer(t, i) r := s.LookupReplica(roachpb.RKey(lhsKey)) lhsStores = append(lhsStores, s) @@ -698,7 +698,7 @@ func mergeCheckingTimestampCaches( } else { funcs = partitionedLeaderFuncs } - tc.Servers[i].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(s.StoreID(), &unreliableRaftHandler{ + tc.Server(i).RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(s.StoreID(), &unreliableRaftHandler{ rangeID: lhsDesc.GetRangeID(), IncomingRaftMessageHandler: s, unreliableRaftHandlerFuncs: funcs, @@ -810,7 +810,7 @@ func mergeCheckingTimestampCaches( } else { h = s } - tc.Servers[i].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(s.StoreID(), h) + tc.Server(i).RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(s.StoreID(), h) } close(filterMu.blockHBAndGCs) filterMu.Lock() @@ -870,7 +870,7 @@ func mergeCheckingTimestampCaches( // whether the LHS leaseholder learned about the merge through Raft log // application or a Raft snapshot. Either way though, the transaction should // not be allowed to create its record. 
- hb, hbH := heartbeatArgs(&pushee, tc.Servers[0].Clock().Now()) + hb, hbH := heartbeatArgs(&pushee, tc.Server(0).Clock().Now()) ba = &kvpb.BatchRequest{} ba.Header = hbH ba.RangeID = lhsDesc.RangeID @@ -963,7 +963,7 @@ func TestStoreRangeMergeTimestampCacheCausality(t *testing.T) { }, }) defer tc.Stopper().Stop(context.Background()) - distSender := tc.Servers[0].DistSenderI().(kv.Sender) + distSender := tc.Server(0).DistSenderI().(kv.Sender) for _, key := range []roachpb.Key{scratchKey("a"), scratchKey("b")} { if _, pErr := kv.SendWrapped(ctx, distSender, adminSplitArgs(key)); pErr != nil { @@ -1015,7 +1015,7 @@ func TestStoreRangeMergeTimestampCacheCausality(t *testing.T) { if err != nil { return err } - if !lhsRepl1.OwnsValidLease(ctx, tc.Servers[1].Clock().NowAsClockTimestamp()) { + if !lhsRepl1.OwnsValidLease(ctx, tc.Server(1).Clock().NowAsClockTimestamp()) { return errors.New("s2 does not own valid lease for lhs range") } if !kvserver.ExpirationLeasesOnly.Get(&tc.Server(0).ClusterSettings().SV) { // metamorphic @@ -1212,7 +1212,7 @@ func TestStoreRangeMergeTxnRefresh(t *testing.T) { store := tc.GetFirstStoreFromServer(t, 0) // Create the ranges to be merged. - lhsDesc, _, err := tc.Servers[0].ScratchRangeEx() + lhsDesc, _, err := tc.Server(0).ScratchRangeEx() require.NoError(t, err) // Launch the merge. @@ -1382,7 +1382,7 @@ func TestStoreRangeMergeStats(t *testing.T) { require.NoError(t, err) // Merged stats should agree with recomputation. - nowNanos := tc.Servers[0].Clock().Now().WallTime + nowNanos := tc.Server(0).Clock().Now().WallTime msMerged.AgeTo(nowNanos) assertRecomputedStats(t, "merged range", snap, replMerged.Desc(), msMerged, nowNanos) } @@ -1620,7 +1620,7 @@ func TestStoreRangeMergeSplitRace_MergeWins(t *testing.T) { scratch := tc.ScratchRange(t) store := tc.GetFirstStoreFromServer(t, 0) - distSender := tc.Servers[0].DistSenderI().(kv.Sender) + distSender := tc.Server(0).DistSenderI().(kv.Sender) lhsDesc, rhsDesc, err := createSplitRanges(ctx, scratch, store) if err != nil { @@ -1722,7 +1722,7 @@ func TestStoreRangeMergeSplitRace_SplitWins(t *testing.T) { defer tc.Stopper().Stop(context.Background()) scratch := tc.ScratchRange(t) store := tc.GetFirstStoreFromServer(t, 0) - distSender = tc.Servers[0].DistSenderI().(kv.Sender) + distSender = tc.Server(0).DistSenderI().(kv.Sender) lhsDesc, _, err := createSplitRanges(ctx, scratch, store) if err != nil { @@ -1829,7 +1829,7 @@ func TestStoreRangeMergeRHSLeaseExpiration(t *testing.T) { // the second store the lease on the RHS. The LHS is largely irrelevant. What // matters is that the RHS exists on two stores so we can transfer its lease // during the merge. - lhsDesc, rhsDesc, err := tc.Servers[0].ScratchRangeWithExpirationLeaseEx() + lhsDesc, rhsDesc, err := tc.Server(0).ScratchRangeWithExpirationLeaseEx() require.NoError(t, err) rhsSentinel = rhsDesc.StartKey.AsRawKey() @@ -1866,7 +1866,7 @@ func TestStoreRangeMergeRHSLeaseExpiration(t *testing.T) { // We heartbeat the merge's transaction record with a timestamp forwarded by // the duration we plan to advance the clock by so that the transaction does // not look expired even after the manual clock update. 
- afterAdvance := tc.Servers[0].Clock().Now().Add(toAdvance, 0) + afterAdvance := tc.Server(0).Clock().Now().Add(toAdvance, 0) hb, hbH := heartbeatArgs(mergeTxn, afterAdvance) if _, pErr := kv.SendWrappedWith(ctx, store.TestSender(), hbH, hb); pErr != nil { t.Fatal(pErr) @@ -2001,7 +2001,7 @@ func TestStoreRangeMergeRHSLeaseTransfers(t *testing.T) { // the second store the lease on the RHS. The LHS is largely irrelevant. What // matters is that the RHS exists on two stores so we can transfer its lease // during the merge. - lhsDesc, rhsDesc, err := tc.Servers[0].ScratchRangeWithExpirationLeaseEx() + lhsDesc, rhsDesc, err := tc.Server(0).ScratchRangeWithExpirationLeaseEx() require.NoError(t, err) tc.AddVotersOrFatal(t, lhsDesc.StartKey.AsRawKey(), tc.Target(1)) @@ -2096,7 +2096,7 @@ func TestStoreRangeMergeLHSLeaseTransfersAfterFreezeTime(t *testing.T) { // first store the lease on the LHS and the second store the lease on the // RHS. Before the merge completes, we'll transfer the LHS's lease to the // second store so that the two leaseholders are collocated. - lhsDesc, rhsDesc, err := tc.Servers[0].ScratchRangeEx() + lhsDesc, rhsDesc, err := tc.Server(0).ScratchRangeEx() require.NoError(t, err) tc.AddVotersOrFatal(t, lhsDesc.StartKey.AsRawKey(), tc.Target(1)) @@ -2108,7 +2108,7 @@ func TestStoreRangeMergeLHSLeaseTransfersAfterFreezeTime(t *testing.T) { mergeErr := make(chan error, 1) _ = tc.Stopper().RunAsyncTask(ctx, "merge", func(context.Context) { args := adminMergeArgs(lhsDesc.StartKey.AsRawKey()) - _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSenderI().(kv.Sender), args) + _, pErr := kv.SendWrapped(ctx, tc.Server(0).DistSenderI().(kv.Sender), args) mergeErr <- pErr.GoError() }) @@ -2128,7 +2128,7 @@ func TestStoreRangeMergeLHSLeaseTransfersAfterFreezeTime(t *testing.T) { lhsLeaseholder := store1.LookupReplica(lhsDesc.StartKey) testutils.SucceedsSoon(t, func() error { // Wait for the new leaseholder to notice that it received the lease. 
- now := tc.Servers[1].Clock().NowAsClockTimestamp() + now := tc.Server(1).Clock().NowAsClockTimestamp() if !lhsLeaseholder.OwnsValidLease(ctx, now) { return errors.New("not leaseholder") } @@ -2148,7 +2148,7 @@ func TestStoreRangeMergeLHSLeaseTransfersAfterFreezeTime(t *testing.T) { ba.Timestamp = lhsClosedTS.Prev() ba.RangeID = lhsDesc.RangeID ba.Add(incrementArgs(rhsDesc.StartKey.AsRawKey().Next(), 1)) - br, pErr := tc.Servers[1].DistSenderI().(kv.Sender).Send(ctx, ba) + br, pErr := tc.Server(1).DistSenderI().(kv.Sender).Send(ctx, ba) require.Nil(t, pErr) require.NotEqual(t, ba.Timestamp, br.Timestamp, "write timestamp not bumped") require.True(t, lhsClosedTS.Less(br.Timestamp), "write timestamp not bumped above closed timestamp") @@ -2467,13 +2467,13 @@ func TestStoreReplicaGCAfterMerge(t *testing.T) { } transport := kvserver.NewRaftTransport( - tc.Servers[0].AmbientCtx(), + tc.Server(0).AmbientCtx(), cluster.MakeTestingClusterSettings(), - tc.Servers[0].AmbientCtx().Tracer, - nodedialer.New(tc.Servers[0].RPCContext(), - gossip.AddressResolver(tc.Servers[0].GossipI().(*gossip.Gossip))), + tc.Server(0).AmbientCtx().Tracer, + nodedialer.New(tc.Server(0).RPCContext(), + gossip.AddressResolver(tc.Server(0).GossipI().(*gossip.Gossip))), nil, /* grpcServer */ - tc.Servers[0].Stopper(), + tc.Server(0).Stopper(), kvflowdispatch.NewDummyDispatch(), kvserver.NoopStoresFlowControlIntegration{}, kvserver.NoopRaftTransportDisconnectListener{}, @@ -2735,7 +2735,7 @@ func TestStoreRangeMergeSlowUnabandonedFollower_WithSplit(t *testing.T) { // Start dropping all Raft traffic to the LHS on store2 so that it won't be // aware that there is a merge in progress. - tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.Ident.StoreID, &unreliableRaftHandler{ + tc.Server(2).RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.Ident.StoreID, &unreliableRaftHandler{ rangeID: lhsDesc.RangeID, IncomingRaftMessageHandler: store2, unreliableRaftHandlerFuncs: unreliableRaftHandlerFuncs{ @@ -2773,7 +2773,7 @@ func TestStoreRangeMergeSlowUnabandonedFollower_WithSplit(t *testing.T) { if err != nil { return err } - if !rhsRepl.OwnsValidLease(ctx, tc.Servers[2].Clock().NowAsClockTimestamp()) { + if !rhsRepl.OwnsValidLease(ctx, tc.Server(2).Clock().NowAsClockTimestamp()) { return errors.New("rhs store does not own valid lease for rhs range") } return nil @@ -2898,7 +2898,7 @@ func TestStoreRangeMergeAbandonedFollowers(t *testing.T) { keys := []roachpb.RKey{scratchRKey("a"), scratchRKey("b"), scratchRKey("c")} for _, key := range keys { splitArgs := adminSplitArgs(key.AsRawKey()) - if _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSenderI().(kv.Sender), splitArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, tc.Server(0).DistSenderI().(kv.Sender), splitArgs); pErr != nil { t.Fatal(pErr) } } @@ -2924,7 +2924,7 @@ func TestStoreRangeMergeAbandonedFollowers(t *testing.T) { // Merge all three ranges together. store2 won't hear about this merge. 
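Several merge hunks above poll lease ownership with the owning node's clock, which now also comes from tc.Server(i). A sketch of that wait loop, assuming the testutils, roachpb, and cockroachdb/errors imports these files already use; the helper name is made up:

// waitForLeaseOn (hypothetical) blocks until the replica of key on server idx
// holds a valid lease according to that server's clock.
func waitForLeaseOn(
	ctx context.Context, t *testing.T, tc *testcluster.TestCluster, key roachpb.Key, idx int,
) {
	testutils.SucceedsSoon(t, func() error {
		repl := tc.GetFirstStoreFromServer(t, idx).LookupReplica(roachpb.RKey(key))
		if repl == nil {
			return errors.Errorf("no replica for %s on server %d", key, idx)
		}
		if !repl.OwnsValidLease(ctx, tc.Server(idx).Clock().NowAsClockTimestamp()) {
			return errors.Errorf("server %d does not hold the lease for %s", idx, key)
		}
		return nil
	})
}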
for i := 0; i < 2; i++ { - if _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSenderI().(kv.Sender), adminMergeArgs(scratchKey("a"))); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, tc.Server(0).DistSenderI().(kv.Sender), adminMergeArgs(scratchKey("a"))); pErr != nil { t.Fatal(pErr) } } @@ -3008,7 +3008,7 @@ func TestStoreRangeMergeAbandonedFollowersAutomaticallyGarbageCollected(t *testi if err != nil { return err } - if !rhsRepl.OwnsValidLease(ctx, tc.Servers[2].Clock().NowAsClockTimestamp()) { + if !rhsRepl.OwnsValidLease(ctx, tc.Server(2).Clock().NowAsClockTimestamp()) { return errors.New("store2 does not own valid lease for rhs range") } return nil @@ -3016,7 +3016,7 @@ func TestStoreRangeMergeAbandonedFollowersAutomaticallyGarbageCollected(t *testi // Start dropping all Raft traffic to the LHS replica on store2 so that it // won't be aware that there is a merge in progress. - tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.Ident.StoreID, &unreliableRaftHandler{ + tc.Server(2).RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.Ident.StoreID, &unreliableRaftHandler{ rangeID: lhsDesc.RangeID, IncomingRaftMessageHandler: store2, unreliableRaftHandlerFuncs: unreliableRaftHandlerFuncs{ @@ -3291,7 +3291,7 @@ func TestStoreRangeMergeUninitializedLHSFollower(t *testing.T) { defer tc.Stopper().Stop(ctx) tc.ScratchRange(t) store0, store2 := tc.GetFirstStoreFromServer(t, 0), tc.GetFirstStoreFromServer(t, 2) - distSender := tc.Servers[0].DistSenderI().(kv.Sender) + distSender := tc.Server(0).DistSenderI().(kv.Sender) split := func(key roachpb.RKey) roachpb.RangeID { t.Helper() @@ -3333,7 +3333,7 @@ func TestStoreRangeMergeUninitializedLHSFollower(t *testing.T) { }, }, } - tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.Ident.StoreID, unreliableHandler) + tc.Server(2).RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.Ident.StoreID, unreliableHandler) // Perform the split of A, now that store2 won't be able to initialize its // replica of A. @@ -3347,7 +3347,7 @@ func TestStoreRangeMergeUninitializedLHSFollower(t *testing.T) { IncomingRaftMessageHandler: unreliableHandler, } defer slowSnapHandler.unblock() - tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.Ident.StoreID, slowSnapHandler) + tc.Server(2).RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.Ident.StoreID, slowSnapHandler) // Remove the replica of range 1 on store2. If we were to leave it in place, // store2 would refuse to GC its replica of C after the merge commits, because @@ -3646,7 +3646,7 @@ func TestStoreRangeMergeSlowWatcher(t *testing.T) { testKeys := []roachpb.RKey{aKey, bKey, cKey} for _, key := range testKeys { splitArgs := adminSplitArgs(key.AsRawKey()) - if _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSenderI().(kv.Sender), splitArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, tc.Server(0).DistSenderI().(kv.Sender), splitArgs); pErr != nil { t.Fatal(pErr) } } @@ -3657,7 +3657,8 @@ func TestStoreRangeMergeSlowWatcher(t *testing.T) { // during the test, and we don't want requests headed for A or C to get routed // to B while its blocked because of a stale DistSender cache. 
for _, key := range testKeys { - for _, server := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + server := tc.Server(serverIdx) if _, pErr := kv.SendWrapped(ctx, server.DistSenderI().(kv.Sender), getArgs(key.AsRawKey())); pErr != nil { t.Fatal(pErr) } @@ -3673,7 +3674,7 @@ func TestStoreRangeMergeSlowWatcher(t *testing.T) { // Merge A <- B. mergeArgs := adminMergeArgs(aKey.AsRawKey()) - if _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSenderI().(kv.Sender), mergeArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, tc.Server(0).DistSenderI().(kv.Sender), mergeArgs); pErr != nil { t.Fatal(pErr) } @@ -3687,13 +3688,13 @@ func TestStoreRangeMergeSlowWatcher(t *testing.T) { }() // Merge AB <- C. - if _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSenderI().(kv.Sender), mergeArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, tc.Server(0).DistSenderI().(kv.Sender), mergeArgs); pErr != nil { t.Fatal(pErr) } // Synchronously ensure that the intent on meta2CKey has been cleaned up. // The merge committed, but the intent resolution happens asynchronously. - _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSenderI().(kv.Sender), getArgs(meta2CKey)) + _, pErr := kv.SendWrapped(ctx, tc.Server(0).DistSenderI().(kv.Sender), getArgs(meta2CKey)) if pErr != nil { t.Fatal(pErr) } @@ -3962,7 +3963,7 @@ func TestStoreRangeMergeRaftSnapshot(t *testing.T) { store0, store2 := tc.GetFirstStoreFromServer(t, 0), tc.GetFirstStoreFromServer(t, 2) sendingEng = store0.TODOEngine() receivingEng = store2.TODOEngine() - distSender := tc.Servers[0].DistSenderI().(kv.Sender) + distSender := tc.Server(0).DistSenderI().(kv.Sender) // This test works across 5 ranges in total. We start with a scratch range(1) // [Start, End). We then split this range as follows: @@ -4006,7 +4007,7 @@ func TestStoreRangeMergeRaftSnapshot(t *testing.T) { aRepl0 := store0.LookupReplica(roachpb.RKey(keyA)) // Start dropping all Raft traffic to the first range on store2. - tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.Ident.StoreID, &unreliableRaftHandler{ + tc.Server(2).RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.Ident.StoreID, &unreliableRaftHandler{ rangeID: aRepl0.RangeID, IncomingRaftMessageHandler: store2, unreliableRaftHandlerFuncs: unreliableRaftHandlerFuncs{ @@ -4049,7 +4050,7 @@ func TestStoreRangeMergeRaftSnapshot(t *testing.T) { // Restore Raft traffic to the LHS on store2. log.Infof(ctx, "restored traffic to store 2") - tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.Ident.StoreID, &unreliableRaftHandler{ + tc.Server(2).RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.Ident.StoreID, &unreliableRaftHandler{ rangeID: aRepl0.RangeID, IncomingRaftMessageHandler: store2, unreliableRaftHandlerFuncs: unreliableRaftHandlerFuncs{ @@ -4425,8 +4426,8 @@ func TestMergeQueue(t *testing.T) { // not leak over between subtests. Then, bump the manual clock so that // both range's load-based splitters consider their measurements to be // reliable. 
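Raft traffic interception follows the same shape: the per-node RaftTransport is fetched from tc.Server(i) and a handler is registered for that node's store ID. A sketch of muting and then restoring delivery for one store, under the assumption (consistent with the handlers in these tests, which embed the store) that *kvserver.Store itself satisfies the incoming-message handler interface; the helper name is made up:

// muteAndRestoreRaft (hypothetical) detaches raft message delivery for server
// idx's first store and returns a function that re-registers the store itself.
func muteAndRestoreRaft(t *testing.T, tc *testcluster.TestCluster, idx int) (restore func()) {
	store := tc.GetFirstStoreFromServer(t, idx)
	transport := tc.Server(idx).RaftTransport().(*kvserver.RaftTransport)

	// Stop routing incoming raft messages to this store.
	transport.StopIncomingRaftMessages(store.StoreID())
	return func() {
		// Hand incoming messages straight back to the store.
		transport.ListenIncomingRaftMessages(store.StoreID(), store)
	}
}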
- lhs().LoadBasedSplitter().Reset(tc.Servers[0].Clock().PhysicalTime()) - rhs().LoadBasedSplitter().Reset(tc.Servers[1].Clock().PhysicalTime()) + lhs().LoadBasedSplitter().Reset(tc.Server(0).Clock().PhysicalTime()) + rhs().LoadBasedSplitter().Reset(tc.Server(1).Clock().PhysicalTime()) manualClock.Increment(splitByLoadMergeDelay.Nanoseconds()) } for _, splitObjective := range []kvserver.LBRebalancingObjective{ @@ -4437,7 +4438,7 @@ func TestMergeQueue(t *testing.T) { t.Run(fmt.Sprintf("unreliable-lhs-%s", splitObjective.ToDimension().String()), func(t *testing.T) { resetForLoadBasedSubtest(t) - lhs().LoadBasedSplitter().Reset(tc.Servers[0].Clock().PhysicalTime()) + lhs().LoadBasedSplitter().Reset(tc.Server(0).Clock().PhysicalTime()) clearRange(t, lhsStartKey, rhsEndKey) verifyUnmergedSoon(t, store, lhsStartKey, rhsStartKey) @@ -4446,7 +4447,7 @@ func TestMergeQueue(t *testing.T) { t.Run(fmt.Sprintf("unreliable-rhs-%s", splitObjective.ToDimension().String()), func(t *testing.T) { resetForLoadBasedSubtest(t) - rhs().LoadBasedSplitter().Reset(tc.Servers[1].Clock().PhysicalTime()) + rhs().LoadBasedSplitter().Reset(tc.Server(1).Clock().PhysicalTime()) clearRange(t, lhsStartKey, rhsEndKey) verifyUnmergedSoon(t, store, lhsStartKey, rhsStartKey) @@ -4456,8 +4457,8 @@ func TestMergeQueue(t *testing.T) { resetForLoadBasedSubtest(t) moreThanHalfStat := mergeByLoadStat/2 + 1 - rhs().LoadBasedSplitter().RecordMax(tc.Servers[0].Clock().PhysicalTime(), moreThanHalfStat) - lhs().LoadBasedSplitter().RecordMax(tc.Servers[1].Clock().PhysicalTime(), moreThanHalfStat) + rhs().LoadBasedSplitter().RecordMax(tc.Server(0).Clock().PhysicalTime(), moreThanHalfStat) + lhs().LoadBasedSplitter().RecordMax(tc.Server(1).Clock().PhysicalTime(), moreThanHalfStat) clearRange(t, lhsStartKey, rhsEndKey) verifyUnmergedSoon(t, store, lhsStartKey, rhsStartKey) @@ -4468,8 +4469,8 @@ func TestMergeQueue(t *testing.T) { manualClock.Increment(splitByLoadMergeDelay.Nanoseconds()) lessThanHalfStat := mergeByLoadStat/2 - 1 - rhs().LoadBasedSplitter().RecordMax(tc.Servers[0].Clock().PhysicalTime(), lessThanHalfStat) - lhs().LoadBasedSplitter().RecordMax(tc.Servers[1].Clock().PhysicalTime(), lessThanHalfStat) + rhs().LoadBasedSplitter().RecordMax(tc.Server(0).Clock().PhysicalTime(), lessThanHalfStat) + lhs().LoadBasedSplitter().RecordMax(tc.Server(1).Clock().PhysicalTime(), lessThanHalfStat) clearRange(t, lhsStartKey, rhsEndKey) verifyMergedSoon(t, store, lhsStartKey, rhsStartKey) @@ -4498,8 +4499,8 @@ func TestMergeQueue(t *testing.T) { resetForLoadBasedSubtest(t) moreThanHalfStat := mergeByLoadStat/2 + 1 - rhs().LoadBasedSplitter().RecordMax(tc.Servers[0].Clock().PhysicalTime(), moreThanHalfStat) - lhs().LoadBasedSplitter().RecordMax(tc.Servers[1].Clock().PhysicalTime(), moreThanHalfStat) + rhs().LoadBasedSplitter().RecordMax(tc.Server(0).Clock().PhysicalTime(), moreThanHalfStat) + lhs().LoadBasedSplitter().RecordMax(tc.Server(1).Clock().PhysicalTime(), moreThanHalfStat) clearRange(t, lhsStartKey, rhsEndKey) // Switch the dimension, so that any recorded load should @@ -4518,8 +4519,8 @@ func TestMergeQueue(t *testing.T) { manualClock.Increment(splitByLoadMergeDelay.Nanoseconds()) lessThanHalfStat := mergeByLoadStat/2 - 1 - rhs().LoadBasedSplitter().RecordMax(tc.Servers[0].Clock().PhysicalTime(), lessThanHalfStat) - lhs().LoadBasedSplitter().RecordMax(tc.Servers[1].Clock().PhysicalTime(), lessThanHalfStat) + rhs().LoadBasedSplitter().RecordMax(tc.Server(0).Clock().PhysicalTime(), lessThanHalfStat) + 
lhs().LoadBasedSplitter().RecordMax(tc.Server(1).Clock().PhysicalTime(), lessThanHalfStat) clearRange(t, lhsStartKey, rhsEndKey) setSplitObjective(secondSplitObjective) @@ -4568,7 +4569,7 @@ func TestMergeQueue(t *testing.T) { defer manualClock.Resume() // Perform manual merge and verify that no merge occurred. - exp := tc.Servers[0].Clock().Now().Add(manualSplitTTL.Nanoseconds(), 0) + exp := tc.Server(0).Clock().Now().Add(manualSplitTTL.Nanoseconds(), 0) split(t, rhsStartKey.AsRawKey(), exp /* expirationTime */) clearRange(t, lhsStartKey, rhsEndKey) verifyUnmergedSoon(t, store, lhsStartKey, rhsStartKey) diff --git a/pkg/kv/kvserver/client_metrics_test.go b/pkg/kv/kvserver/client_metrics_test.go index 306b0aedad60..fb5bac1405dc 100644 --- a/pkg/kv/kvserver/client_metrics_test.go +++ b/pkg/kv/kvserver/client_metrics_test.go @@ -303,7 +303,7 @@ func TestStoreMetrics(t *testing.T) { // Flush Pebble memtables, so that Pebble begins using block-based tables. // This is useful, because most of the stats we track don't apply to // memtables. - for i := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { if err := tc.GetFirstStoreFromServer(t, i).TODOEngine().Flush(); err != nil { t.Fatal(err) } diff --git a/pkg/kv/kvserver/client_migration_test.go b/pkg/kv/kvserver/client_migration_test.go index 9c9d2166b863..8eb20c8d7a34 100644 --- a/pkg/kv/kvserver/client_migration_test.go +++ b/pkg/kv/kvserver/client_migration_test.go @@ -85,7 +85,7 @@ func TestStorePurgeOutdatedReplicas(t *testing.T) { require.NoError(t, tc.WaitForVoters(k, tc.Target(n2), tc.Target(n3))) for _, node := range []int{n2, n3} { - ts := tc.Servers[node] + ts := tc.Server(node) store, pErr := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if pErr != nil { t.Fatal(pErr) @@ -107,7 +107,7 @@ func TestStorePurgeOutdatedReplicas(t *testing.T) { t.Fatal(err) } - ts := tc.Servers[n2] + ts := tc.Server(n2) store, pErr := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if pErr != nil { t.Fatal(pErr) @@ -219,7 +219,7 @@ func TestMigrateWithInflightSnapshot(t *testing.T) { } for _, node := range []int{n1, n2} { - ts := tc.Servers[node] + ts := tc.Server(node) store, pErr := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if pErr != nil { t.Fatal(pErr) @@ -273,7 +273,7 @@ func TestMigrateWaitsForApplication(t *testing.T) { require.NoError(t, tc.WaitForVoters(k, tc.Target(n2), tc.Target(n3))) for _, node := range []int{n1, n2, n3} { - ts := tc.Servers[node] + ts := tc.Server(node) store, pErr := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if pErr != nil { t.Fatal(pErr) @@ -306,7 +306,7 @@ func TestMigrateWaitsForApplication(t *testing.T) { } for _, node := range []int{n1, n2, n3} { - ts := tc.Servers[node] + ts := tc.Server(node) store, pErr := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if pErr != nil { t.Fatal(pErr) diff --git a/pkg/kv/kvserver/client_raft_helpers_test.go b/pkg/kv/kvserver/client_raft_helpers_test.go index c0c736b76eb6..e386db826ca3 100644 --- a/pkg/kv/kvserver/client_raft_helpers_test.go +++ b/pkg/kv/kvserver/client_raft_helpers_test.go @@ -180,7 +180,7 @@ type testClusterStoreRaftMessageHandler struct { } func (h *testClusterStoreRaftMessageHandler) getStore() (*kvserver.Store, error) { - ts := h.tc.Servers[h.storeIdx] + ts := h.tc.Server(h.storeIdx) return ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) } @@ -275,8 +275,8 @@ func setupPartitionedRange( activated bool, funcs unreliableRaftHandlerFuncs, ) 
(*testClusterPartitionedRange, error) { - handlers := make([]kvserver.IncomingRaftMessageHandler, 0, len(tc.Servers)) - for i := range tc.Servers { + handlers := make([]kvserver.IncomingRaftMessageHandler, 0, tc.NumServers()) + for i := 0; i < tc.NumServers(); i++ { handlers = append(handlers, &testClusterStoreRaftMessageHandler{ tc: tc, storeIdx: i, @@ -301,7 +301,7 @@ func setupPartitionedRangeWithHandlers( pr.mu.partitioned = activated pr.mu.partitionedNodeIdx = partitionedNodeIdx if replicaID == 0 { - ts := tc.Servers[partitionedNodeIdx] + ts := tc.Server(partitionedNodeIdx) store, err := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if err != nil { return nil, err @@ -319,7 +319,7 @@ func setupPartitionedRangeWithHandlers( pr.mu.partitionedReplicas = map[roachpb.ReplicaID]bool{ replicaID: true, } - for i := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { s := i h := &unreliableRaftHandler{ rangeID: rangeID, @@ -383,7 +383,7 @@ func setupPartitionedRangeWithHandlers( } } pr.handlers = append(pr.handlers, h) - tc.Servers[s].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(tc.Target(s).StoreID, h) + tc.Server(s).RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(tc.Target(s).StoreID, h) } return pr, nil } diff --git a/pkg/kv/kvserver/client_raft_log_queue_test.go b/pkg/kv/kvserver/client_raft_log_queue_test.go index 07260d9c2e83..bd44084b9dfb 100644 --- a/pkg/kv/kvserver/client_raft_log_queue_test.go +++ b/pkg/kv/kvserver/client_raft_log_queue_test.go @@ -91,7 +91,7 @@ func TestRaftLogQueue(t *testing.T) { var afterTruncationIndex kvpb.RaftIndex testutils.SucceedsSoon(t, func() error { // Force a truncation check. - for i := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { tc.GetFirstStoreFromServer(t, i).MustForceRaftLogScanAndProcess() } // Flush the engine to advance durability, which triggers truncation. @@ -113,7 +113,7 @@ func TestRaftLogQueue(t *testing.T) { // GetFirstIndex, giving a false negative. Fixing this requires additional // instrumentation of the queues, which was deemed to require too much work // at the time of this writing. - for i := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { tc.GetFirstStoreFromServer(t, i).MustForceRaftLogScanAndProcess() } diff --git a/pkg/kv/kvserver/client_raft_test.go b/pkg/kv/kvserver/client_raft_test.go index 2fe6f538ef09..00623ff8f942 100644 --- a/pkg/kv/kvserver/client_raft_test.go +++ b/pkg/kv/kvserver/client_raft_test.go @@ -781,7 +781,7 @@ func TestSnapshotAfterTruncation(t *testing.T) { testutils.SucceedsSoon(t, func() error { hasLeader := false term := uint64(0) - for i := 0; i < len(tc.Servers); i++ { + for i := 0; i < tc.NumServers(); i++ { repl := tc.GetFirstStoreFromServer(t, i).LookupReplica(key) require.NotNil(t, repl) status := repl.RaftStatus() @@ -804,7 +804,7 @@ func TestSnapshotAfterTruncation(t *testing.T) { }) // Turn the queues back on and wait for the snapshot to be sent and processed. 
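The raft helper hunk above is the clearest statement of the loop rewrite applied everywhere in this patch: slice length becomes NumServers() and the range-over-slice form becomes an index loop with an explicit Server(i) call. A sketch of the rewritten shape, with a made-up helper name:

// collectStoreIDs (hypothetical) gathers the first store ID of every server.
// The old form ranged over the exported slice:
//
//	for _, s := range tc.Servers { ... }
//
// and becomes an index loop over the accessor:
func collectStoreIDs(tc *testcluster.TestCluster) []roachpb.StoreID {
	ids := make([]roachpb.StoreID, 0, tc.NumServers())
	for i := 0; i < tc.NumServers(); i++ {
		s := tc.Server(i)
		ids = append(ids, s.GetFirstStoreID())
	}
	return ids
}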
- for i := 0; i < len(tc.Servers)-1; i++ { + for i := 0; i < tc.NumServers()-1; i++ { tc.GetFirstStoreFromServer(t, i).SetRaftSnapshotQueueActive(true) if err := tc.GetFirstStoreFromServer(t, i).ForceRaftSnapshotQueueProcess(); err != nil { t.Fatal(err) @@ -948,7 +948,7 @@ func TestSnapshotAfterTruncationWithUncommittedTail(t *testing.T) { return hb.FromReplicaID == partReplDesc.ReplicaID } } - tc.Servers[s].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(tc.Target(s).StoreID, h) + tc.Server(s).RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(tc.Target(s).StoreID, h) } // Perform a series of writes on the partitioned replica. The writes will @@ -1044,7 +1044,7 @@ func TestSnapshotAfterTruncationWithUncommittedTail(t *testing.T) { // Remove the partition. Snapshot should follow. log.Infof(ctx, "test: removing the partition") for _, s := range []int{0, 1, 2} { - tc.Servers[s].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(tc.Target(s).StoreID, &unreliableRaftHandler{ + tc.Server(s).RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(tc.Target(s).StoreID, &unreliableRaftHandler{ rangeID: partRepl.RangeID, IncomingRaftMessageHandler: tc.GetFirstStoreFromServer(t, s), unreliableRaftHandlerFuncs: unreliableRaftHandlerFuncs{ @@ -1075,7 +1075,7 @@ func TestSnapshotAfterTruncationWithUncommittedTail(t *testing.T) { // Perform another write. The partitioned replica should be able to receive // replicated updates. incArgs = incrementArgs(key, incC) - if _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSenderI().(kv.Sender), incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(ctx, tc.Server(0).DistSenderI().(kv.Sender), incArgs); pErr != nil { t.Fatal(pErr) } tc.WaitForValues(t, key, []int64{incABC, incABC, incABC}) @@ -1121,7 +1121,7 @@ func TestRequestsOnLaggingReplica(t *testing.T) { tc := testcluster.StartTestCluster(t, 3, clusterArgs) defer tc.Stopper().Stop(ctx) - _, rngDesc, err := tc.Servers[0].ScratchRangeEx() + _, rngDesc, err := tc.Server(0).ScratchRangeEx() require.NoError(t, err) key := rngDesc.StartKey.AsRawKey() // Add replicas on all the stores. @@ -1385,7 +1385,7 @@ func TestRequestsOnFollowerWithNonLiveLeaseholder(t *testing.T) { } // Create a new range. - _, rngDesc, err := tc.Servers[0].ScratchRangeEx() + _, rngDesc, err := tc.Server(0).ScratchRangeEx() require.NoError(t, err) key := rngDesc.StartKey.AsRawKey() // Add replicas on all the stores. @@ -1557,7 +1557,7 @@ func TestReceiveSnapshotLogging(t *testing.T) { }, }) - _, scratchRange, err := tc.Servers[0].ScratchRangeEx() + _, scratchRange, err := tc.Server(0).ScratchRangeEx() require.NoError(t, err) return ctx, tc, &scratchRange, signals @@ -1864,7 +1864,7 @@ func TestReplicateAfterRemoveAndSplit(t *testing.T) { // Try to up-replicate the RHS of the split to store 2. // Don't use tc.AddVoter because we expect a retriable error and want it // returned to us. - if _, err := tc.Servers[0].DB().AdminChangeReplicas( + if _, err := tc.Server(0).DB().AdminChangeReplicas( ctx, splitKey, tc.LookupRangeOrFatal(t, splitKey), kvpb.MakeReplicationChanges(roachpb.ADD_VOTER, tc.Target(3)), ); !kvserver.IsRetriableReplicationChangeError(err) { @@ -1992,7 +1992,7 @@ func TestLogGrowthWhenRefreshingPendingCommands(t *testing.T) { }) // Stop enough nodes to prevent a quorum. 
- for i := 2; i < len(tc.Servers); i++ { + for i := 2; i < tc.NumServers(); i++ { tc.StopServer(i) } @@ -2082,7 +2082,7 @@ func TestStoreRangeUpReplicate(t *testing.T) { var replicaCount int64 testutils.SucceedsSoon(t, func() error { var replicaCounts [numServers]int64 - for i := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { var err error tc.GetFirstStoreFromServer(t, i).VisitReplicas(func(r *kvserver.Replica) bool { replicaCounts[i]++ @@ -2112,7 +2112,7 @@ func TestStoreRangeUpReplicate(t *testing.T) { var generated int64 var learnerApplied, raftApplied int64 - for i := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { m := tc.GetFirstStoreFromServer(t, i).Metrics() generated += m.RangeSnapshotsGenerated.Count() learnerApplied += m.RangeSnapshotsAppliedForInitialUpreplication.Count() @@ -2318,7 +2318,7 @@ func runReplicateRestartAfterTruncation(t *testing.T, removeBeforeTruncateAndReA }, RaftConfig: base.RaftConfig{ // Don't timeout raft leaders or range leases. This test expects - // tc.Servers[0] to hold the range lease for the range under test. + // tc.Server(0) to hold the range lease for the range under test. RaftElectionTimeoutTicks: 1000000, RangeLeaseDuration: time.Minute, }, @@ -2578,7 +2578,7 @@ func TestQuotaPool(t *testing.T) { }) followerRepl := func() *kvserver.Replica { - for i := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { repl := tc.GetFirstStoreFromServer(t, i).LookupReplica(roachpb.RKey(key)) require.NotNil(t, repl) if repl == leaderRepl { @@ -2615,7 +2615,7 @@ func TestQuotaPool(t *testing.T) { value := bytes.Repeat([]byte("v"), (3*quota)/4) ba := &kvpb.BatchRequest{} ba.Add(putArgs(keyToWrite, value)) - if err := ba.SetActiveTimestamp(tc.Servers[0].Clock()); err != nil { + if err := ba.SetActiveTimestamp(tc.Server(0).Clock()); err != nil { t.Fatal(err) } if _, pErr := leaderRepl.Send(ctx, ba); pErr != nil { @@ -2636,7 +2636,7 @@ func TestQuotaPool(t *testing.T) { go func() { ba := &kvpb.BatchRequest{} ba.Add(putArgs(keyToWrite, value)) - if err := ba.SetActiveTimestamp(tc.Servers[0].Clock()); err != nil { + if err := ba.SetActiveTimestamp(tc.Server(0).Clock()); err != nil { ch <- kvpb.NewError(err) return } @@ -2689,7 +2689,7 @@ func TestWedgedReplicaDetection(t *testing.T) { leaderRepl := tc.GetRaftLeader(t, key) followerRepl := func() *kvserver.Replica { - for i := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { repl := tc.GetFirstStoreFromServer(t, i).LookupReplica(key) require.NotNil(t, repl) if repl == leaderRepl { @@ -2726,7 +2726,7 @@ func TestWedgedReplicaDetection(t *testing.T) { value := []byte("value") ba := &kvpb.BatchRequest{} ba.Add(putArgs(key, value)) - if err := ba.SetActiveTimestamp(tc.Servers[0].Clock()); err != nil { + if err := ba.SetActiveTimestamp(tc.Server(0).Clock()); err != nil { t.Fatal(err) } if _, pErr := leaderRepl.Send(ctx, ba); pErr != nil { @@ -2816,14 +2816,14 @@ func TestReportUnreachableHeartbeats(t *testing.T) { leaderRepl := tc.GetRaftLeader(t, roachpb.RKey(key)) initialTerm := leaderRepl.RaftStatus().Term // Choose a follower index that is guaranteed to not be the leader. 
- followerIdx := int(leaderRepl.StoreID()) % len(tc.Servers) + followerIdx := int(leaderRepl.StoreID()) % tc.NumServers() // Get the store for the leader leaderStore := tc.GetFirstStoreFromServer(t, int(leaderRepl.StoreID()-1)) // Shut down a raft transport via the circuit breaker, and wait for two // election timeouts to trigger an election if reportUnreachable broke // heartbeat transmission to the other store. - b, ok := tc.Servers[followerIdx].RaftTransport().(*kvserver.RaftTransport).GetCircuitBreaker( + b, ok := tc.Server(followerIdx).RaftTransport().(*kvserver.RaftTransport).GetCircuitBreaker( tc.Target(followerIdx).NodeID, rpc.DefaultClass) require.True(t, ok) undo := circuit.TestingSetTripped(b, errors.New("boom")) @@ -2879,7 +2879,7 @@ func TestReportUnreachableRemoveRace(t *testing.T) { var leaderIdx int var leaderRepl *kvserver.Replica testutils.SucceedsSoon(t, func() error { - for idx := range tc.Servers { + for idx := 0; idx < tc.NumServers(); idx++ { repl := tc.GetFirstStoreFromServer(t, idx).LookupReplica(roachpb.RKey(key)) require.NotNil(t, repl) if repl.RaftStatus().SoftState.RaftState == raft.StateLeader { @@ -2911,9 +2911,9 @@ func TestReportUnreachableRemoveRace(t *testing.T) { // the circuit breaker on all other nodes. t.Logf("partitioning") var undos []func() - for i := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { if i != partitionedMaybeLeaseholderIdx { - b, ok := tc.Servers[i].RaftTransport().(*kvserver.RaftTransport).GetCircuitBreaker(tc.Target(partitionedMaybeLeaseholderIdx).NodeID, rpc.DefaultClass) + b, ok := tc.Server(i).RaftTransport().(*kvserver.RaftTransport).GetCircuitBreaker(tc.Target(partitionedMaybeLeaseholderIdx).NodeID, rpc.DefaultClass) require.True(t, ok) undos = append(undos, circuit.TestingSetTripped(b, errors.New("boom"))) } @@ -3113,7 +3113,7 @@ func TestRaftAfterRemoveRange(t *testing.T) { // Wait for the removal to be processed. testutils.SucceedsSoon(t, func() error { - for i := range tc.Servers[1:] { + for i := 0; i < tc.NumServers(); i++ { store := tc.GetFirstStoreFromServer(t, i) _, err := store.GetReplica(desc.RangeID) if !errors.HasType(err, (*kvpb.RangeNotFoundError)(nil)) { @@ -3138,7 +3138,7 @@ func TestRaftAfterRemoveRange(t *testing.T) { StoreID: target2.StoreID, } - tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).SendAsync(&kvserverpb.RaftMessageRequest{ + tc.Server(2).RaftTransport().(*kvserver.RaftTransport).SendAsync(&kvserverpb.RaftMessageRequest{ ToReplica: replica1, FromReplica: replica2, Heartbeats: []kvserverpb.RaftHeartbeat{ @@ -3182,8 +3182,8 @@ func TestRaftRemoveRace(t *testing.T) { // Cyclically up-replicate to a bunch of nodes which stresses a condition // where replicas receive messages for a previous or later incarnation of the // replica. - targets := make([]roachpb.ReplicationTarget, len(tc.Servers)-1) - for i := 1; i < len(tc.Servers); i++ { + targets := make([]roachpb.ReplicationTarget, tc.NumServers()-1) + for i := 1; i < tc.NumServers(); i++ { targets[i-1] = tc.Target(i) } tc.AddVotersOrFatal(t, key, targets...) @@ -3314,8 +3314,8 @@ func TestReplicaGCRace(t *testing.T) { toStore := tc.GetFirstStoreFromServer(t, 2) // Prevent the victim replica from processing configuration changes. 
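TestRaftRemoveRace above pre-sizes its replication-target slice with NumServers() before up-replicating. A sketch of that target construction, assuming the roachpb import and a key obtained elsewhere; the helper name is made up:

// upreplicateToAllOthers (hypothetical) adds a voter for key on every server
// except the first, mirroring the targets slice built in the hunk above.
func upreplicateToAllOthers(t *testing.T, tc *testcluster.TestCluster, key roachpb.Key) {
	targets := make([]roachpb.ReplicationTarget, 0, tc.NumServers()-1)
	for i := 1; i < tc.NumServers(); i++ {
		targets = append(targets, tc.Target(i))
	}
	tc.AddVotersOrFatal(t, key, targets...)
}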
- tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).StopIncomingRaftMessages(toStore.Ident.StoreID) - tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(toStore.Ident.StoreID, &noConfChangeTestHandler{ + tc.Server(2).RaftTransport().(*kvserver.RaftTransport).StopIncomingRaftMessages(toStore.Ident.StoreID) + tc.Server(2).RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(toStore.Ident.StoreID, &noConfChangeTestHandler{ rangeID: desc.RangeID, IncomingRaftMessageHandler: toStore, }) @@ -3399,15 +3399,15 @@ func TestReplicaGCRace(t *testing.T) { // Create a new transport for store 0. Error responses are passed // back along the same grpc stream as the request so it's ok that // there are two (this one and the one actually used by the store). - ambient := tc.Servers[0].AmbientCtx() + ambient := tc.Server(0).AmbientCtx() ambient.AddLogTag("test-raft-transport", nil) fromTransport := kvserver.NewRaftTransport( ambient, cluster.MakeTestingClusterSettings(), ambient.Tracer, - nodedialer.New(tc.Servers[0].RPCContext(), gossip.AddressResolver(fromStore.Gossip())), + nodedialer.New(tc.Server(0).RPCContext(), gossip.AddressResolver(fromStore.Gossip())), nil, /* grpcServer */ - tc.Servers[0].Stopper(), + tc.Server(0).Stopper(), kvflowdispatch.NewDummyDispatch(), kvserver.NoopStoresFlowControlIntegration{}, kvserver.NoopRaftTransportDisconnectListener{}, @@ -3612,7 +3612,7 @@ func TestReplicateRogueRemovedNode(t *testing.T) { tc.GetFirstStoreFromServer(t, 2).TestSender(), kvpb.Header{ Replica: replicaDesc, - Timestamp: tc.Servers[2].Clock().Now(), + Timestamp: tc.Server(2).Clock().Now(), }, incArgs, ) detail := pErr.GetDetail() @@ -3736,7 +3736,7 @@ func TestReplicateRemovedNodeDisruptiveElection(t *testing.T) { // established after the first node's removal. value := int64(5) incArgs := incrementArgs(key, value) - if _, err := kv.SendWrapped(ctx, tc.Servers[1].DistSenderI().(kv.Sender), incArgs); err != nil { + if _, err := kv.SendWrapped(ctx, tc.Server(1).DistSenderI().(kv.Sender), incArgs); err != nil { t.Fatal(err) } @@ -3797,13 +3797,13 @@ func TestReplicateRemovedNodeDisruptiveElection(t *testing.T) { // so it's ok that there are two (this one and the one actually used by the // store). transport0 := kvserver.NewRaftTransport( - tc.Servers[0].AmbientCtx(), + tc.Server(0).AmbientCtx(), cluster.MakeTestingClusterSettings(), - tc.Servers[0].AmbientCtx().Tracer, - nodedialer.New(tc.Servers[0].RPCContext(), + tc.Server(0).AmbientCtx().Tracer, + nodedialer.New(tc.Server(0).RPCContext(), gossip.AddressResolver(tc.GetFirstStoreFromServer(t, 0).Gossip())), nil, /* grpcServer */ - tc.Servers[0].Stopper(), + tc.Server(0).Stopper(), kvflowdispatch.NewDummyDispatch(), kvserver.NoopStoresFlowControlIntegration{}, kvserver.NoopRaftTransportDisconnectListener{}, @@ -4274,7 +4274,7 @@ func TestRangeQuiescence(t *testing.T) { waitForQuiescence := func(key roachpb.RKey) { testutils.SucceedsSoon(t, func() error { - for i := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { rep := tc.GetFirstStoreFromServer(t, i).LookupReplica(key) require.NotNil(t, rep) if !rep.IsQuiescent() { @@ -4293,7 +4293,7 @@ func TestRangeQuiescence(t *testing.T) { // Unquiesce a follower range, this should "wake the leader" and not result // in an election. 
- followerIdx := int(leader.StoreID()) % len(tc.Servers) + followerIdx := int(leader.StoreID()) % tc.NumServers() tc.GetFirstStoreFromServer(t, followerIdx).EnqueueRaftUpdateCheck(tc.LookupRangeOrFatal(t, key).RangeID) // Wait for a bunch of ticks to occur which will allow the follower time to @@ -4324,7 +4324,7 @@ func TestUninitializedReplicaRemainsQuiesced(t *testing.T) { }) defer tc.Stopper().Stop(ctx) - _, desc, err := tc.Servers[0].ScratchRangeEx() + _, desc, err := tc.Server(0).ScratchRangeEx() key := desc.StartKey.AsRawKey() require.NoError(t, err) require.NoError(t, tc.WaitForSplitAndInitialization(key)) @@ -4341,7 +4341,7 @@ func TestUninitializedReplicaRemainsQuiesced(t *testing.T) { } s2, err := tc.Server(1).GetStores().(*kvserver.Stores).GetStore(tc.Server(1).GetFirstStoreID()) require.NoError(t, err) - tc.Servers[1].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(s2.StoreID(), &unreliableRaftHandler{ + tc.Server(1).RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(s2.StoreID(), &unreliableRaftHandler{ rangeID: desc.RangeID, IncomingRaftMessageHandler: s2, unreliableRaftHandlerFuncs: handlerFuncs, @@ -4378,7 +4378,7 @@ func TestUninitializedReplicaRemainsQuiesced(t *testing.T) { require.NoError(t, err) require.True(t, repl.IsInitialized()) require.False(t, repl.IsQuiescent()) - if !kvserver.ExpirationLeasesOnly.Get(&tc.Servers[0].ClusterSettings().SV) { + if !kvserver.ExpirationLeasesOnly.Get(&tc.Server(0).ClusterSettings().SV) { testutils.SucceedsSoon(t, func() error { if !repl.IsQuiescent() { return errors.Errorf("%s not quiescent", repl) @@ -4516,7 +4516,7 @@ func TestStoreRangeWaitForApplication(t *testing.T) { defer tc.Stopper().Stop(ctx) store0, store2 := tc.GetFirstStoreFromServer(t, 0), tc.GetFirstStoreFromServer(t, 2) - distSender := tc.Servers[0].DistSenderI().(kv.Sender) + distSender := tc.Server(0).DistSenderI().(kv.Sender) key := []byte("a") tc.SplitRangeOrFatal(t, key) @@ -4537,7 +4537,8 @@ func TestStoreRangeWaitForApplication(t *testing.T) { } var targets []target - for _, s := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + s := tc.Server(serverIdx) conn, err := s.NodeDialer().(*nodedialer.Dialer).Dial(ctx, s.NodeID(), rpc.DefaultClass) if err != nil { t.Fatal(err) @@ -4665,7 +4666,7 @@ func TestStoreWaitForReplicaInit(t *testing.T) { defer tc.Stopper().Stop(ctx) store := tc.GetFirstStoreFromServer(t, 0) - conn, err := tc.Servers[0].NodeDialer().(*nodedialer.Dialer).Dial(ctx, store.Ident.NodeID, rpc.DefaultClass) + conn, err := tc.Server(0).NodeDialer().(*nodedialer.Dialer).Dial(ctx, store.Ident.NodeID, rpc.DefaultClass) if err != nil { t.Fatal(err) } @@ -4704,7 +4705,7 @@ func TestStoreWaitForReplicaInit(t *testing.T) { var repl *kvserver.Replica testutils.SucceedsSoon(t, func() (err error) { // Try several times, as the message may be dropped (see #18355). 
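The quiescence hunks above also route scratch-range creation and cluster-setting reads through the accessor: ScratchRangeEx() on Server(0) and ClusterSettings().SV for the metamorphic lease setting. A sketch combining the two, with a made-up helper name:

// newScratchRange (hypothetical) carves out a scratch range via node 0, waits
// for it to split and initialize, and returns its start key.
func newScratchRange(t *testing.T, tc *testcluster.TestCluster) roachpb.Key {
	_, desc, err := tc.Server(0).ScratchRangeEx()
	require.NoError(t, err)
	key := desc.StartKey.AsRawKey()
	require.NoError(t, tc.WaitForSplitAndInitialization(key))

	// Cluster settings are read through the same accessor, e.g. the
	// metamorphic expiration-lease setting consulted in these tests.
	_ = kvserver.ExpirationLeasesOnly.Get(&tc.Server(0).ClusterSettings().SV)
	return key
}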
- tc.Servers[0].RaftTransport().(*kvserver.RaftTransport).SendAsync(&kvserverpb.RaftMessageRequest{ + tc.Server(0).RaftTransport().(*kvserver.RaftTransport).SendAsync(&kvserverpb.RaftMessageRequest{ ToReplica: roachpb.ReplicaDescriptor{ NodeID: store.Ident.NodeID, StoreID: store.Ident.StoreID, @@ -4760,7 +4761,7 @@ func TestTracingDoesNotRaceWithCancelation(t *testing.T) { require.Nil(t, err) for i := 0; i < 3; i++ { - tc.Servers[i].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(tc.Target(i).StoreID, &unreliableRaftHandler{ + tc.Server(i).RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(tc.Target(i).StoreID, &unreliableRaftHandler{ rangeID: ri.Desc.RangeID, IncomingRaftMessageHandler: tc.GetFirstStoreFromServer(t, i), unreliableRaftHandlerFuncs: unreliableRaftHandlerFuncs{ @@ -5031,7 +5032,7 @@ func TestAckWriteBeforeApplication(t *testing.T) { } // Begin peforming a write on the Range. - magicTS = tc.Servers[0].Clock().Now() + magicTS = tc.Server(0).Clock().Now() atomic.StoreInt32(&filterActive, 1) ch := make(chan *kvpb.Error, 1) go func() { @@ -5336,7 +5337,7 @@ func TestProcessSplitAfterRightHandSideHasBeenRemoved(t *testing.T) { // Unsuccessful because the RHS will not accept the learner snapshot // and will be rolled back. Nevertheless it will have learned that it // has been removed at the old replica ID. - _, err = tc.Servers[0].DB().AdminChangeReplicas( + _, err = tc.Server(0).DB().AdminChangeReplicas( ctx, keyB, tc.LookupRangeOrFatal(t, keyB), kvpb.MakeReplicationChanges(roachpb.ADD_VOTER, tc.Target(0)), ) @@ -5389,7 +5390,7 @@ func TestProcessSplitAfterRightHandSideHasBeenRemoved(t *testing.T) { // Unsuccessfuly because the RHS will not accept the learner snapshot // and will be rolled back. Nevertheless it will have learned that it // has been removed at the old replica ID. - _, err = tc.Servers[0].DB().AdminChangeReplicas( + _, err = tc.Server(0).DB().AdminChangeReplicas( ctx, keyB, tc.LookupRangeOrFatal(t, keyB), kvpb.MakeReplicationChanges(roachpb.ADD_VOTER, tc.Target(0)), ) @@ -5465,7 +5466,7 @@ func TestProcessSplitAfterRightHandSideHasBeenRemoved(t *testing.T) { // removed at the old replica ID. We don't use tc.AddVoters because that // will retry until it runs out of time, since we're creating a // retriable-looking situation here that will persist. - _, err = tc.Servers[0].DB().AdminChangeReplicas( + _, err = tc.Server(0).DB().AdminChangeReplicas( ctx, keyB, tc.LookupRangeOrFatal(t, keyB), kvpb.MakeReplicationChanges(roachpb.ADD_VOTER, tc.Target(0)), ) @@ -5529,7 +5530,7 @@ func TestProcessSplitAfterRightHandSideHasBeenRemoved(t *testing.T) { // // Not using tc.AddVoters because we expect an error, but that error // would be retried internally. - _, err = tc.Servers[0].DB().AdminChangeReplicas( + _, err = tc.Server(0).DB().AdminChangeReplicas( ctx, keyB, tc.LookupRangeOrFatal(t, keyB), kvpb.MakeReplicationChanges(roachpb.ADD_VOTER, tc.Target(0)), ) @@ -5649,15 +5650,15 @@ func TestElectionAfterRestart(t *testing.T) { tc.Start(t) defer t.Log("stopped cluster") defer tc.Stopper().Stop(ctx) - _, err := tc.Conns[0].Exec(`CREATE TABLE t(x, PRIMARY KEY(x)) AS TABLE generate_series(1, $1)`, numRanges-1) + _, err := tc.ServerConn(0).Exec(`CREATE TABLE t(x, PRIMARY KEY(x)) AS TABLE generate_series(1, $1)`, numRanges-1) require.NoError(t, err) // Splitting in reverse order is faster (splitDelayHelper doesn't have to add any delays). 
- _, err = tc.Conns[0].Exec(`ALTER TABLE t SPLIT AT TABLE generate_series($1, 1, -1)`, numRanges-1) + _, err = tc.ServerConn(0).Exec(`ALTER TABLE t SPLIT AT TABLE generate_series($1, 1, -1)`, numRanges-1) require.NoError(t, err) require.NoError(t, tc.WaitForFullReplication()) testutils.SucceedsSoon(t, func() error { - for _, row := range sqlutils.MakeSQLRunner(tc.Conns[0]).QueryStr( + for _, row := range sqlutils.MakeSQLRunner(tc.ServerConn(0)).QueryStr( t, `SELECT range_id FROM [SHOW RANGES FROM TABLE t]`, ) { n, err := strconv.Atoi(row[0]) @@ -5686,7 +5687,8 @@ func TestElectionAfterRestart(t *testing.T) { for rangeID := range rangeIDs { var err error var lastIndex kvpb.RaftIndex - for _, srv := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + srv := tc.Server(serverIdx) _ = srv.GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { s.VisitReplicas(func(replica *kvserver.Replica) (more bool) { if replica.RangeID != rangeID { @@ -5711,7 +5713,8 @@ func TestElectionAfterRestart(t *testing.T) { } return nil }) - for _, srv := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + srv := tc.Server(serverIdx) require.NoError(t, srv.GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { return s.TODOEngine().Flush() })) @@ -5751,7 +5754,7 @@ func TestElectionAfterRestart(t *testing.T) { t.Log("started cluster") defer tc.Stopper().Stop(ctx) - runner := sqlutils.MakeSQLRunner(tc.Conns[0]) + runner := sqlutils.MakeSQLRunner(tc.ServerConn(0)) tBegin := timeutil.Now() require.Equal(t, fmt.Sprint(numRanges-1), runner.QueryStr(t, `SELECT count(1) FROM t`)[0][0]) dur := timeutil.Since(tBegin) @@ -5817,7 +5820,8 @@ func TestRaftSnapshotsWithMVCCRangeKeys(t *testing.T) { require.NoError(t, tc.WaitForVoters(keyC, tc.Targets(1, 2)...)) // Read them back from all stores. - for _, srv := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + srv := tc.Server(serverIdx) store, err := srv.GetStores().(*kvserver.Stores).GetStore(srv.GetFirstStoreID()) require.NoError(t, err) require.Equal(t, kvs{ @@ -6230,13 +6234,13 @@ func TestRaftPreVote(t *testing.T) { // Configure the partition, but don't activate it yet. if partial { // Partition n3 away from n1, in both directions. - dropRaftMessagesFrom(t, tc.Servers[0], rangeID, []roachpb.ReplicaID{3}, &partitioned) - dropRaftMessagesFrom(t, tc.Servers[2], rangeID, []roachpb.ReplicaID{1}, &partitioned) + dropRaftMessagesFrom(t, tc.Server(0), rangeID, []roachpb.ReplicaID{3}, &partitioned) + dropRaftMessagesFrom(t, tc.Server(2), rangeID, []roachpb.ReplicaID{1}, &partitioned) } else { // Partition n3 away from both of n1 and n2, in both directions. - dropRaftMessagesFrom(t, tc.Servers[0], rangeID, []roachpb.ReplicaID{3}, &partitioned) - dropRaftMessagesFrom(t, tc.Servers[1], rangeID, []roachpb.ReplicaID{3}, &partitioned) - dropRaftMessagesFrom(t, tc.Servers[2], rangeID, []roachpb.ReplicaID{1, 2}, &partitioned) + dropRaftMessagesFrom(t, tc.Server(0), rangeID, []roachpb.ReplicaID{3}, &partitioned) + dropRaftMessagesFrom(t, tc.Server(1), rangeID, []roachpb.ReplicaID{3}, &partitioned) + dropRaftMessagesFrom(t, tc.Server(2), rangeID, []roachpb.ReplicaID{1, 2}, &partitioned) } // Make sure the lease is on n1 and that everyone has applied it. @@ -6416,11 +6420,11 @@ func TestRaftCheckQuorum(t *testing.T) { // Set up dropping of inbound messages on n1 from n2,n3, but don't // activate it yet. 
var partitioned atomic.Bool - dropRaftMessagesFrom(t, tc.Servers[0], desc.RangeID, []roachpb.ReplicaID{2, 3}, &partitioned) + dropRaftMessagesFrom(t, tc.Server(0), desc.RangeID, []roachpb.ReplicaID{2, 3}, &partitioned) if symmetric { // Drop outbound messages from n1 to n2,n3 too. - dropRaftMessagesFrom(t, tc.Servers[1], desc.RangeID, []roachpb.ReplicaID{1}, &partitioned) - dropRaftMessagesFrom(t, tc.Servers[2], desc.RangeID, []roachpb.ReplicaID{1}, &partitioned) + dropRaftMessagesFrom(t, tc.Server(1), desc.RangeID, []roachpb.ReplicaID{1}, &partitioned) + dropRaftMessagesFrom(t, tc.Server(2), desc.RangeID, []roachpb.ReplicaID{1}, &partitioned) } // Make sure the lease is on n1 and that everyone has applied it. @@ -6807,9 +6811,9 @@ func TestRaftPreVoteUnquiesceDeadLeader(t *testing.T) { // Set up a complete partition for n1, but don't activate it yet. var partitioned atomic.Bool - dropRaftMessagesFrom(t, tc.Servers[0], desc.RangeID, []roachpb.ReplicaID{2, 3}, &partitioned) - dropRaftMessagesFrom(t, tc.Servers[1], desc.RangeID, []roachpb.ReplicaID{1}, &partitioned) - dropRaftMessagesFrom(t, tc.Servers[2], desc.RangeID, []roachpb.ReplicaID{1}, &partitioned) + dropRaftMessagesFrom(t, tc.Server(0), desc.RangeID, []roachpb.ReplicaID{2, 3}, &partitioned) + dropRaftMessagesFrom(t, tc.Server(1), desc.RangeID, []roachpb.ReplicaID{1}, &partitioned) + dropRaftMessagesFrom(t, tc.Server(2), desc.RangeID, []roachpb.ReplicaID{1}, &partitioned) // Make sure the lease is on n1 and that everyone has applied it. tc.TransferRangeLeaseOrFatal(t, desc, tc.Target(0)) diff --git a/pkg/kv/kvserver/client_relocate_range_test.go b/pkg/kv/kvserver/client_relocate_range_test.go index b9d4f1b126a0..5ae05371d243 100644 --- a/pkg/kv/kvserver/client_relocate_range_test.go +++ b/pkg/kv/kvserver/client_relocate_range_test.go @@ -47,7 +47,7 @@ func relocateAndCheck( t.Helper() every := log.Every(1 * time.Second) testutils.SucceedsSoon(t, func() error { - err := tc.Servers[0].DB(). + err := tc.Server(0).DB(). AdminRelocateRange( context.Background(), startKey.AsRawKey(), @@ -63,7 +63,7 @@ func relocateAndCheck( } return err }) - desc, err := tc.Servers[0].LookupRange(startKey.AsRawKey()) + desc, err := tc.Server(0).LookupRange(startKey.AsRawKey()) require.NoError(t, err) requireDescMembers(t, desc, append(voterTargets, nonVoterTargets...)) if len(voterTargets) > 0 { @@ -82,7 +82,7 @@ func requireRelocationFailure( errRegExp string, ) { testutils.SucceedsSoon(t, func() error { - err := tc.Servers[0].DB().AdminRelocateRange( + err := tc.Server(0).DB().AdminRelocateRange( ctx, startKey.AsRawKey(), voterTargets, @@ -331,7 +331,7 @@ func TestAdminRelocateRangeWithoutLeaseTransfer(t *testing.T) { relocateAndCheck(t, tc, k, tc.Targets(0, 1, 2), nil /* nonVoterTargets */) // Move the last voter without asking for the lease to move. - err := tc.Servers[0].DB().AdminRelocateRange( + err := tc.Server(0).DB().AdminRelocateRange( context.Background(), k.AsRawKey(), tc.Targets(3, 1, 0), @@ -387,7 +387,7 @@ func TestAdminRelocateRangeFailsWithDuplicates(t *testing.T) { }, } for _, subtest := range tests { - err := tc.Servers[0].DB().AdminRelocateRange( + err := tc.Server(0).DB().AdminRelocateRange( context.Background(), k.AsRawKey(), tc.Targets(subtest.voterTargets...), @@ -462,7 +462,7 @@ func TestReplicaRemovalDuringGet(t *testing.T) { // Perform write. 
pArgs := putArgs(key, []byte("foo")) - _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSenderI().(kv.Sender), pArgs) + _, pErr := kv.SendWrapped(ctx, tc.Server(0).DistSenderI().(kv.Sender), pArgs) require.Nil(t, pErr) // Perform delayed read during replica removal. @@ -488,7 +488,7 @@ func TestReplicaRemovalDuringCPut(t *testing.T) { // Perform write. pArgs := putArgs(key, []byte("foo")) - _, pErr := kv.SendWrapped(ctx, tc.Servers[0].DistSenderI().(kv.Sender), pArgs) + _, pErr := kv.SendWrapped(ctx, tc.Server(0).DistSenderI().(kv.Sender), pArgs) require.Nil(t, pErr) // Perform delayed conditional put during replica removal. This will cause @@ -561,7 +561,7 @@ func setupReplicaRemovalTest( err *kvpb.Error } resultC := make(chan result) - srv := tc.Servers[0] + srv := tc.Server(0) err := srv.Stopper().RunAsyncTask(ctx, "request", func(ctx context.Context) { reqCtx := context.WithValue(ctx, magicKey{}, struct{}{}) resp, pErr := kv.SendWrapped(reqCtx, srv.DistSenderI().(kv.Sender), req) diff --git a/pkg/kv/kvserver/client_replica_circuit_breaker_test.go b/pkg/kv/kvserver/client_replica_circuit_breaker_test.go index 12fd0993d60a..68ac5dc5a88d 100644 --- a/pkg/kv/kvserver/client_replica_circuit_breaker_test.go +++ b/pkg/kv/kvserver/client_replica_circuit_breaker_test.go @@ -610,7 +610,7 @@ func TestReplicaCircuitBreaker_ExemptRequests(t *testing.T) { return addReq }, func() kvpb.Request { - return &kvpb.RevertRangeRequest{TargetTime: tc.Servers[0].Clock().Now()} + return &kvpb.RevertRangeRequest{TargetTime: tc.Server(0).Clock().Now()} }, func() kvpb.Request { return &kvpb.GCRequest{} @@ -830,7 +830,7 @@ func setupCircuitBreakerTest(t *testing.T) *circuitBreakerTest { require.NoError(t, tc.WaitForVoters(k, tc.Target(n2))) var repls []replWithKnob - for i := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { repl := tc.GetFirstStoreFromServer(t, i).LookupReplica(keys.MustAddr(k)) enableProbe := makeBreakerToggleable(repl.Breaker()) repls = append(repls, replWithKnob{repl, enableProbe}) diff --git a/pkg/kv/kvserver/client_replica_gc_test.go b/pkg/kv/kvserver/client_replica_gc_test.go index f8aaa9a7a4e8..a65068250f5e 100644 --- a/pkg/kv/kvserver/client_replica_gc_test.go +++ b/pkg/kv/kvserver/client_replica_gc_test.go @@ -90,7 +90,7 @@ func TestReplicaGCQueueDropReplicaDirect(t *testing.T) { tc.AddVotersOrFatal(t, k, tc.Target(1), tc.Target(2)) require.NoError(t, tc.WaitForVoters(k, tc.Target(1), tc.Target(2))) - ts := tc.Servers[1] + ts := tc.Server(1) store, pErr := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if pErr != nil { t.Fatal(pErr) @@ -170,7 +170,7 @@ func TestReplicaGCQueueDropReplicaGCOnScan(t *testing.T) { ) defer tc.Stopper().Stop(context.Background()) - ts := tc.Servers[1] + ts := tc.Server(1) store, pErr := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if pErr != nil { t.Fatal(pErr) diff --git a/pkg/kv/kvserver/client_replica_raft_overload_test.go b/pkg/kv/kvserver/client_replica_raft_overload_test.go index 3e9b8e401e1e..f245b9bfddd0 100644 --- a/pkg/kv/kvserver/client_replica_raft_overload_test.go +++ b/pkg/kv/kvserver/client_replica_raft_overload_test.go @@ -81,7 +81,7 @@ func TestReplicaRaftOverload(t *testing.T) { // so it may or may not contribute here (depending on when it quiesces). 
// // See: https://github.com/cockroachdb/cockroach/issues/84252 - require.NoError(t, tc.Servers[0].DB().Put(ctx, tc.ScratchRange(t), "foo")) + require.NoError(t, tc.Server(0).DB().Put(ctx, tc.ScratchRange(t), "foo")) s1 := tc.GetFirstStoreFromServer(t, 0) require.NoError(t, s1.ComputeMetrics(ctx)) if n := s1.Metrics().RaftPausedFollowerCount.Value(); n == 0 { diff --git a/pkg/kv/kvserver/client_replica_test.go b/pkg/kv/kvserver/client_replica_test.go index 5a897ab01666..3714cb65d844 100644 --- a/pkg/kv/kvserver/client_replica_test.go +++ b/pkg/kv/kvserver/client_replica_test.go @@ -106,7 +106,8 @@ func TestReplicaClockUpdates(t *testing.T) { }) defer tc.Stopper().Stop(ctx) - for _, s := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + s := tc.Server(serverIdx) clocks = append(clocks, s.Clock()) } store := tc.GetFirstStoreFromServer(t, 0) @@ -615,7 +616,7 @@ func testTxnReadWithinUncertaintyIntervalAfterIntentResolution( if cur := m.UnixNano(); cur > maxNanos { maxNanos = cur } - clocks = append(clocks, tc.Servers[i].Clock()) + clocks = append(clocks, tc.Server(i).Clock()) } // After doing so, perfectly synchronize them. for _, m := range manuals { @@ -625,12 +626,12 @@ func testTxnReadWithinUncertaintyIntervalAfterIntentResolution( // Create a new writer transaction. maxOffset := clocks[0].MaxOffset().Nanoseconds() require.NotZero(t, maxOffset) - writerTxn := roachpb.MakeTransaction("test_writer", keyA, isolation.Serializable, 1, clocks[0].Now(), maxOffset, int32(tc.Servers[0].NodeID())) + writerTxn := roachpb.MakeTransaction("test_writer", keyA, isolation.Serializable, 1, clocks[0].Now(), maxOffset, int32(tc.Server(0).NodeID())) // Write to key A and key B in the writer transaction. for _, key := range []roachpb.Key{keyA, keyB} { put := putArgs(key, []byte("val")) - resp, pErr := kv.SendWrappedWith(ctx, tc.Servers[0].DistSenderI().(kv.Sender), kvpb.Header{Txn: &writerTxn}, put) + resp, pErr := kv.SendWrappedWith(ctx, tc.Server(0).DistSenderI().(kv.Sender), kvpb.Header{Txn: &writerTxn}, put) require.Nil(t, pErr) writerTxn.Update(resp.Header().Txn) } @@ -656,7 +657,7 @@ func testTxnReadWithinUncertaintyIntervalAfterIntentResolution( et.LockSpans[i].EndKey = et.LockSpans[i].Key.Next() } } - etResp, pErr := kv.SendWrappedWith(ctx, tc.Servers[0].DistSenderI().(kv.Sender), etH, et) + etResp, pErr := kv.SendWrappedWith(ctx, tc.Server(0).DistSenderI().(kv.Sender), etH, et) require.Nil(t, pErr) writerTxn.Update(etResp.Header().Txn) @@ -670,7 +671,7 @@ func testTxnReadWithinUncertaintyIntervalAfterIntentResolution( // // NB: we use writerTxn.MinTimestamp instead of clocks[1].Now() so that a // stray clock update doesn't influence the reader's read timestamp. - readerTxn := roachpb.MakeTransaction("test_reader", keyA, isolation.Serializable, 1, writerTxn.MinTimestamp, maxOffset, int32(tc.Servers[1].NodeID())) + readerTxn := roachpb.MakeTransaction("test_reader", keyA, isolation.Serializable, 1, writerTxn.MinTimestamp, maxOffset, int32(tc.Server(1).NodeID())) require.True(t, readerTxn.ReadTimestamp.Less(writerTxn.WriteTimestamp)) require.False(t, readerTxn.GlobalUncertaintyLimit.Less(writerTxn.WriteTimestamp)) @@ -683,7 +684,7 @@ func testTxnReadWithinUncertaintyIntervalAfterIntentResolution( // transactions are always an observed timestamp from their own gateway node. 
for i, key := range []roachpb.Key{keyB, keyA} { get := getArgs(key.Next()) - resp, pErr := kv.SendWrappedWith(ctx, tc.Servers[1].DistSenderI().(kv.Sender), kvpb.Header{Txn: &readerTxn}, get) + resp, pErr := kv.SendWrappedWith(ctx, tc.Server(1).DistSenderI().(kv.Sender), kvpb.Header{Txn: &readerTxn}, get) require.Nil(t, pErr) require.Nil(t, resp.(*kvpb.GetResponse).Value) readerTxn.Update(resp.Header().Txn) @@ -735,14 +736,14 @@ func testTxnReadWithinUncertaintyIntervalAfterIntentResolution( // leading to a stale read. // resolve := resolveIntentArgs(roachpb.PENDING) - _, pErr = kv.SendWrapped(ctx, tc.Servers[0].DistSenderI().(kv.Sender), resolve) + _, pErr = kv.SendWrapped(ctx, tc.Server(0).DistSenderI().(kv.Sender), resolve) require.Nil(t, pErr) } if alreadyResolved { // Resolve the committed value on key B to COMMITTED. resolve := resolveIntentArgs(roachpb.COMMITTED) - _, pErr = kv.SendWrapped(ctx, tc.Servers[0].DistSenderI().(kv.Sender), resolve) + _, pErr = kv.SendWrapped(ctx, tc.Server(0).DistSenderI().(kv.Sender), resolve) require.Nil(t, pErr) } } @@ -751,7 +752,7 @@ func testTxnReadWithinUncertaintyIntervalAfterIntentResolution( // ReadWithinUncertaintyIntervalErrors. for _, key := range []roachpb.Key{keyA, keyB} { get := getArgs(key) - _, pErr := kv.SendWrappedWith(ctx, tc.Servers[0].DistSenderI().(kv.Sender), kvpb.Header{Txn: &readerTxn}, get) + _, pErr := kv.SendWrappedWith(ctx, tc.Server(0).DistSenderI().(kv.Sender), kvpb.Header{Txn: &readerTxn}, get) require.NotNil(t, pErr) var rwuiErr *kvpb.ReadWithinUncertaintyIntervalError require.True(t, errors.As(pErr.GetDetail(), &rwuiErr)) @@ -815,7 +816,7 @@ func TestTxnReadWithinUncertaintyIntervalAfterLeaseTransfer(t *testing.T) { if cur := m.UnixNano(); cur > maxNanos { maxNanos = cur } - clocks = append(clocks, tc.Servers[i].Clock()) + clocks = append(clocks, tc.Server(i).Clock()) } // After doing so, perfectly synchronize them. for _, m := range manuals { @@ -826,13 +827,13 @@ func TestTxnReadWithinUncertaintyIntervalAfterLeaseTransfer(t *testing.T) { now := clocks[1].Now() maxOffset := clocks[1].MaxOffset().Nanoseconds() require.NotZero(t, maxOffset) - txn := roachpb.MakeTransaction("test", keyB, isolation.Serializable, 1, now, maxOffset, int32(tc.Servers[1].SQLInstanceID())) + txn := roachpb.MakeTransaction("test", keyB, isolation.Serializable, 1, now, maxOffset, int32(tc.Server(1).SQLInstanceID())) require.True(t, txn.ReadTimestamp.Less(txn.GlobalUncertaintyLimit)) require.Len(t, txn.ObservedTimestamps, 0) // Collect an observed timestamp in that transaction from node 2. getB := getArgs(keyB) - resp, pErr := kv.SendWrappedWith(ctx, tc.Servers[1].DistSenderI().(kv.Sender), kvpb.Header{Txn: &txn}, getB) + resp, pErr := kv.SendWrappedWith(ctx, tc.Server(1).DistSenderI().(kv.Sender), kvpb.Header{Txn: &txn}, getB) require.Nil(t, pErr) txn.Update(resp.Header().Txn) require.Len(t, txn.ObservedTimestamps, 1) @@ -857,7 +858,7 @@ func TestTxnReadWithinUncertaintyIntervalAfterLeaseTransfer(t *testing.T) { // stale read. ba := &kvpb.BatchRequest{} ba.Add(putArgs(keyA, []byte("val"))) - br, pErr := tc.Servers[0].DistSenderI().(kv.Sender).Send(ctx, ba) + br, pErr := tc.Server(0).DistSenderI().(kv.Sender).Send(ctx, ba) require.Nil(t, pErr) writeTs := br.Timestamp @@ -879,7 +880,7 @@ func TestTxnReadWithinUncertaintyIntervalAfterLeaseTransfer(t *testing.T) { // avoid the uncertainty error. This is a good thing, as doing so would allow // for a stale read. 
getA := getArgs(keyA) - _, pErr = kv.SendWrappedWith(ctx, tc.Servers[1].DistSenderI().(kv.Sender), kvpb.Header{Txn: &txn}, getA) + _, pErr = kv.SendWrappedWith(ctx, tc.Server(1).DistSenderI().(kv.Sender), kvpb.Header{Txn: &txn}, getA) require.NotNil(t, pErr) require.IsType(t, &kvpb.ReadWithinUncertaintyIntervalError{}, pErr.GetDetail()) } @@ -998,9 +999,9 @@ func TestTxnReadWithinUncertaintyIntervalAfterRangeMerge(t *testing.T) { // Grab the clock times before we increment the other clock. Otherwise, there // is a chance that server 0 will see server 2's clock and update itself prior // to reading these values. - now := tc.Servers[0].Clock().Now() - maxOffset := tc.Servers[0].Clock().MaxOffset().Nanoseconds() - instanceId := int32(tc.Servers[0].SQLInstanceID()) + now := tc.Server(0).Clock().Now() + maxOffset := tc.Server(0).Clock().MaxOffset().Nanoseconds() + instanceId := int32(tc.Server(0).SQLInstanceID()) // Move the RHS leaseholders clocks forward past the observed timestamp before // writing. @@ -1008,7 +1009,7 @@ func TestTxnReadWithinUncertaintyIntervalAfterRangeMerge(t *testing.T) { // Write the data from a different transaction to establish the time for the // key as 10 ns in the future. - _, pErr := kv.SendWrapped(ctx, tc.Servers[2].DistSenderI().(kv.Sender), putArgs(keyC, []byte("value"))) + _, pErr := kv.SendWrapped(ctx, tc.Server(2).DistSenderI().(kv.Sender), putArgs(keyC, []byte("value"))) require.Nil(t, pErr) // Create two identical transactions. The first one will perform a read to a @@ -1017,7 +1018,7 @@ func TestTxnReadWithinUncertaintyIntervalAfterRangeMerge(t *testing.T) { txn2 := roachpb.MakeTransaction("txn2", keyA, isolation.Serializable, 1, now, maxOffset, instanceId) // Simulate a read which will cause the observed time to be set to now - resp, pErr := kv.SendWrappedWith(ctx, tc.Servers[1].DistSenderI().(kv.Sender), kvpb.Header{Txn: &txn}, getArgs(keyA)) + resp, pErr := kv.SendWrappedWith(ctx, tc.Server(1).DistSenderI().(kv.Sender), kvpb.Header{Txn: &txn}, getArgs(keyA)) require.Nil(t, pErr) // The client needs to update its transaction to the returned transaction which has observed timestamps in it txn = *resp.Header().Txn @@ -1043,7 +1044,7 @@ func TestTxnReadWithinUncertaintyIntervalAfterRangeMerge(t *testing.T) { // Try and read the transaction from the context of a new transaction. This // will fail as expected as the observed timestamp will not be set. - _, pErr = kv.SendWrappedWith(ctx, tc.Servers[0].DistSenderI().(kv.Sender), kvpb.Header{Txn: &txn2}, getArgs(keyC)) + _, pErr = kv.SendWrappedWith(ctx, tc.Server(0).DistSenderI().(kv.Sender), kvpb.Header{Txn: &txn2}, getArgs(keyC)) require.IsType(t, &kvpb.ReadWithinUncertaintyIntervalError{}, pErr.GetDetail()) // Try and read the key from the existing transaction. This should fail the @@ -1053,7 +1054,7 @@ func TestTxnReadWithinUncertaintyIntervalAfterRangeMerge(t *testing.T) { // - Other error (Bad) - We expect an uncertainty error so the client can choose a new timestamp and retry. // - Not found (Bad) - Error because the data was written before us. // - Found (Bad) - The write HLC timestamp is after our timestamp. 
- _, pErr = kv.SendWrappedWith(ctx, tc.Servers[0].DistSenderI().(kv.Sender), kvpb.Header{Txn: &txn}, getArgs(keyC)) + _, pErr = kv.SendWrappedWith(ctx, tc.Server(0).DistSenderI().(kv.Sender), kvpb.Header{Txn: &txn}, getArgs(keyC)) require.IsType(t, &kvpb.ReadWithinUncertaintyIntervalError{}, pErr.GetDetail()) } @@ -1202,7 +1203,7 @@ func TestNonTxnReadWithinUncertaintyIntervalAfterLeaseTransfer(t *testing.T) { // absence would not be a true stale read. ba := &kvpb.BatchRequest{} ba.Add(putArgs(key, []byte("val"))) - br, pErr := tc.Servers[0].DistSenderI().(kv.Sender).Send(ctx, ba) + br, pErr := tc.Server(0).DistSenderI().(kv.Sender).Send(ctx, ba) require.Nil(t, pErr) writeTs := br.Timestamp require.True(t, nonTxnOrigTs.Less(writeTs)) @@ -1459,7 +1460,7 @@ func setupLeaseTransferTest(t *testing.T) *leaseTransferTest { // First, do a write; we'll use it to determine when the dust has settled. l.leftKey = key incArgs := incrementArgs(l.leftKey, 1) - if _, pErr := kv.SendWrapped(context.Background(), l.tc.Servers[0].DistSenderI().(kv.Sender), incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), l.tc.Server(0).DistSenderI().(kv.Sender), incArgs); pErr != nil { t.Fatal(pErr) } l.replica0 = l.tc.GetFirstStoreFromServer(t, 0).LookupReplica(roachpb.RKey(key)) @@ -1905,15 +1906,15 @@ func TestLeaseExpirationBelowFutureTimeRequest(t *testing.T) { // Move the clock up near (but below) the lease expiration. l.manualClock.Increment((preLease.Expiration.WallTime - 10) - atPause) - now := l.tc.Servers[1].Clock().Now() + now := l.tc.Server(1).Clock().Now() // Construct a future-time request timestamp past the current lease's // expiration. Remember to set the synthetic bit so that it is not used // to update the store's clock. See Replica.checkRequestTimeRLocked for // the exact determination of whether a request timestamp is too far in // the future or not. - leaseRenewal := l.tc.Servers[1].RaftConfig().RangeLeaseRenewalDuration() - leaseRenewalMinusStasis := leaseRenewal - l.tc.Servers[1].Clock().MaxOffset() + leaseRenewal := l.tc.Server(1).RaftConfig().RangeLeaseRenewalDuration() + leaseRenewalMinusStasis := leaseRenewal - l.tc.Server(1).Clock().MaxOffset() reqTime := now.Add(leaseRenewalMinusStasis.Nanoseconds()-10, 0) if tooFarInFuture { reqTime = reqTime.Add(20, 0) @@ -1993,13 +1994,13 @@ func TestRangeLocalUncertaintyLimitAfterNewLease(t *testing.T) { } // Start a transaction using node2 as a gateway. - txn := roachpb.MakeTransaction("test", keyA, isolation.Serializable, 1, tc.Servers[1].Clock().Now(), tc.Servers[1].Clock().MaxOffset().Nanoseconds(), int32(tc.Servers[1].SQLInstanceID())) + txn := roachpb.MakeTransaction("test", keyA, isolation.Serializable, 1, tc.Server(1).Clock().Now(), tc.Server(1).Clock().MaxOffset().Nanoseconds(), int32(tc.Server(1).SQLInstanceID())) // Simulate a read to another range on node2 by setting the observed timestamp. - txn.UpdateObservedTimestamp(2, tc.Servers[1].Clock().NowAsClockTimestamp()) + txn.UpdateObservedTimestamp(2, tc.Server(1).Clock().NowAsClockTimestamp()) // Do a write on node1 to establish a key with its timestamp at now. 
if _, pErr := kv.SendWrapped( - ctx, tc.Servers[0].DistSenderI().(kv.Sender), putArgs(keyA, []byte("value")), + ctx, tc.Server(0).DistSenderI().(kv.Sender), putArgs(keyA, []byte("value")), ); pErr != nil { t.Fatal(pErr) } @@ -2009,7 +2010,7 @@ func TestRangeLocalUncertaintyLimitAfterNewLease(t *testing.T) { replica2 := tc.GetFirstStoreFromServer(t, 1).LookupReplica(roachpb.RKey(keyA)) // Transfer the lease from node1 to node2. - node1Before := tc.Servers[0].Clock().Now() + node1Before := tc.Server(0).Clock().Now() tc.TransferRangeLeaseOrFatal(t, *replica2.Desc(), tc.Target(1)) testutils.SucceedsSoon(t, func() error { lease, _ := replica2.GetLease() @@ -2020,7 +2021,7 @@ func TestRangeLocalUncertaintyLimitAfterNewLease(t *testing.T) { }) // Verify that after the lease transfer, node2's clock has advanced to at // least match node1's from before the lease transfer. - node2After := tc.Servers[1].Clock().Now() + node2After := tc.Server(1).Clock().Now() if node2After.Less(node1Before) { t.Fatalf("expected node2's clock walltime to be >= %s; got %s", node1Before, node2After) } @@ -2031,7 +2032,7 @@ func TestRangeLocalUncertaintyLimitAfterNewLease(t *testing.T) { // expect to see an uncertainty interval error. h := kvpb.Header{Txn: &txn} if _, pErr := kv.SendWrappedWith( - ctx, tc.Servers[0].DistSenderI().(kv.Sender), h, getArgs(keyA), + ctx, tc.Server(0).DistSenderI().(kv.Sender), h, getArgs(keyA), ); !testutils.IsPError(pErr, "uncertainty") { t.Fatalf("expected an uncertainty interval error; got %v", pErr) } @@ -2129,7 +2130,7 @@ func TestLeaseMetricsOnSplitAndTransfer(t *testing.T) { // expiration and epoch leases. var expirationLeases int64 var epochLeases int64 - for i := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { if err := tc.GetFirstStoreFromServer(t, i).ComputeMetrics(context.Background()); err != nil { return err } @@ -2418,7 +2419,7 @@ func TestLeaseInfoRequest(t *testing.T) { tc := testcluster.StartTestCluster(t, 3, base.TestClusterArgs{}) defer tc.Stopper().Stop(context.Background()) - kvDB0 := tc.Servers[0].DB() + kvDB0 := tc.Server(0).DB() key := []byte("a") rangeDesc, err := tc.LookupRange(key) @@ -2428,7 +2429,7 @@ func TestLeaseInfoRequest(t *testing.T) { replicas := make([]roachpb.ReplicaDescriptor, 3) for i := 0; i < 3; i++ { var ok bool - replicas[i], ok = rangeDesc.GetReplicaDescriptor(tc.Servers[i].GetFirstStoreID()) + replicas[i], ok = rangeDesc.GetReplicaDescriptor(tc.Server(i).GetFirstStoreID()) if !ok { t.Fatalf("expected to find replica in server %d", i) } @@ -2471,7 +2472,7 @@ func TestLeaseInfoRequest(t *testing.T) { // use an old, cached, version of the range descriptor that doesn't have the // local replica in it (and so the request would be routed away). // TODO(andrei): Add a batch option to not use the range cache. - s, err := tc.Servers[1].GetStores().(*kvserver.Stores).GetStore(tc.Servers[1].GetFirstStoreID()) + s, err := tc.Server(1).GetStores().(*kvserver.Stores).GetStore(tc.Server(1).GetFirstStoreID()) if err != nil { t.Fatal(err) } @@ -2819,7 +2820,7 @@ func TestLeaseTransferInSnapshotUpdatesTimestampCache(t *testing.T) { manualClock.Pause() // Determine when to read. 
- readTS := tc.Servers[0].Clock().Now() + readTS := tc.Server(0).Clock().Now() if futureRead { readTS = readTS.Add(500*time.Millisecond.Nanoseconds(), 0).WithSynthetic(true) } @@ -2829,7 +2830,7 @@ func TestLeaseTransferInSnapshotUpdatesTimestampCache(t *testing.T) { ba := &kvpb.BatchRequest{} ba.Timestamp = readTS ba.Add(getArgs(keyA)) - br, pErr := tc.Servers[0].DistSenderI().(kv.Sender).Send(ctx, ba) + br, pErr := tc.Server(0).DistSenderI().(kv.Sender).Send(ctx, ba) require.Nil(t, pErr) require.Equal(t, readTS, br.Timestamp) v, err := br.Responses[0].GetGet().Value.GetInt() @@ -2849,7 +2850,7 @@ func TestLeaseTransferInSnapshotUpdatesTimestampCache(t *testing.T) { funcs.snapErr = func(*kvserverpb.SnapshotRequest_Header) error { return errors.New("rejected") } - tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.StoreID(), &unreliableRaftHandler{ + tc.Server(2).RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.StoreID(), &unreliableRaftHandler{ rangeID: repl0.GetRangeID(), IncomingRaftMessageHandler: store2, unreliableRaftHandlerFuncs: funcs, @@ -2875,14 +2876,14 @@ func TestLeaseTransferInSnapshotUpdatesTimestampCache(t *testing.T) { // but we're never 100% successful. // NOTE: we bypass safety checks because the target node is behind on its log, // so the lease transfer would be rejected otherwise. - err = tc.Servers[0].DB().AdminTransferLeaseBypassingSafetyChecks(ctx, + err = tc.Server(0).DB().AdminTransferLeaseBypassingSafetyChecks(ctx, repl0.Desc().StartKey.AsRawKey(), tc.Target(2).StoreID) require.Nil(t, err) // Remove the partition. A snapshot to node 2 should follow. This snapshot // will inform node 2 that it is the new leaseholder for the range. Node 2 // should act accordingly and update its internal state to reflect this. - tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.Ident.StoreID, store2) + tc.Server(2).RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.Ident.StoreID, store2) tc.WaitForValues(t, keyC, []int64{4, 4, 4}) // Attempt to write under the read on the new leaseholder. The batch @@ -2894,7 +2895,7 @@ func TestLeaseTransferInSnapshotUpdatesTimestampCache(t *testing.T) { ba = &kvpb.BatchRequest{} ba.Timestamp = readTS ba.Add(incrementArgs(keyA, 1)) - br, pErr = tc.Servers[0].DistSenderI().(kv.Sender).Send(ctx, ba) + br, pErr = tc.Server(0).DistSenderI().(kv.Sender).Send(ctx, ba) require.Nil(t, pErr) require.NotEqual(t, readTS, br.Timestamp) require.True(t, readTS.Less(br.Timestamp)) @@ -2987,7 +2988,7 @@ func TestLeaseTransferRejectedIfTargetNeedsSnapshot(t *testing.T) { funcs.snapErr = func(*kvserverpb.SnapshotRequest_Header) error { return errors.New("rejected") } - tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.StoreID(), &unreliableRaftHandler{ + tc.Server(2).RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.StoreID(), &unreliableRaftHandler{ rangeID: repl0.GetRangeID(), IncomingRaftMessageHandler: store2, unreliableRaftHandlerFuncs: funcs, @@ -3038,7 +3039,7 @@ func TestLeaseTransferRejectedIfTargetNeedsSnapshot(t *testing.T) { require.True(t, isRejectedErr, "%+v", transferErr) // Remove the partition. A snapshot to node 2 should follow. 
- tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.Ident.StoreID, store2) + tc.Server(2).RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store2.Ident.StoreID, store2) tc.WaitForValues(t, keyC, []int64{4, 4, 4}) // Now that node 2 caught up on the log through a snapshot, we should be @@ -3093,7 +3094,7 @@ func TestConcurrentAdminChangeReplicasRequests(t *testing.T) { ctx := context.Background() defer tc.Stopper().Stop(ctx) key := roachpb.Key("a") - db := tc.Servers[0].DB() + db := tc.Server(0).DB() rangeInfo, err := getRangeInfo(ctx, db, key) require.Nil(t, err) require.Len(t, rangeInfo.Desc.InternalReplicas, 1) @@ -3168,7 +3169,7 @@ func TestRandomConcurrentAdminChangeReplicasRequests(t *testing.T) { errs := make([]error, actors) var wg sync.WaitGroup key := roachpb.Key("a") - db := tc.Servers[0].DB() + db := tc.Server(0).DB() require.Nil( t, db.AdminRelocateRange( @@ -3293,7 +3294,7 @@ func TestReplicaTombstone(t *testing.T) { funcs.dropResp = func(*kvserverpb.RaftMessageResponse) bool { return true } - tc.Servers[1].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ + tc.Server(1).RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ rangeID: desc.RangeID, IncomingRaftMessageHandler: store, unreliableRaftHandlerFuncs: funcs, @@ -3349,7 +3350,7 @@ func TestReplicaTombstone(t *testing.T) { raftFuncs.dropReq = func(req *kvserverpb.RaftMessageRequest) bool { return req.ToReplica.StoreID == store.StoreID() } - tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ + tc.Server(2).RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ rangeID: desc.RangeID, IncomingRaftMessageHandler: store, unreliableRaftHandlerFuncs: raftFuncs, @@ -3393,7 +3394,7 @@ func TestReplicaTombstone(t *testing.T) { // It will never find out it has been removed. We'll remove it // with a manual replica GC. store, _ := getFirstStoreReplica(t, tc.Server(2), key) - tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ + tc.Server(2).RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ rangeID: desc.RangeID, IncomingRaftMessageHandler: store, }) @@ -3432,7 +3433,7 @@ func TestReplicaTombstone(t *testing.T) { rangeID := desc.RangeID // Partition node 2 from all raft communication. 
store, _ := getFirstStoreReplica(t, tc.Server(2), keyA) - tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ + tc.Server(2).RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ rangeID: desc.RangeID, IncomingRaftMessageHandler: store, }) @@ -3513,7 +3514,7 @@ func TestReplicaTombstone(t *testing.T) { waiter.blockSnapshot = true } setMinHeartbeat(repl.ReplicaID() + 1) - tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ + tc.Server(2).RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ rangeID: desc.RangeID, IncomingRaftMessageHandler: store, unreliableRaftHandlerFuncs: unreliableRaftHandlerFuncs{ @@ -3542,7 +3543,7 @@ func TestReplicaTombstone(t *testing.T) { // // Don't use tc.AddVoter; this would retry internally as we're faking // a snapshot error here (and these are all considered retriable). - _, err = tc.Servers[0].DB().AdminChangeReplicas( + _, err = tc.Server(0).DB().AdminChangeReplicas( ctx, key, tc.LookupRangeOrFatal(t, key), kvpb.MakeReplicationChanges(roachpb.ADD_VOTER, tc.Target(2)), ) require.Regexp(t, "boom", err) @@ -3560,7 +3561,7 @@ func TestReplicaTombstone(t *testing.T) { // We could replica GC these replicas without too much extra work but they // also should be rare. Note this is not new with learner replicas. setMinHeartbeat(5) - _, err = tc.Servers[0].DB().AdminChangeReplicas( + _, err = tc.Server(0).DB().AdminChangeReplicas( ctx, key, tc.LookupRangeOrFatal(t, key), kvpb.MakeReplicationChanges(roachpb.ADD_VOTER, tc.Target(2)), ) require.Regexp(t, "boom", err) @@ -3620,7 +3621,7 @@ func TestReplicaTombstone(t *testing.T) { raftFuncs.dropReq = func(req *kvserverpb.RaftMessageRequest) bool { return partActive.Load().(bool) && req.Message.Type == raftpb.MsgApp } - tc.Servers[2].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ + tc.Server(2).RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ rangeID: lhsDesc.RangeID, unreliableRaftHandlerFuncs: raftFuncs, IncomingRaftMessageHandler: &unreliableRaftHandler{ @@ -3704,7 +3705,7 @@ func TestAdminRelocateRangeSafety(t *testing.T) { }) ctx := context.Background() defer tc.Stopper().Stop(ctx) - db := tc.Servers[rand.Intn(numNodes)].DB() + db := tc.Server(rand.Intn(numNodes)).DB() // The test assumes from the way that the range gets set up that the lease // holder is node 1 and from the above relocate call that the range in @@ -3737,7 +3738,7 @@ func TestAdminRelocateRangeSafety(t *testing.T) { // completed. // Code above verified r1 is the leaseholder, so use it to ChangeReplicas. - r1, _, err := tc.Servers[0].GetStores().(*kvserver.Stores).GetReplicaForRangeID(ctx, rangeInfo.Desc.RangeID) + r1, _, err := tc.Server(0).GetStores().(*kvserver.Stores).GetReplicaForRangeID(ctx, rangeInfo.Desc.RangeID) assert.Nil(t, err) expDescAfterAdd := rangeInfo.Desc // for use with ChangeReplicas expDescAfterAdd.NextReplicaID++ @@ -3872,7 +3873,7 @@ func TestChangeReplicasLeaveAtomicRacesWithMerge(t *testing.T) { // Manually construct the batch because the (*DB).AdminChangeReplicas does // not yet support atomic replication changes. 
- db := tc.Servers[0].DB() + db := tc.Server(0).DB() swapReplicas := func(key roachpb.Key, desc roachpb.RangeDescriptor, add, remove int) (*roachpb.RangeDescriptor, error) { return db.AdminChangeReplicas(ctx, key, desc, []kvpb.ReplicationChange{ {ChangeType: roachpb.ADD_VOTER, Target: tc.Target(add)}, @@ -4556,7 +4557,7 @@ func TestDiscoverIntentAcrossLeaseTransferAwayAndBack(t *testing.T) { }}, }) defer tc.Stopper().Stop(ctx) - kvDB := tc.Servers[0].DB() + kvDB := tc.Server(0).DB() key := []byte("a") rangeDesc, err := tc.LookupRange(key) @@ -4926,7 +4927,7 @@ func TestRangeMigration(t *testing.T) { }) defer unregister() - kvDB := tc.Servers[0].DB() + kvDB := tc.Server(0).DB() req := migrateArgs(desc.StartKey.AsRawKey(), desc.EndKey.AsRawKey(), endV) if _, pErr := kv.SendWrappedWith(ctx, kvDB.GetFactory().NonTransactionalSender(), kvpb.Header{RangeID: desc.RangeID}, req); pErr != nil { t.Fatal(pErr) diff --git a/pkg/kv/kvserver/client_split_test.go b/pkg/kv/kvserver/client_split_test.go index 25b3a66c8922..d5347c80e73b 100644 --- a/pkg/kv/kvserver/client_split_test.go +++ b/pkg/kv/kvserver/client_split_test.go @@ -912,7 +912,7 @@ func TestStoreEmptyRangeSnapshotSize(t *testing.T) { // no user data. splitKey := keys.SystemSQLCodec.TablePrefix(bootstrap.TestingUserDescID(0)) splitArgs := adminSplitArgs(splitKey) - if _, err := kv.SendWrapped(ctx, tc.Servers[0].DistSenderI().(kv.Sender), splitArgs); err != nil { + if _, err := kv.SendWrapped(ctx, tc.Server(0).DistSenderI().(kv.Sender), splitArgs); err != nil { t.Fatal(err) } @@ -932,7 +932,7 @@ func TestStoreEmptyRangeSnapshotSize(t *testing.T) { messageRecorder.headers = append(messageRecorder.headers, header) }, } - tc.Servers[1].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(tc.GetFirstStoreFromServer(t, 1).StoreID(), messageHandler) + tc.Server(1).RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(tc.GetFirstStoreFromServer(t, 1).StoreID(), messageHandler) // Replicate the newly-split range to trigger a snapshot request from store 0 // to store 1. @@ -1472,7 +1472,7 @@ func runSetupSplitSnapshotRace( // Split the data range. splitArgs = adminSplitArgs(roachpb.Key("m")) - if _, pErr := kv.SendWrapped(context.Background(), tc.Servers[0].DistSenderI().(kv.Sender), splitArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), tc.Server(0).DistSenderI().(kv.Sender), splitArgs); pErr != nil { t.Fatal(pErr) } @@ -1505,7 +1505,7 @@ func runSetupSplitSnapshotRace( // failure and render the range unable to achieve quorum after // restart (in the SnapshotWins branch). incArgs = incrementArgs(rightKey, 3) - if _, pErr := kv.SendWrapped(context.Background(), tc.Servers[0].DistSenderI().(kv.Sender), incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), tc.Server(0).DistSenderI().(kv.Sender), incArgs); pErr != nil { t.Fatal(pErr) } @@ -1513,7 +1513,7 @@ func runSetupSplitSnapshotRace( tc.WaitForValues(t, rightKey, []int64{0, 0, 0, 2, 5, 5}) // Scan the meta ranges to resolve all intents - if _, pErr := kv.SendWrapped(context.Background(), tc.Servers[0].DistSenderI().(kv.Sender), + if _, pErr := kv.SendWrapped(context.Background(), tc.Server(0).DistSenderI().(kv.Sender), &kvpb.ScanRequest{ RequestHeader: kvpb.RequestHeader{ Key: keys.MetaMin, @@ -1552,7 +1552,7 @@ func TestSplitSnapshotRace_SplitWins(t *testing.T) { // Perform a write on the left range and wait for it to propagate. 
incArgs := incrementArgs(leftKey, 10) - if _, pErr := kv.SendWrapped(context.Background(), tc.Servers[0].DistSenderI().(kv.Sender), incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), tc.Server(0).DistSenderI().(kv.Sender), incArgs); pErr != nil { t.Fatal(pErr) } tc.WaitForValues(t, leftKey, []int64{0, 11, 11, 11, 0, 0}) @@ -1563,7 +1563,7 @@ func TestSplitSnapshotRace_SplitWins(t *testing.T) { // Write to the right range. incArgs = incrementArgs(rightKey, 20) - if _, pErr := kv.SendWrapped(context.Background(), tc.Servers[0].DistSenderI().(kv.Sender), incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), tc.Server(0).DistSenderI().(kv.Sender), incArgs); pErr != nil { t.Fatal(pErr) } tc.WaitForValues(t, rightKey, []int64{0, 0, 0, 25, 25, 25}) @@ -1589,7 +1589,7 @@ func TestSplitSnapshotRace_SnapshotWins(t *testing.T) { // Perform a write on the right range. incArgs := incrementArgs(rightKey, 20) - if _, pErr := kv.SendWrapped(context.Background(), tc.Servers[0].DistSenderI().(kv.Sender), incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), tc.Server(0).DistSenderI().(kv.Sender), incArgs); pErr != nil { t.Fatal(pErr) } @@ -1613,13 +1613,13 @@ func TestSplitSnapshotRace_SnapshotWins(t *testing.T) { // it helps wake up dormant ranges that would otherwise have to wait // for retry timeouts. incArgs = incrementArgs(leftKey, 10) - if _, pErr := kv.SendWrapped(context.Background(), tc.Servers[0].DistSenderI().(kv.Sender), incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), tc.Server(0).DistSenderI().(kv.Sender), incArgs); pErr != nil { t.Fatal(pErr) } tc.WaitForValues(t, leftKey, []int64{0, 11, 11, 11, 0, 0}) incArgs = incrementArgs(rightKey, 200) - if _, pErr := kv.SendWrapped(context.Background(), tc.Servers[0].DistSenderI().(kv.Sender), incArgs); pErr != nil { + if _, pErr := kv.SendWrapped(context.Background(), tc.Server(0).DistSenderI().(kv.Sender), incArgs); pErr != nil { t.Fatal(pErr) } tc.WaitForValues(t, rightKey, []int64{0, 0, 0, 225, 225, 225}) @@ -1682,7 +1682,7 @@ func TestStoreSplitTimestampCacheDifferentLeaseHolder(t *testing.T) { t.Fatal(err) } - db := tc.Servers[0].DB() // irrelevant which one we use + db := tc.Server(0).DB() // irrelevant which one we use // Make a context tied to the Stopper. The test works without, but this // is cleaner since we won't properly terminate the transaction below. @@ -2124,7 +2124,7 @@ func TestStoreRangeSplitRaceUninitializedRHS(t *testing.T) { // range). splitKey := roachpb.Key(encoding.EncodeVarintDescending([]byte("a"), int64(i))) splitArgs := adminSplitArgs(splitKey) - _, pErr := kv.SendWrapped(context.Background(), tc.Servers[0].DistSenderI().(kv.Sender), splitArgs) + _, pErr := kv.SendWrapped(context.Background(), tc.Server(0).DistSenderI().(kv.Sender), splitArgs) errChan <- pErr }(i) go func() { @@ -2145,7 +2145,7 @@ func TestStoreRangeSplitRaceUninitializedRHS(t *testing.T) { // side in the split trigger was racing with the uninitialized // version for the same group, resulting in clobbered HardState). 
for term := uint64(1); ; term++ { - if sent := tc.Servers[1].RaftTransport().(*kvserver.RaftTransport).SendAsync(&kvserverpb.RaftMessageRequest{ + if sent := tc.Server(1).RaftTransport().(*kvserver.RaftTransport).SendAsync(&kvserverpb.RaftMessageRequest{ RangeID: trigger.RightDesc.RangeID, ToReplica: replicas[0], FromReplica: replicas[1], @@ -2212,7 +2212,7 @@ func TestLeaderAfterSplit(t *testing.T) { defer tc.Stopper().Stop(ctx) store := tc.GetFirstStoreFromServer(t, 0) - sender := tc.Servers[0].DistSenderI().(kv.Sender) + sender := tc.Server(0).DistSenderI().(kv.Sender) leftKey := roachpb.Key("a") splitKey := roachpb.Key("m") @@ -3383,14 +3383,14 @@ func TestSplitTriggerMeetsUnexpectedReplicaID(t *testing.T) { // second node). g := ctxgroup.WithContext(ctx) g.GoCtx(func(ctx context.Context) error { - _, err := tc.Servers[0].DB().AdminChangeReplicas( + _, err := tc.Server(0).DB().AdminChangeReplicas( ctx, k, tc.LookupRangeOrFatal(t, k), kvpb.MakeReplicationChanges(roachpb.ADD_VOTER, tc.Target(1)), ) return err }) store, _ := getFirstStoreReplica(t, tc.Server(1), k) - tc.Servers[1].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ + tc.Server(1).RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store.StoreID(), &unreliableRaftHandler{ rangeID: desc.RangeID, IncomingRaftMessageHandler: store, }) @@ -3405,7 +3405,7 @@ func TestSplitTriggerMeetsUnexpectedReplicaID(t *testing.T) { // // We avoid sending a snapshot because that snapshot would include the // split trigger and we want that to be processed via the log. - d, err := tc.Servers[0].DB().AdminChangeReplicas( + d, err := tc.Server(0).DB().AdminChangeReplicas( ctx, descLHS.StartKey.AsRawKey(), descLHS, kvpb.MakeReplicationChanges(roachpb.ADD_VOTER, tc.Target(1)), ) require.NoError(t, err) @@ -3420,7 +3420,7 @@ func TestSplitTriggerMeetsUnexpectedReplicaID(t *testing.T) { // Now repeatedly re-add the learner on the rhs, so it has a // different replicaID than the split trigger expects. add := func() { - _, err := tc.Servers[0].DB().AdminChangeReplicas( + _, err := tc.Server(0).DB().AdminChangeReplicas( ctx, kRHS, tc.LookupRangeOrFatal(t, kRHS), kvpb.MakeReplicationChanges(roachpb.ADD_VOTER, tc.Target(1)), ) // The "snapshot intersects existing range" error is expected if the store @@ -3468,7 +3468,7 @@ func TestSplitTriggerMeetsUnexpectedReplicaID(t *testing.T) { // Re-enable raft and wait for the lhs to catch up to the post-split // descriptor. This used to panic with "raft group deleted". 
- tc.Servers[1].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store.StoreID(), store) + tc.Server(1).RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(store.StoreID(), store) testutils.SucceedsSoon(t, func() error { repl, err := store.GetReplica(descLHS.RangeID) if err != nil { diff --git a/pkg/kv/kvserver/client_store_test.go b/pkg/kv/kvserver/client_store_test.go index 48deba083f7b..fd7c36910eea 100644 --- a/pkg/kv/kvserver/client_store_test.go +++ b/pkg/kv/kvserver/client_store_test.go @@ -54,7 +54,7 @@ func TestStoreSetRangesMaxBytes(t *testing.T) { ) defer tc.Stopper().Stop(ctx) store := tc.GetFirstStoreFromServer(t, 0) - tdb := sqlutils.MakeSQLRunner(tc.Conns[0]) + tdb := sqlutils.MakeSQLRunner(tc.ServerConn(0)) tdb.Exec(t, `SET CLUSTER SETTING kv.closed_timestamp.target_duration = '100ms'`) // speeds up the test diff --git a/pkg/kv/kvserver/consistency_queue_test.go b/pkg/kv/kvserver/consistency_queue_test.go index bf6dd52bd528..cea7ef6d14c2 100644 --- a/pkg/kv/kvserver/consistency_queue_test.go +++ b/pkg/kv/kvserver/consistency_queue_test.go @@ -501,7 +501,7 @@ func testConsistencyQueueRecomputeStatsImpl(t *testing.T, hadEstimates bool) { tc := testcluster.StartTestCluster(t, 1, clusterArgs) defer tc.Stopper().Stop(context.Background()) - db0 := tc.Servers[0].DB() + db0 := tc.Server(0).DB() // Split off a range so that we get away from the timeseries writes, which // pollute the stats with ContainsEstimates=true. Note that the split clears @@ -561,7 +561,7 @@ func testConsistencyQueueRecomputeStatsImpl(t *testing.T, hadEstimates bool) { tc := testcluster.StartTestCluster(t, numNodes, clusterArgs) defer tc.Stopper().Stop(ctx) - srv0 := tc.Servers[0] + srv0 := tc.Server(0) db0 := srv0.DB() // Run a goroutine that writes to the range in a tight loop. This tests that @@ -611,7 +611,7 @@ func testConsistencyQueueRecomputeStatsImpl(t *testing.T, hadEstimates bool) { // The stats should magically repair themselves. We'll first do a quick check // and then a full recomputation. - repl, _, err := tc.Servers[0].GetStores().(*kvserver.Stores).GetReplicaForRangeID(ctx, rangeID) + repl, _, err := tc.Server(0).GetStores().(*kvserver.Stores).GetReplicaForRangeID(ctx, rangeID) require.NoError(t, err) ms := repl.GetMVCCStats() if ms.SysCount >= sysCountGarbage { diff --git a/pkg/kv/kvserver/flow_control_integration_test.go b/pkg/kv/kvserver/flow_control_integration_test.go index 1465ad70e60d..da21e8ebfb13 100644 --- a/pkg/kv/kvserver/flow_control_integration_test.go +++ b/pkg/kv/kvserver/flow_control_integration_test.go @@ -100,7 +100,7 @@ func TestFlowControlBasic(t *testing.T) { for i := 0; i < numNodes; i++ { si, err := tc.Server(i).GetStores().(*kvserver.Stores).GetStore(tc.Server(i).GetFirstStoreID()) require.NoError(t, err) - tc.Servers[i].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(si.StoreID(), + tc.Server(i).RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(si.StoreID(), &unreliableRaftHandler{ rangeID: desc.RangeID, IncomingRaftMessageHandler: si, @@ -1548,7 +1548,7 @@ ORDER BY name ASC; h.comment(`-- (Replacing current raft leader on n1 in raft group with new n4 replica.)`) testutils.SucceedsSoon(t, func() error { // Relocate range from n1 -> n4. - if err := tc.Servers[2].DB(). + if err := tc.Server(2).DB(). 
AdminRelocateRange( context.Background(), desc.StartKey.AsRawKey(), tc.Targets(1, 2, 3), nil, transferLeaseFirst); err != nil { @@ -1779,7 +1779,7 @@ ORDER BY name ASC; // Wait for the range to quiesce. h.comment(`-- (Wait for range to quiesce.)`) testutils.SucceedsSoon(t, func() error { - for i := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { rep := tc.GetFirstStoreFromServer(t, i).LookupReplica(roachpb.RKey(k)) require.NotNil(t, rep) if !rep.IsQuiescent() { @@ -1907,7 +1907,7 @@ func TestFlowControlUnquiescedRange(t *testing.T) { for i := 0; i < numNodes; i++ { si, err := tc.Server(i).GetStores().(*kvserver.Stores).GetStore(tc.Server(i).GetFirstStoreID()) require.NoError(t, err) - tc.Servers[i].RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(si.StoreID(), + tc.Server(i).RaftTransport().(*kvserver.RaftTransport).ListenIncomingRaftMessages(si.StoreID(), &unreliableRaftHandler{ rangeID: desc.RangeID, IncomingRaftMessageHandler: si, @@ -2333,10 +2333,10 @@ func newFlowControlTestHelper(t *testing.T, tc *testcluster.TestCluster) *flowCo } func (h *flowControlTestHelper) init() { - if _, err := h.tc.Conns[0].Exec(`SET CLUSTER SETTING kvadmission.flow_control.enabled = true`); err != nil { + if _, err := h.tc.ServerConn(0).Exec(`SET CLUSTER SETTING kvadmission.flow_control.enabled = true`); err != nil { h.t.Fatal(err) } - if _, err := h.tc.Conns[0].Exec(`SET CLUSTER SETTING kvadmission.flow_control.mode = 'apply_to_all'`); err != nil { + if _, err := h.tc.ServerConn(0).Exec(`SET CLUSTER SETTING kvadmission.flow_control.mode = 'apply_to_all'`); err != nil { h.t.Fatal(err) } } diff --git a/pkg/kv/kvserver/gossip_test.go b/pkg/kv/kvserver/gossip_test.go index d7c05794d775..6c9ecc615658 100644 --- a/pkg/kv/kvserver/gossip_test.go +++ b/pkg/kv/kvserver/gossip_test.go @@ -42,7 +42,7 @@ func TestGossipFirstRange(t *testing.T) { errors := make(chan error, 1) descs := make(chan *roachpb.RangeDescriptor) - unregister := tc.Servers[0].GossipI().(*gossip.Gossip). + unregister := tc.Server(0).GossipI().(*gossip.Gossip). RegisterCallback(gossip.KeyFirstRangeDescriptor, func(_ string, content roachpb.Value) { var desc roachpb.RangeDescriptor @@ -172,10 +172,10 @@ func TestGossipHandlesReplacedNode(t *testing.T) { // Take down the first node and replace it with a new one. oldNodeIdx := 0 newServerArgs := serverArgs - newServerArgs.Addr = tc.Servers[oldNodeIdx].AdvRPCAddr() - newServerArgs.SQLAddr = tc.Servers[oldNodeIdx].AdvSQLAddr() + newServerArgs.Addr = tc.Server(oldNodeIdx).AdvRPCAddr() + newServerArgs.SQLAddr = tc.Server(oldNodeIdx).AdvSQLAddr() newServerArgs.PartOfCluster = true - newServerArgs.JoinAddr = tc.Servers[1].AdvRPCAddr() + newServerArgs.JoinAddr = tc.Server(1).AdvRPCAddr() log.Infof(ctx, "stopping server %d", oldNodeIdx) tc.StopServer(oldNodeIdx) // We are re-using a hard-coded port. Other processes on the system may by now @@ -195,7 +195,8 @@ func TestGossipHandlesReplacedNode(t *testing.T) { // Ensure that all servers still running are responsive. If the two remaining // original nodes don't refresh their connection to the address of the first // node, they can get stuck here. 
- for i, server := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { + server := tc.Server(i) if i == oldNodeIdx { continue } diff --git a/pkg/kv/kvserver/intentresolver/intent_resolver_integration_test.go b/pkg/kv/kvserver/intentresolver/intent_resolver_integration_test.go index ec6bea20716e..0f4dcbe3e8ce 100644 --- a/pkg/kv/kvserver/intentresolver/intent_resolver_integration_test.go +++ b/pkg/kv/kvserver/intentresolver/intent_resolver_integration_test.go @@ -189,7 +189,7 @@ func TestAsyncIntentResolution_ByteSizePagination(t *testing.T) { } // Set the max raft command size to 5MB. - st := tc.Servers[0].ClusterSettings() + st := tc.Server(0).ClusterSettings() st.Manual.Store(true) kvserverbase.MaxCommandSize.Override(ctx, &st.SV, 5<<20) @@ -284,7 +284,7 @@ func TestSyncIntentResolution_ByteSizePagination(t *testing.T) { } // Set the max raft command size to 5MB. - st := tc.Servers[0].ClusterSettings() + st := tc.Server(0).ClusterSettings() st.Manual.Store(true) kvserverbase.MaxCommandSize.Override(ctx, &st.SV, 5<<20) @@ -334,7 +334,8 @@ func TestSyncIntentResolution_ByteSizePagination(t *testing.T) { } func forceScanOnAllReplicationQueues(tc *testcluster.TestCluster) (err error) { - for _, s := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { + s := tc.Server(i) err = s.GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { return store.ForceReplicationScanAndProcess() }) diff --git a/pkg/kv/kvserver/liveness/client_test.go b/pkg/kv/kvserver/liveness/client_test.go index e3fb65c5c0e0..3dcc09197f3d 100644 --- a/pkg/kv/kvserver/liveness/client_test.go +++ b/pkg/kv/kvserver/liveness/client_test.go @@ -162,7 +162,7 @@ func TestNodeLivenessStatusMap(t *testing.T) { ctx = logtags.AddTag(ctx, "in test", nil) log.Infof(ctx, "setting zone config to disable replication") - if _, err := tc.Conns[0].Exec(`ALTER RANGE meta CONFIGURE ZONE using num_replicas = 1`); err != nil { + if _, err := tc.ServerConn(0).Exec(`ALTER RANGE meta CONFIGURE ZONE using num_replicas = 1`); err != nil { t.Fatal(err) } @@ -284,7 +284,7 @@ func TestNodeLivenessDecommissionedCallback(t *testing.T) { tc.Start(t) defer tc.Stopper().Stop(ctx) - nl1 := tc.Servers[0].NodeLiveness().(*liveness.NodeLiveness) + nl1 := tc.Server(0).NodeLiveness().(*liveness.NodeLiveness) // Make sure the callback doesn't fire willy-nilly... func() { @@ -340,7 +340,7 @@ func TestGetActiveNodes(t *testing.T) { defer tc.Stopper().Stop(ctx) // At this point StartTestCluster has waited for all nodes to become live. - nl1 := tc.Servers[0].NodeLiveness().(*liveness.NodeLiveness) + nl1 := tc.Server(0).NodeLiveness().(*liveness.NodeLiveness) require.Equal(t, []roachpb.NodeID{1, 2, 3, 4, 5}, getActiveNodes(nl1)) // Mark n5 as decommissioning, which should reduce node count. 
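The hunks above repeatedly rewrite "for _, s := range tc.Servers" into an index-based loop over tc.NumServers() with tc.Server(i), since the exported slice is being replaced by accessors. The sketch below condenses that pattern into a helper; it is illustrative only, assumes the testcluster, serverutils, and liveness imports already used in these tests, and the name forEachServer is hypothetical rather than part of this patch.

    // forEachServer is a hypothetical wrapper (not part of this patch) around
    // the NumServers()/Server(i) accessors that replace ranging over tc.Servers.
    func forEachServer(tc *testcluster.TestCluster, fn func(serverutils.TestServerInterface)) {
    	for i := 0; i < tc.NumServers(); i++ {
    		fn(tc.Server(i))
    	}
    }

    // Example use, mirroring the liveness loops above:
    //   forEachServer(tc, func(s serverutils.TestServerInterface) {
    //       nl := s.NodeLiveness().(*liveness.NodeLiveness)
    //       _, _ = nl.Self()
    //   })

Call sites that need the index itself (to skip a particular server or to report which node failed) keep the explicit counted loop rather than a helper like this.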
diff --git a/pkg/kv/kvserver/loqrecovery/collect_raft_log_test.go b/pkg/kv/kvserver/loqrecovery/collect_raft_log_test.go index 113bc83e1522..32fd974f2fab 100644 --- a/pkg/kv/kvserver/loqrecovery/collect_raft_log_test.go +++ b/pkg/kv/kvserver/loqrecovery/collect_raft_log_test.go @@ -64,7 +64,7 @@ func TestFindUpdateDescriptor(t *testing.T) { lHSBefore, rHS, err = tc.SplitRange(splitKey) require.NoError(t, err, "failed to split scratch range") - lHSAfter, err = tc.Servers[0].MergeRanges(scratchKey) + lHSAfter, err = tc.Server(0).MergeRanges(scratchKey) require.NoError(t, err, "failed to merge scratch range") require.NoError(t, diff --git a/pkg/kv/kvserver/node_liveness_test.go b/pkg/kv/kvserver/node_liveness_test.go index 32581ded4b77..5f11ee8a364c 100644 --- a/pkg/kv/kvserver/node_liveness_test.go +++ b/pkg/kv/kvserver/node_liveness_test.go @@ -49,8 +49,9 @@ import ( func verifyLiveness(t *testing.T, tc *testcluster.TestCluster) { testutils.SucceedsSoon(t, func() error { - for _, s := range tc.Servers { - return verifyLivenessServer(s, int64(len(tc.Servers))) + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + s := tc.Server(serverIdx) + return verifyLivenessServer(s, int64(tc.NumServers())) } return nil }) @@ -69,7 +70,8 @@ func verifyLivenessServer(s serverutils.TestServerInterface, numServers int64) e func pauseNodeLivenessHeartbeatLoops(tc *testcluster.TestCluster) func() { var enableFns []func() - for _, s := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + s := tc.Server(serverIdx) enableFns = append(enableFns, s.NodeLiveness().(*liveness.NodeLiveness).PauseHeartbeatLoopForTest()) } return func() { @@ -102,9 +104,10 @@ func TestNodeLiveness(t *testing.T) { pauseNodeLivenessHeartbeatLoops(tc) // Advance clock past the liveness threshold to verify IsLive becomes false. - manualClock.Increment(tc.Servers[0].NodeLiveness().(*liveness.NodeLiveness).TestingGetLivenessThreshold().Nanoseconds() + 1) + manualClock.Increment(tc.Server(0).NodeLiveness().(*liveness.NodeLiveness).TestingGetLivenessThreshold().Nanoseconds() + 1) - for _, s := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + s := tc.Server(serverIdx) nl := s.NodeLiveness().(*liveness.NodeLiveness) nodeID := s.NodeID() if nl.GetNodeVitalityFromCache(nodeID).IsLive(livenesspb.IsAliveNotification) { @@ -119,7 +122,8 @@ func TestNodeLiveness(t *testing.T) { }) } // Trigger a manual heartbeat and verify liveness is reestablished. - for _, s := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + s := tc.Server(serverIdx) nl := s.NodeLiveness().(*liveness.NodeLiveness) l, ok := nl.Self() assert.True(t, ok) @@ -139,7 +143,8 @@ func TestNodeLiveness(t *testing.T) { verifyLiveness(t, tc) // Verify metrics counts. - for i, s := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { + s := tc.Server(i) nl := s.NodeLiveness().(*liveness.NodeLiveness) if c := nl.Metrics().HeartbeatSuccesses.Count(); c < 2 { t.Errorf("node %d: expected metrics count >= 2; got %d", (i + 1), c) @@ -180,7 +185,7 @@ func TestNodeLivenessInitialIncrement(t *testing.T) { // Verify liveness of all nodes for all nodes. 
verifyLiveness(t, tc) - nl, ok := tc.Servers[0].NodeLiveness().(*liveness.NodeLiveness).GetLiveness(tc.Servers[0].NodeID()) + nl, ok := tc.Server(0).NodeLiveness().(*liveness.NodeLiveness).GetLiveness(tc.Server(0).NodeID()) assert.True(t, ok) if nl.Epoch != 1 { t.Errorf("expected epoch to be set to 1 initially; got %d", nl.Epoch) @@ -193,7 +198,7 @@ func TestNodeLivenessInitialIncrement(t *testing.T) { func verifyEpochIncremented(t *testing.T, tc *testcluster.TestCluster, nodeIdx int) { testutils.SucceedsSoon(t, func() error { - liv, ok := tc.Servers[nodeIdx].NodeLiveness().(*liveness.NodeLiveness).GetLiveness(tc.Servers[nodeIdx].NodeID()) + liv, ok := tc.Server(nodeIdx).NodeLiveness().(*liveness.NodeLiveness).GetLiveness(tc.Server(nodeIdx).NodeID()) if !ok { return errors.New("liveness not found") } @@ -322,10 +327,11 @@ func TestNodeIsLiveCallback(t *testing.T) { started.Set(true) // Advance clock past the liveness threshold. - manualClock.Increment(tc.Servers[0].NodeLiveness().(*liveness.NodeLiveness).TestingGetLivenessThreshold().Nanoseconds() + 1) + manualClock.Increment(tc.Server(0).NodeLiveness().(*liveness.NodeLiveness).TestingGetLivenessThreshold().Nanoseconds() + 1) // Trigger a manual heartbeat and verify callbacks for each node ID are invoked. - for _, s := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + s := tc.Server(serverIdx) nl := s.NodeLiveness().(*liveness.NodeLiveness) l, ok := nl.Self() assert.True(t, ok) @@ -337,7 +343,8 @@ func TestNodeIsLiveCallback(t *testing.T) { testutils.SucceedsSoon(t, func() error { cbMu.Lock() defer cbMu.Unlock() - for _, s := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + s := tc.Server(serverIdx) nodeID := s.NodeID() if _, ok := cbs[nodeID]; !ok { return errors.Errorf("expected IsLive callback for node %d", nodeID) @@ -365,7 +372,7 @@ func TestNodeHeartbeatCallback(t *testing.T) { // Verify that last update time has been set for all nodes. verifyUptimes := func() { - for i := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { s := tc.GetFirstStoreFromServer(t, i) uptm, err := s.ReadLastUpTimestamp(context.Background()) require.NoError(t, err) @@ -404,24 +411,24 @@ func TestNodeLivenessEpochIncrement(t *testing.T) { pauseNodeLivenessHeartbeatLoops(tc) // First try to increment the epoch of a known-live node. - deadNodeID := tc.Servers[1].NodeID() - oldLiveness, ok := tc.Servers[0].NodeLiveness().(*liveness.NodeLiveness).GetLiveness(deadNodeID) + deadNodeID := tc.Server(1).NodeID() + oldLiveness, ok := tc.Server(0).NodeLiveness().(*liveness.NodeLiveness).GetLiveness(deadNodeID) assert.True(t, ok) - if err := tc.Servers[0].NodeLiveness().(*liveness.NodeLiveness).IncrementEpoch( + if err := tc.Server(0).NodeLiveness().(*liveness.NodeLiveness).IncrementEpoch( ctx, oldLiveness.Liveness, ); !testutils.IsError(err, "cannot increment epoch on live node") { t.Fatalf("expected error incrementing a live node: %+v", err) } // Advance clock past liveness threshold & increment epoch. 
- manualClock.Increment(tc.Servers[0].NodeLiveness().(*liveness.NodeLiveness).TestingGetLivenessThreshold().Nanoseconds() + 1) - if err := tc.Servers[0].NodeLiveness().(*liveness.NodeLiveness).IncrementEpoch(ctx, oldLiveness.Liveness); err != nil { + manualClock.Increment(tc.Server(0).NodeLiveness().(*liveness.NodeLiveness).TestingGetLivenessThreshold().Nanoseconds() + 1) + if err := tc.Server(0).NodeLiveness().(*liveness.NodeLiveness).IncrementEpoch(ctx, oldLiveness.Liveness); err != nil { t.Fatalf("unexpected error incrementing a non-live node: %+v", err) } // Verify that the epoch has been advanced. testutils.SucceedsSoon(t, func() error { - newLiveness, ok := tc.Servers[0].NodeLiveness().(*liveness.NodeLiveness).GetLiveness(deadNodeID) + newLiveness, ok := tc.Server(0).NodeLiveness().(*liveness.NodeLiveness).GetLiveness(deadNodeID) if !ok { return errors.New("liveness not found") } @@ -431,19 +438,19 @@ func TestNodeLivenessEpochIncrement(t *testing.T) { if newLiveness.Expiration != oldLiveness.Expiration { return errors.Errorf("expected expiration to remain unchanged") } - if tc.Servers[0].NodeLiveness().(*liveness.NodeLiveness).GetNodeVitalityFromCache(deadNodeID).IsLive(livenesspb.IsAliveNotification) { + if tc.Server(0).NodeLiveness().(*liveness.NodeLiveness).GetNodeVitalityFromCache(deadNodeID).IsLive(livenesspb.IsAliveNotification) { return errors.Errorf("expected dead node to remain dead after epoch increment") } return nil }) // Verify epoch increment metric count. - if c := tc.Servers[0].NodeLiveness().(*liveness.NodeLiveness).Metrics().EpochIncrements.Count(); c != 1 { + if c := tc.Server(0).NodeLiveness().(*liveness.NodeLiveness).Metrics().EpochIncrements.Count(); c != 1 { t.Errorf("expected epoch increment == 1; got %d", c) } // Verify error on incrementing an already-incremented epoch. - if err := tc.Servers[0].NodeLiveness().(*liveness.NodeLiveness).IncrementEpoch( + if err := tc.Server(0).NodeLiveness().(*liveness.NodeLiveness).IncrementEpoch( ctx, oldLiveness.Liveness, ); !errors.Is(err, liveness.ErrEpochAlreadyIncremented) { t.Fatalf("unexpected error incrementing a non-live node: %+v", err) @@ -451,7 +458,7 @@ func TestNodeLivenessEpochIncrement(t *testing.T) { // Verify error incrementing with a too-high expectation for liveness epoch. oldLiveness.Epoch = 3 - if err := tc.Servers[0].NodeLiveness().(*liveness.NodeLiveness).IncrementEpoch( + if err := tc.Server(0).NodeLiveness().(*liveness.NodeLiveness).IncrementEpoch( ctx, oldLiveness.Liveness, ); !testutils.IsError(err, "unexpected liveness epoch 2; expected >= 3") { t.Fatalf("expected error incrementing with a too-high expected epoch: %+v", err) @@ -500,7 +507,8 @@ func TestNodeLivenessRestart(t *testing.T) { // Clear the liveness records in store 1's gossip to make sure we're // seeing the liveness record properly gossiped at store startup. 
var expKeys []string - for _, s := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + s := tc.Server(serverIdx) nodeID := s.NodeID() key := gossip.MakeNodeLivenessKey(nodeID) expKeys = append(expKeys, key) @@ -633,7 +641,7 @@ func TestNodeLivenessGetIsLiveMap(t *testing.T) { verifyLiveness(t, tc) pauseNodeLivenessHeartbeatLoops(tc) - nl := tc.Servers[0].NodeLiveness().(*liveness.NodeLiveness) + nl := tc.Server(0).NodeLiveness().(*liveness.NodeLiveness) require.True(t, nl.GetNodeVitalityFromCache(1).IsLive(livenesspb.IsAliveNotification)) require.True(t, nl.GetNodeVitalityFromCache(2).IsLive(livenesspb.IsAliveNotification)) require.True(t, nl.GetNodeVitalityFromCache(3).IsLive(livenesspb.IsAliveNotification)) @@ -642,7 +650,7 @@ func TestNodeLivenessGetIsLiveMap(t *testing.T) { manualClock.Increment(nl.TestingGetLivenessThreshold().Nanoseconds() + 1) var livenessRec liveness.Record testutils.SucceedsSoon(t, func() error { - lr, ok := nl.GetLiveness(tc.Servers[0].NodeID()) + lr, ok := nl.GetLiveness(tc.Server(0).NodeID()) if !ok { return errors.New("liveness not found") } @@ -688,7 +696,7 @@ func TestNodeLivenessGetLivenesses(t *testing.T) { verifyLiveness(t, tc) pauseNodeLivenessHeartbeatLoops(tc) - nl := tc.Servers[0].NodeLiveness().(*liveness.NodeLiveness) + nl := tc.Server(0).NodeLiveness().(*liveness.NodeLiveness) actualLMapNodes := make(map[roachpb.NodeID]struct{}) originalExpiration := testStartTime + nl.TestingGetLivenessThreshold().Nanoseconds() for id, v := range nl.ScanNodeVitalityFromCache() { @@ -709,7 +717,7 @@ func TestNodeLivenessGetLivenesses(t *testing.T) { manualClock.Increment(nl.TestingGetLivenessThreshold().Nanoseconds() + 1) var livenessRecord liveness.Record testutils.SucceedsSoon(t, func() error { - livenessRec, ok := nl.GetLiveness(tc.Servers[0].NodeID()) + livenessRec, ok := nl.GetLiveness(tc.Server(0).NodeID()) if !ok { return errors.New("liveness not found") } @@ -809,9 +817,9 @@ func TestNodeLivenessConcurrentIncrementEpochs(t *testing.T) { const concurrency = 10 // Advance the clock and this time increment epoch concurrently for node 1. - nl := tc.Servers[0].NodeLiveness().(*liveness.NodeLiveness) + nl := tc.Server(0).NodeLiveness().(*liveness.NodeLiveness) manualClock.Increment(nl.TestingGetLivenessThreshold().Nanoseconds() + 1) - l, ok := nl.GetLiveness(tc.Servers[1].NodeID()) + l, ok := nl.GetLiveness(tc.Server(1).NodeID()) assert.True(t, ok) errCh := make(chan error, concurrency) for i := 0; i < concurrency; i++ { @@ -869,7 +877,7 @@ func TestNodeLivenessSetDraining(t *testing.T) { verifyLiveness(t, tc) drainingNodeIdx := 0 - drainingNodeID := tc.Servers[0].NodeID() + drainingNodeID := tc.Server(0).NodeID() nodeIDAppearsInStoreList := func(id roachpb.NodeID, sl storepool.StoreList) bool { for _, store := range sl.TestingStores() { @@ -882,7 +890,7 @@ func TestNodeLivenessSetDraining(t *testing.T) { // Verify success on failed update of a liveness record that already has the // given draining setting. 
- if err := tc.Servers[drainingNodeIdx].NodeLiveness().(*liveness.NodeLiveness).TestingSetDrainingInternal( + if err := tc.Server(drainingNodeIdx).NodeLiveness().(*liveness.NodeLiveness).TestingSetDrainingInternal( ctx, liveness.Record{Liveness: livenesspb.Liveness{ NodeID: drainingNodeID, }}, false, @@ -890,7 +898,7 @@ func TestNodeLivenessSetDraining(t *testing.T) { t.Fatal(err) } - if err := tc.Servers[drainingNodeIdx].NodeLiveness().(*liveness.NodeLiveness).SetDraining(ctx, true /* drain */, nil /* reporter */); err != nil { + if err := tc.Server(drainingNodeIdx).NodeLiveness().(*liveness.NodeLiveness).SetDraining(ctx, true /* drain */, nil /* reporter */); err != nil { t.Fatal(err) } @@ -900,7 +908,8 @@ func TestNodeLivenessSetDraining(t *testing.T) { // Executed in a retry loop to wait until the new liveness record has // been gossiped to the rest of the cluster. testutils.SucceedsSoon(t, func() error { - for i, s := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { + s := tc.Server(i) curNodeID := s.NodeID() sl, alive, _ := tc.GetFirstStoreFromServer(t, i).GetStoreConfig().StorePool.TestingGetStoreList() if alive != expectedLive { @@ -934,7 +943,8 @@ func TestNodeLivenessSetDraining(t *testing.T) { // Executed in a retry loop to wait until the new liveness record has // been gossiped to the rest of the cluster. testutils.SucceedsSoon(t, func() error { - for i, s := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { + s := tc.Server(i) curNodeID := s.NodeID() sl, alive, _ := tc.GetFirstStoreFromServer(t, i).GetStoreConfig().StorePool.TestingGetStoreList() if alive != expectedLive { @@ -1063,7 +1073,8 @@ func TestNodeLivenessRetryAmbiguousResultOnCreateError(t *testing.T) { }) defer tc.Stopper().Stop(ctx) - for _, s := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + s := tc.Server(serverIdx) // Verify retry of the ambiguous result for heartbeat loop. testutils.SucceedsSoon(t, func() error { return verifyLivenessServer(s, 3) @@ -1154,7 +1165,8 @@ func TestNodeLivenessNoRetryOnAmbiguousResultCausedByCancellation(t *testing.T) func verifyNodeIsDecommissioning(t *testing.T, tc *testcluster.TestCluster, nodeID roachpb.NodeID) { testutils.SucceedsSoon(t, func() error { - for _, s := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + s := tc.Server(serverIdx) liv, _ := s.NodeLiveness().(*liveness.NodeLiveness).GetLiveness(nodeID) if !liv.Membership.Decommissioning() { return errors.Errorf("unexpected Membership value of %v for node %v", liv.Membership, liv.NodeID) @@ -1200,8 +1212,8 @@ func testNodeLivenessSetDecommissioning(t *testing.T, decommissionNodeIdx int) { // Verify liveness of all nodes for all nodes. verifyLiveness(t, tc) - callerNodeLiveness := tc.Servers[0].NodeLiveness().(*liveness.NodeLiveness) - nodeID := tc.Servers[decommissionNodeIdx].NodeID() + callerNodeLiveness := tc.Server(0).NodeLiveness().(*liveness.NodeLiveness) + nodeID := tc.Server(decommissionNodeIdx).NodeID() // Verify success on failed update of a liveness record that already has the // given decommissioning setting. 
@@ -1263,9 +1275,9 @@ func TestNodeLivenessDecommissionAbsent(t *testing.T) { verifyLiveness(t, tc) const goneNodeID = roachpb.NodeID(10000) - nl := tc.Servers[0].NodeLiveness().(*liveness.NodeLiveness) - nl1 := tc.Servers[1].NodeLiveness().(*liveness.NodeLiveness) - nl2 := tc.Servers[1].NodeLiveness().(*liveness.NodeLiveness) + nl := tc.Server(0).NodeLiveness().(*liveness.NodeLiveness) + nl1 := tc.Server(1).NodeLiveness().(*liveness.NodeLiveness) + nl2 := tc.Server(1).NodeLiveness().(*liveness.NodeLiveness) // When the node simply never existed, expect an error. if _, err := nl.SetMembershipStatus( @@ -1275,10 +1287,10 @@ func TestNodeLivenessDecommissionAbsent(t *testing.T) { } // Pretend the node was once there but isn't gossiped anywhere. - if err := tc.Servers[0].DB().CPut(ctx, keys.NodeLivenessKey(goneNodeID), &livenesspb.Liveness{ + if err := tc.Server(0).DB().CPut(ctx, keys.NodeLivenessKey(goneNodeID), &livenesspb.Liveness{ NodeID: goneNodeID, Epoch: 1, - Expiration: tc.Servers[0].Clock().Now().ToLegacyTimestamp(), + Expiration: tc.Server(0).Clock().Now().ToLegacyTimestamp(), Membership: livenesspb.MembershipStatus_ACTIVE, }, nil); err != nil { t.Fatal(err) diff --git a/pkg/kv/kvserver/replica_closedts_test.go b/pkg/kv/kvserver/replica_closedts_test.go index 7649ecdec217..32d6df16cbed 100644 --- a/pkg/kv/kvserver/replica_closedts_test.go +++ b/pkg/kv/kvserver/replica_closedts_test.go @@ -565,7 +565,7 @@ func TestRejectedLeaseDoesntDictateClosedTimestamp(t *testing.T) { manual.Pause() // Upreplicate a range. - n1, n2 := tc.Servers[0], tc.Servers[1] + n1, n2 := tc.Server(0), tc.Server(1) // One of the filters hardcodes a node id. require.Equal(t, roachpb.NodeID(2), n2.NodeID()) key := tc.ScratchRangeWithExpirationLease(t) @@ -919,7 +919,8 @@ func testNonBlockingReadsWithReaderFn( } // Reader goroutines: run one reader per store. 
- for _, s := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + s := tc.Server(serverIdx) store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) g.Go(func() error { diff --git a/pkg/kv/kvserver/replica_learner_test.go b/pkg/kv/kvserver/replica_learner_test.go index 87eb20d7ce4d..9124131001c0 100644 --- a/pkg/kv/kvserver/replica_learner_test.go +++ b/pkg/kv/kvserver/replica_learner_test.go @@ -279,7 +279,7 @@ func TestAddReplicaWithReceiverThrottling(t *testing.T) { if err != nil { return err } - _, err = tc.Servers[0].DB().AdminChangeReplicas(ctx, scratch, desc, + _, err = tc.Server(0).DB().AdminChangeReplicas(ctx, scratch, desc, kvpb.MakeReplicationChanges(roachpb.ADD_NON_VOTER, tc.Target(2)), ) replicationChange <- err @@ -303,7 +303,7 @@ func TestAddReplicaWithReceiverThrottling(t *testing.T) { if err != nil { return err } - _, err = tc.Servers[0].DB().AdminChangeReplicas( + _, err = tc.Server(0).DB().AdminChangeReplicas( ctx, scratch, desc, kvpb.MakeReplicationChanges(roachpb.ADD_VOTER, tc.Target(1)), ) replicationChange <- err @@ -437,7 +437,7 @@ func TestDelegateSnapshot(t *testing.T) { testutils.SucceedsSoon(t, func() error { var desc roachpb.RangeDescriptor rKey := keys.MustAddr(scratchKey) - require.NoError(t, tc.Servers[2].DB().GetProto(ctx, keys.RangeDescriptorKey(rKey), &desc)) + require.NoError(t, tc.Server(2).DB().GetProto(ctx, keys.RangeDescriptorKey(rKey), &desc)) if desc.Generation != leaderDesc.Generation { return errors.Newf("Generation mismatch %d != %d", desc.Generation, leaderDesc.Generation) } @@ -536,7 +536,7 @@ func TestDelegateSnapshotFails(t *testing.T) { _, err = setupPartitionedRange(tc, desc.RangeID, 0, 0, true, unreliableRaftHandlerFuncs{}) require.NoError(t, err) - _, err = tc.Servers[0].DB().AdminChangeReplicas( + _, err = tc.Server(0).DB().AdminChangeReplicas( ctx, scratchKey, desc, kvpb.MakeReplicationChanges(roachpb.ADD_VOTER, tc.Target(1)), ) @@ -569,7 +569,7 @@ func TestDelegateSnapshotFails(t *testing.T) { _, err := setupPartitionedRange(tc, desc.RangeID, 0, 2, true, unreliableRaftHandlerFuncs{}) require.NoError(t, err) - _, err = tc.Servers[0].DB().AdminChangeReplicas( + _, err = tc.Server(0).DB().AdminChangeReplicas( ctx, scratchKey, desc, kvpb.MakeReplicationChanges(roachpb.ADD_VOTER, tc.Target(1)), ) // The delegate can not send this request since it does not have the latest @@ -602,7 +602,7 @@ func TestDelegateSnapshotFails(t *testing.T) { _, err := setupPartitionedRange(tc, desc.RangeID, 0, 2, true, unreliableRaftHandlerFuncs{}) require.NoError(t, err) - _, err = tc.Servers[0].DB().AdminChangeReplicas( + _, err = tc.Server(0).DB().AdminChangeReplicas( ctx, scratchKey, desc, kvpb.MakeReplicationChanges(roachpb.ADD_VOTER, tc.Target(1)), ) require.NoError(t, err) @@ -644,7 +644,7 @@ func TestDelegateSnapshotFails(t *testing.T) { senders.mu.Unlock() block.Store(2) - _, err := tc.Servers[0].DB().AdminChangeReplicas( + _, err := tc.Server(0).DB().AdminChangeReplicas( ctx, scratchKey, desc, kvpb.MakeReplicationChanges(roachpb.ADD_VOTER, tc.Target(1)), ) require.ErrorContains(t, err, "BAM: receive error") @@ -670,7 +670,7 @@ func TestDelegateSnapshotFails(t *testing.T) { senders.desc = append(senders.desc, roachpb.ReplicaDescriptor{NodeID: 1, StoreID: 1}) senders.mu.Unlock() - _, err := tc.Servers[0].DB().AdminChangeReplicas( + _, err := tc.Server(0).DB().AdminChangeReplicas( ctx, scratchKey, desc, kvpb.MakeReplicationChanges(roachpb.ADD_VOTER, tc.Target(1)), ) 
require.NoError(t, err) @@ -704,7 +704,7 @@ func TestDelegateSnapshotFails(t *testing.T) { // Don't allow store 4 to see the new descriptor through Raft. block.Store(true) - _, err := tc.Servers[0].DB().AdminChangeReplicas( + _, err := tc.Server(0).DB().AdminChangeReplicas( ctx, scratchKey, desc, kvpb.MakeReplicationChanges(roachpb.ADD_VOTER, tc.Target(1)), ) require.ErrorContains(t, err, "generation has changed") @@ -737,7 +737,7 @@ func TestDelegateSnapshotFails(t *testing.T) { defer tc.Stopper().Stop(ctx) // This will truncate the log on the first store. truncateLog = func() { - server := tc.Servers[0] + server := tc.Server(0) store, _ := server.GetStores().(*kvserver.Stores).GetStore(server.GetFirstStoreID()) store.MustForceRaftLogScanAndProcess() } @@ -753,7 +753,7 @@ func TestDelegateSnapshotFails(t *testing.T) { // Don't allow the new store to see Raft updates. blockRaft.Store(true) - _, err := tc.Servers[0].DB().AdminChangeReplicas( + _, err := tc.Server(0).DB().AdminChangeReplicas( ctx, scratchKey, desc, kvpb.MakeReplicationChanges(roachpb.ADD_VOTER, tc.Target(1)), ) require.NoError(t, err) @@ -1458,7 +1458,7 @@ func TestLearnerAdminChangeReplicasRace(t *testing.T) { if err != nil { return err } - _, err = tc.Servers[0].DB().AdminChangeReplicas( + _, err = tc.Server(0).DB().AdminChangeReplicas( ctx, scratchStartKey, desc, kvpb.MakeReplicationChanges(roachpb.ADD_VOTER, tc.Target(1)), ) return err @@ -1842,7 +1842,7 @@ func TestDemotedLearnerRemovalHandlesRace(t *testing.T) { var finishAndGetRecording func() tracingpb.Recording err := tc.Stopper().RunAsyncTask(ctx, "test", func(ctx context.Context) { ctx, finishAndGetRecording = tracing.ContextWithRecordingSpan( - ctx, tc.Servers[0].Tracer(), "rebalance", + ctx, tc.Server(0).Tracer(), "rebalance", ) _, err := tc.RebalanceVoter( ctx, @@ -1872,7 +1872,7 @@ func TestDemotedLearnerRemovalHandlesRace(t *testing.T) { // Manually remove the learner replica from the range, and expect that to not // affect the previous rebalance anymore. - _, leaseRepl := getFirstStoreReplica(t, tc.Servers[0], scratchKey) + _, leaseRepl := getFirstStoreReplica(t, tc.Server(0), scratchKey) require.NotNil(t, leaseRepl) beforeDesc := tc.LookupRangeOrFatal(t, scratchKey) _, err = leaseRepl.TestingRemoveLearner( diff --git a/pkg/kv/kvserver/replica_probe_test.go b/pkg/kv/kvserver/replica_probe_test.go index 7dae73451327..26dc2335f13f 100644 --- a/pkg/kv/kvserver/replica_probe_test.go +++ b/pkg/kv/kvserver/replica_probe_test.go @@ -119,7 +119,8 @@ func TestReplicaProbeRequest(t *testing.T) { } // Sanity check that ProbeRequest is fit for sending through the entire KV // stack, with both routing policies. - for _, srv := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + srv := tc.Server(serverIdx) db := srv.DB() for _, policy := range []kvpb.RoutingPolicy{ kvpb.RoutingPolicy_LEASEHOLDER, @@ -153,10 +154,10 @@ func TestReplicaProbeRequest(t *testing.T) { testutils.SucceedsSoon(t, func() error { seen.Lock() defer seen.Unlock() - if exp, act := len(seen.m), len(tc.Servers); exp != act { + if exp, act := len(seen.m), tc.NumServers(); exp != act { return errors.Errorf("waiting for stores to apply command: %d/%d", act, exp) } - // We'll usually see 2 * len(tc.Servers) probes since we sent two probes, but see + // We'll usually see 2 * tc.NumServers() probes since we sent two probes, but see // the comment about errant snapshots above. We just want this test to be reliable // so expect at least one probe in command application. 
n := 1 @@ -170,7 +171,8 @@ func TestReplicaProbeRequest(t *testing.T) { // We can also probe directly at each Replica. This is the intended use case // for Replica-level circuit breakers (#33007). - for _, srv := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + srv := tc.Server(serverIdx) repl, _, err := srv.GetStores().(*kvserver.Stores).GetReplicaForRangeID(ctx, desc.RangeID) require.NoError(t, err) ba := &kvpb.BatchRequest{} @@ -188,7 +190,8 @@ func TestReplicaProbeRequest(t *testing.T) { seen.Lock() seen.injectedErr = injErr seen.Unlock() - for _, srv := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + srv := tc.Server(serverIdx) repl, _, err := srv.GetStores().(*kvserver.Stores).GetReplicaForRangeID(ctx, desc.RangeID) require.NoError(t, err) ba := &kvpb.BatchRequest{} diff --git a/pkg/kv/kvserver/replica_rangefeed_test.go b/pkg/kv/kvserver/replica_rangefeed_test.go index 5817350d5277..288a144f45d5 100644 --- a/pkg/kv/kvserver/replica_rangefeed_test.go +++ b/pkg/kv/kvserver/replica_rangefeed_test.go @@ -115,7 +115,7 @@ func TestReplicaRangefeed(t *testing.T) { tc := testcluster.StartTestCluster(t, numNodes, args) defer tc.Stopper().Stop(ctx) - ts := tc.Servers[0] + ts := tc.Server(0) firstStore, pErr := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if pErr != nil { t.Fatal(pErr) @@ -148,7 +148,7 @@ func TestReplicaRangefeed(t *testing.T) { for i := 0; i < numNodes; i++ { stream := newTestStream() streams[i] = stream - srv := tc.Servers[i] + srv := tc.Server(i) store, err := srv.GetStores().(*kvserver.Stores).GetStore(srv.GetFirstStoreID()) if err != nil { t.Fatal(err) @@ -297,7 +297,7 @@ func TestReplicaRangefeed(t *testing.T) { t.Fatal(err) } - server1 := tc.Servers[1] + server1 := tc.Server(1) store1, pErr := server1.GetStores().(*kvserver.Stores).GetStore(server1.GetFirstStoreID()) if pErr != nil { t.Fatal(pErr) @@ -469,7 +469,7 @@ func TestReplicaRangefeed(t *testing.T) { testutils.SucceedsSoon(t, func() error { for i := 0; i < numNodes; i++ { - ts := tc.Servers[i] + ts := tc.Server(i) store, pErr := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if pErr != nil { t.Fatal(pErr) @@ -528,7 +528,7 @@ func TestReplicaRangefeedErrors(t *testing.T) { }, ) - ts := tc.Servers[0] + ts := tc.Server(0) store, pErr := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if pErr != nil { t.Fatal(pErr) @@ -621,7 +621,7 @@ func TestReplicaRangefeedErrors(t *testing.T) { stream := newTestStream() streamErrC := make(chan error, 1) rangefeedSpan := mkSpan("a", "z") - ts := tc.Servers[removeStore] + ts := tc.Server(removeStore) store, err := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if err != nil { t.Fatal(err) @@ -656,7 +656,7 @@ func TestReplicaRangefeedErrors(t *testing.T) { stream := newTestStream() streamErrC := make(chan error, 1) rangefeedSpan := mkSpan("a", "z") - ts := tc.Servers[0] + ts := tc.Server(0) store, err := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if err != nil { t.Fatal(err) @@ -687,7 +687,7 @@ func TestReplicaRangefeedErrors(t *testing.T) { tc, rangeID := setup(t, base.TestingKnobs{}) defer tc.Stopper().Stop(ctx) - ts := tc.Servers[0] + ts := tc.Server(0) store, err := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if err != nil { t.Fatal(err) @@ -753,22 +753,23 @@ func TestReplicaRangefeedErrors(t *testing.T) { tc, rangeID := setup(t, base.TestingKnobs{}) defer tc.Stopper().Stop(ctx) - ts2 := 
tc.Servers[2] + ts2 := tc.Server(2) partitionStore, err := ts2.GetStores().(*kvserver.Stores).GetStore(ts2.GetFirstStoreID()) if err != nil { t.Fatal(err) } - ts := tc.Servers[0] + ts := tc.Server(0) firstStore, err := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if err != nil { t.Fatal(err) } - secondStore, err := tc.Servers[1].GetStores().(*kvserver.Stores).GetStore(tc.Servers[1].GetFirstStoreID()) + secondStore, err := tc.Server(1).GetStores().(*kvserver.Stores).GetStore(tc.Server(1).GetFirstStoreID()) if err != nil { t.Fatal(err) } - for _, server := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + server := tc.Server(serverIdx) store, err := server.GetStores().(*kvserver.Stores).GetStore(server.GetFirstStoreID()) if err != nil { t.Fatal(err) @@ -890,7 +891,7 @@ func TestReplicaRangefeedErrors(t *testing.T) { tc, _ := setup(t, knobs) defer tc.Stopper().Stop(ctx) - ts := tc.Servers[0] + ts := tc.Server(0) store, err := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if err != nil { t.Fatal(err) @@ -959,7 +960,7 @@ func TestReplicaRangefeedErrors(t *testing.T) { tc, _ := setup(t, knobs) defer tc.Stopper().Stop(ctx) - ts := tc.Servers[0] + ts := tc.Server(0) store, err := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if err != nil { t.Fatal(err) @@ -1033,7 +1034,7 @@ func TestReplicaRangefeedMVCCHistoryMutationError(t *testing.T) { ReplicationMode: base.ReplicationManual, }) defer tc.Stopper().Stop(ctx) - ts := tc.Servers[0] + ts := tc.Server(0) store, err := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) require.NoError(t, err) tc.SplitRangeOrFatal(t, splitKey) diff --git a/pkg/kv/kvserver/replicate_queue_test.go b/pkg/kv/kvserver/replicate_queue_test.go index 2de951e595b0..8796a26cb7a6 100644 --- a/pkg/kv/kvserver/replicate_queue_test.go +++ b/pkg/kv/kvserver/replicate_queue_test.go @@ -83,7 +83,8 @@ func TestReplicateQueueRebalance(t *testing.T) { ) defer tc.Stopper().Stop(context.Background()) - for _, server := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + server := tc.Server(serverIdx) st := server.ClusterSettings() st.Manual.Store(true) kvserver.LoadBasedRebalancingMode.Override(ctx, &st.SV, int64(kvserver.LBRebalancingOff)) @@ -119,8 +120,9 @@ func TestReplicateQueueRebalance(t *testing.T) { } countReplicas := func() []int { - counts := make([]int, len(tc.Servers)) - for _, s := range tc.Servers { + counts := make([]int, tc.NumServers()) + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + s := tc.Server(serverIdx) err := s.GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { counts[s.StoreID()-1] += s.ReplicaCount() return nil @@ -160,7 +162,7 @@ func TestReplicateQueueRebalance(t *testing.T) { // Query the range log to see if anything unexpected happened. Concretely, // we'll make sure that our tracked ranges never had >3 replicas. 
- infos, err := queryRangeLog(tc.Conns[0], `SELECT info FROM system.rangelog ORDER BY timestamp DESC`) + infos, err := queryRangeLog(tc.ServerConn(0), `SELECT info FROM system.rangelog ORDER BY timestamp DESC`) require.NoError(t, err) for _, info := range infos { if _, ok := trackedRanges[info.UpdatedDesc.RangeID]; !ok || len(info.UpdatedDesc.Replicas().VoterDescriptors()) <= 3 { @@ -240,7 +242,8 @@ func TestReplicateQueueRebalanceMultiStore(t *testing.T) { args) defer tc.Stopper().Stop(context.Background()) ctx := context.Background() - for _, server := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + server := tc.Server(serverIdx) st := server.ClusterSettings() st.Manual.Store(true) allocatorimpl.LeaseRebalanceThreshold.Override(ctx, &st.SV, leaseRebalanceThreshold) @@ -279,7 +282,8 @@ func TestReplicateQueueRebalanceMultiStore(t *testing.T) { countReplicas := func() (total int, perStore []int) { perStore = make([]int, numStores) - for _, s := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + s := tc.Server(serverIdx) err := s.GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { require.Zero(t, perStore[s.StoreID()-1]) perStore[s.StoreID()-1] = s.ReplicaCount() @@ -292,7 +296,8 @@ func TestReplicateQueueRebalanceMultiStore(t *testing.T) { } countLeases := func() (total int, perStore []int) { perStore = make([]int, numStores) - for _, s := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + s := tc.Server(serverIdx) err := s.GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { c, err := s.Capacity(ctx, false) require.NoError(t, err) @@ -350,7 +355,7 @@ func TestReplicateQueueRebalanceMultiStore(t *testing.T) { // Query the range log to see if anything unexpected happened. Concretely, // we'll make sure that our tracked ranges never had >3 replicas. - infos, err := queryRangeLog(tc.Conns[0], `SELECT info FROM system.rangelog ORDER BY timestamp DESC`) + infos, err := queryRangeLog(tc.ServerConn(0), `SELECT info FROM system.rangelog ORDER BY timestamp DESC`) require.NoError(t, err) for _, info := range infos { if _, ok := trackedRanges[info.UpdatedDesc.RangeID]; !ok || len(info.UpdatedDesc.Replicas().VoterDescriptors()) <= 3 { @@ -393,7 +398,7 @@ func TestReplicateQueueUpReplicateOddVoters(t *testing.T) { tc.AddAndStartServer(t, base.TestServerArgs{}) - if err := tc.Servers[0].GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { + if err := tc.Server(0).GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { return s.ForceReplicationScanAndProcess() }); err != nil { t.Fatal(err) @@ -401,13 +406,13 @@ func TestReplicateQueueUpReplicateOddVoters(t *testing.T) { // After the initial splits have been performed, all of the resulting ranges // should be present in replicate queue purgatory (because we only have a // single store in the test and thus replication cannot succeed). 
- expected, err := tc.Servers[0].ExpectedInitialRangeCount() + expected, err := tc.Server(0).ExpectedInitialRangeCount() if err != nil { t.Fatal(err) } var store *kvserver.Store - _ = tc.Servers[0].GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { + _ = tc.Server(0).GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { store = s return nil }) @@ -432,7 +437,7 @@ func TestReplicateQueueUpReplicateOddVoters(t *testing.T) { }) infos, err := filterRangeLog( - tc.Conns[0], desc.RangeID, kvserverpb.RangeLogEventType_add_voter, kvserverpb.ReasonRangeUnderReplicated, + tc.ServerConn(0), desc.RangeID, kvserverpb.RangeLogEventType_add_voter, kvserverpb.ReasonRangeUnderReplicated, ) if err != nil { t.Fatal(err) @@ -483,7 +488,8 @@ func TestReplicateQueueDownReplicate(t *testing.T) { ) require.NoError(t, err) - for _, s := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + s := tc.Server(serverIdx) require.NoError(t, s.GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { require.NoError(t, s.ForceReplicationScanAndProcess()) return nil @@ -502,7 +508,7 @@ func TestReplicateQueueDownReplicate(t *testing.T) { desc := tc.LookupRangeOrFatal(t, testKey) infos, err := filterRangeLog( - tc.Conns[0], desc.RangeID, kvserverpb.RangeLogEventType_remove_voter, kvserverpb.ReasonRangeOverReplicated, + tc.ServerConn(0), desc.RangeID, kvserverpb.RangeLogEventType_remove_voter, kvserverpb.ReasonRangeOverReplicated, ) require.NoError(t, err) require.Truef(t, len(infos) >= 1, "found no down replication due to over-replication in the range logs") @@ -511,7 +517,8 @@ func TestReplicateQueueDownReplicate(t *testing.T) { func scanAndGetNumNonVoters( t *testing.T, tc *testcluster.TestCluster, scratchKey roachpb.Key, ) (numNonVoters int) { - for _, s := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + s := tc.Server(serverIdx) // Nudge internal queues to up/down-replicate our scratch range. 
require.NoError(t, s.GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { require.NoError(t, s.ForceSplitScanAndProcess()) @@ -1018,7 +1025,7 @@ func getLeaseholderStore( if err != nil { return nil, err } - leaseHolderSrv := tc.Servers[leaseHolder.NodeID-1] + leaseHolderSrv := tc.Server(leaseHolder.NodeID - 1) store, err := leaseHolderSrv.GetStores().(*kvserver.Stores).GetStore(leaseHolder.StoreID) if err != nil { return nil, err @@ -1400,7 +1407,8 @@ func getAggregateMetricCounts( voterMap map[roachpb.NodeID]roachpb.StoreID, add bool, ) (currentCount int64, currentVoterCount int64) { - for _, s := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + s := tc.Server(serverIdx) if storeId, exists := voterMap[s.NodeID()]; exists { store, err := s.GetStores().(*kvserver.Stores).GetStore(storeId) if err != nil { @@ -1722,7 +1730,8 @@ func filterRangeLog( } func toggleReplicationQueues(tc *testcluster.TestCluster, active bool) { - for _, s := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + s := tc.Server(serverIdx) _ = s.GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { store.SetReplicateQueueActive(active) return nil @@ -1731,7 +1740,8 @@ func toggleReplicationQueues(tc *testcluster.TestCluster, active bool) { } func forceScanOnAllReplicationQueues(tc *testcluster.TestCluster) (err error) { - for _, s := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + s := tc.Server(serverIdx) err = s.GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { return store.ForceReplicationScanAndProcess() }) @@ -1740,7 +1750,8 @@ func forceScanOnAllReplicationQueues(tc *testcluster.TestCluster) (err error) { } func toggleSplitQueues(tc *testcluster.TestCluster, active bool) { - for _, s := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + s := tc.Server(serverIdx) _ = s.GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { store.SetSplitQueueActive(active) return nil @@ -1793,7 +1804,7 @@ func TestLargeUnsplittableRangeReplicate(t *testing.T) { toggleReplicationQueues(tc, false /* active */) toggleSplitQueues(tc, false /* active */) - db := tc.Conns[0] + db := tc.ServerConn(0) _, err := db.Exec("create table t (i int primary key, s string)") require.NoError(t, err) @@ -1828,7 +1839,8 @@ func TestLargeUnsplittableRangeReplicate(t *testing.T) { forceProcess := func() { // Speed up the queue processing. 
- for _, s := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + s := tc.Server(serverIdx) err := s.GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { return store.ForceReplicationScanAndProcess() }) @@ -1940,7 +1952,7 @@ func TestTransferLeaseToLaggingNode(t *testing.T) { // Get the system.comments' range and lease holder var rangeID roachpb.RangeID var leaseHolderNodeID uint64 - s := sqlutils.MakeSQLRunner(tc.Conns[0]) + s := sqlutils.MakeSQLRunner(tc.ServerConn(0)) s.Exec(t, "INSERT INTO system.comments VALUES(0,0,0,'abc')") s.QueryRow(t, "SELECT range_id, lease_holder FROM "+ @@ -1952,7 +1964,7 @@ func TestTransferLeaseToLaggingNode(t *testing.T) { } log.Infof(ctx, "RangeID %d, RemoteNodeID %d, LeaseHolderNodeID %d", rangeID, remoteNodeID, leaseHolderNodeID) - leaseHolderSrv := tc.Servers[leaseHolderNodeID-1] + leaseHolderSrv := tc.Server(leaseHolderNodeID - 1) leaseHolderStoreID := leaseHolderSrv.GetFirstStoreID() leaseHolderStore, err := leaseHolderSrv.GetStores().(*kvserver.Stores).GetStore(leaseHolderStoreID) if err != nil { @@ -1960,7 +1972,7 @@ func TestTransferLeaseToLaggingNode(t *testing.T) { } // Start delaying Raft messages to the remote node - remoteSrv := tc.Servers[remoteNodeID-1] + remoteSrv := tc.Server(remoteNodeID - 1) remoteStoreID := remoteSrv.GetFirstStoreID() remoteStore, err := remoteSrv.GetStores().(*kvserver.Stores).GetStore(remoteStoreID) if err != nil { @@ -1974,7 +1986,7 @@ func TestTransferLeaseToLaggingNode(t *testing.T) { workerReady := make(chan bool) // Create persistent range load. require.NoError(t, tc.Stopper().RunAsyncTask(ctx, "load", func(ctx context.Context) { - s = sqlutils.MakeSQLRunner(tc.Conns[remoteNodeID-1]) + s = sqlutils.MakeSQLRunner(tc.ServerConn(remoteNodeID - 1)) workerReady <- true for { s.Exec(t, fmt.Sprintf("update system.comments set comment='abc' "+ @@ -2052,7 +2064,7 @@ func TestTransferLeaseToLaggingNode(t *testing.T) { ) return nil } - currentSrv := tc.Servers[leaseBefore.Replica.NodeID-1] + currentSrv := tc.Server(leaseBefore.Replica.NodeID - 1) leaseStore, err := currentSrv.GetStores().(*kvserver.Stores).GetStore(currentSrv.GetFirstStoreID()) if err != nil { return err @@ -2112,7 +2124,7 @@ func TestReplicateQueueAcquiresInvalidLeases(t *testing.T) { }, ) defer tc.Stopper().Stop(ctx) - db := tc.Conns[0] + db := tc.ServerConn(0) // Disable consistency checker and sql stats collection that may acquire a // lease by querying a range. _, err := db.Exec("set cluster setting server.consistency_check.interval = '0s'") @@ -2147,7 +2159,7 @@ func TestReplicateQueueAcquiresInvalidLeases(t *testing.T) { require.Len(t, invalidLeases(), 0) // Restart the servers to invalidate the leases. - for i := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { tc.StopServer(i) err = tc.RestartServerWithInspect(i, nil) require.NoError(t, err) @@ -2155,7 +2167,8 @@ func TestReplicateQueueAcquiresInvalidLeases(t *testing.T) { forceProcess := func() { // Speed up the queue processing. 
- for _, s := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + s := tc.Server(serverIdx) err := s.GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { return store.ForceReplicationScanAndProcess() }) @@ -2184,7 +2197,8 @@ func TestReplicateQueueAcquiresInvalidLeases(t *testing.T) { func iterateOverAllStores( t *testing.T, tc *testcluster.TestCluster, f func(*kvserver.Store) error, ) { - for _, server := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + server := tc.Server(serverIdx) require.NoError(t, server.GetStores().(*kvserver.Stores).VisitStores(f)) } } @@ -2345,7 +2359,7 @@ func TestPromoteNonVoterInAddVoter(t *testing.T) { var rangeID roachpb.RangeID err = db.QueryRow("SELECT range_id FROM [SHOW RANGES FROM TABLE t] LIMIT 1").Scan(&rangeID) require.NoError(t, err) - addVoterEvents, err := filterRangeLog(tc.Conns[0], + addVoterEvents, err := filterRangeLog(tc.ServerConn(0), rangeID, kvserverpb.RangeLogEventType_add_voter, kvserverpb.ReasonRangeUnderReplicated) require.NoError(t, err) @@ -2540,7 +2554,7 @@ func TestReplicateQueueLeasePreferencePurgatoryError(t *testing.T) { }) defer tc.Stopper().Stop(ctx) - db := tc.Conns[0] + db := tc.ServerConn(0) setLeasePreferences := func(node int) { _, err := db.Exec(fmt.Sprintf(`ALTER TABLE t CONFIGURE ZONE USING num_replicas=3, num_voters=3, voter_constraints='[]', lease_preferences='[[+rack=%d]]'`, diff --git a/pkg/kv/kvserver/scatter_test.go b/pkg/kv/kvserver/scatter_test.go index 0e6c983b2533..3fb83360a252 100644 --- a/pkg/kv/kvserver/scatter_test.go +++ b/pkg/kv/kvserver/scatter_test.go @@ -64,7 +64,7 @@ func TestAdminScatterWithDrainingNodes(t *testing.T) { drainingStore := tc.GetFirstStoreFromServer(t, drainingServerIdx) // Wait until the non-draining node is aware of the draining node. - testutils.SucceedsSoon(t, tc.Servers[drainingServerIdx].HeartbeatNodeLiveness) + testutils.SucceedsSoon(t, tc.Server(drainingServerIdx).HeartbeatNodeLiveness) testutils.SucceedsSoon(t, func() error { isDraining, err := nonDrainingStore.GetStoreConfig().StorePool.IsDraining(drainingStore.StoreID()) if err != nil { diff --git a/pkg/kv/kvserver/single_key_test.go b/pkg/kv/kvserver/single_key_test.go index 4e6636aa3b02..971264abbdf0 100644 --- a/pkg/kv/kvserver/single_key_test.go +++ b/pkg/kv/kvserver/single_key_test.go @@ -45,7 +45,7 @@ func TestSingleKey(t *testing.T) { // Initialize the value for our test key to zero. const key = "test-key" - initDB := tc.Servers[0].DB() + initDB := tc.Server(0).DB() if err := initDB.Put(ctx, key, 0); err != nil { t.Fatal(err) } @@ -63,7 +63,7 @@ func TestSingleKey(t *testing.T) { // key. Each worker is configured to talk to a different node in the // cluster. 
for i := 0; i < num; i++ { - db := tc.Servers[i].DB() + db := tc.Server(i).DB() go func() { var r result for timeutil.Now().Before(deadline) { diff --git a/pkg/kv/txn_external_test.go b/pkg/kv/txn_external_test.go index 9e00ac15de4f..facb6b4e23e1 100644 --- a/pkg/kv/txn_external_test.go +++ b/pkg/kv/txn_external_test.go @@ -212,11 +212,11 @@ func TestRollbackAfterAmbiguousCommit(t *testing.T) { var db *kv.DB var tr *tracing.Tracer if leaseHolder.NodeID == 1 { - db = tc.Servers[1].DB() - tr = tc.Servers[1].TracerI().(*tracing.Tracer) + db = tc.Server(1).DB() + tr = tc.Server(1).TracerI().(*tracing.Tracer) } else { - db = tc.Servers[0].DB() - tr = tc.Servers[0].TracerI().(*tracing.Tracer) + db = tc.Server(0).DB() + tr = tc.Server(0).TracerI().(*tracing.Tracer) } txn := db.NewTxn(ctx, "test") @@ -410,7 +410,8 @@ func testTxnNegotiateAndSendDoesNotBlock(t *testing.T, multiRange, strict, route // Reader goroutines: perform bounded-staleness reads that hit the server-side // negotiation fast-path. - for _, s := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + s := tc.Server(serverIdx) store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) require.NoError(t, err) tracer := s.Tracer() @@ -530,7 +531,7 @@ func TestRevScanAndGet(t *testing.T) { tci := serverutils.StartNewTestCluster(t, 1, base.TestClusterArgs{}) tc := tci.(*testcluster.TestCluster) defer tc.Stopper().Stop(ctx) - db := tc.Servers[0].DB() + db := tc.Server(0).DB() require.NoError(t, db.AdminSplit(ctx, "b", hlc.MaxTimestamp)) require.NoError(t, db.AdminSplit(ctx, "h", hlc.MaxTimestamp)) diff --git a/pkg/server/connectivity_test.go b/pkg/server/connectivity_test.go index 4bab255cd9cf..3a0ac353915d 100644 --- a/pkg/server/connectivity_test.go +++ b/pkg/server/connectivity_test.go @@ -337,7 +337,7 @@ func TestJoinVersionGate(t *testing.T) { oldVersionServerArgs := commonArg oldVersionServerArgs.Knobs = knobs - oldVersionServerArgs.JoinAddr = tc.Servers[0].AdvRPCAddr() + oldVersionServerArgs.JoinAddr = tc.Server(0).AdvRPCAddr() serv, err := tc.AddServer(oldVersionServerArgs) if err != nil { @@ -374,7 +374,7 @@ func TestDecommissionedNodeCannotConnect(t *testing.T) { for _, status := range []livenesspb.MembershipStatus{ livenesspb.MembershipStatus_DECOMMISSIONING, livenesspb.MembershipStatus_DECOMMISSIONED, } { - require.NoError(t, tc.Servers[0].Decommission(ctx, status, []roachpb.NodeID{decomSrv.NodeID()})) + require.NoError(t, tc.Server(0).Decommission(ctx, status, []roachpb.NodeID{decomSrv.NodeID()})) } testutils.SucceedsSoon(t, func() error { diff --git a/pkg/server/multi_store_test.go b/pkg/server/multi_store_test.go index 3006504f5db5..aa0d71bf5f57 100644 --- a/pkg/server/multi_store_test.go +++ b/pkg/server/multi_store_test.go @@ -99,7 +99,8 @@ func TestAddNewStoresToExistingNodes(t *testing.T) { // Sanity check that we're testing what we wanted to test and didn't accidentally // bootstrap three single-node clusters (who knows). - for _, srv := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + srv := tc.Server(serverIdx) require.Equal(t, clusterID, srv.StorageClusterID()) } @@ -107,7 +108,8 @@ func TestAddNewStoresToExistingNodes(t *testing.T) { // store ID. 
testutils.SucceedsSoon(t, func() error { var storeIDs []roachpb.StoreID - for _, server := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + server := tc.Server(serverIdx) var storeCount = 0 if err := server.GetStores().(*kvserver.Stores).VisitStores( func(s *kvserver.Store) error { @@ -169,7 +171,8 @@ func TestMultiStoreIDAlloc(t *testing.T) { // Sanity check that we're testing what we wanted to test and didn't accidentally // bootstrap three single-node clusters (who knows). clusterID := tc.Server(0).StorageClusterID() - for _, srv := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + srv := tc.Server(serverIdx) require.Equal(t, clusterID, srv.StorageClusterID()) } @@ -177,7 +180,8 @@ func TestMultiStoreIDAlloc(t *testing.T) { // store ID. testutils.SucceedsSoon(t, func() error { var storeIDs []roachpb.StoreID - for _, server := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + server := tc.Server(serverIdx) var storeCount = 0 if err := server.GetStores().(*kvserver.Stores).VisitStores( func(s *kvserver.Store) error { diff --git a/pkg/server/server_import_ts_test.go b/pkg/server/server_import_ts_test.go index b3cfd6a90e7f..de998b28afca 100644 --- a/pkg/server/server_import_ts_test.go +++ b/pkg/server/server_import_ts_test.go @@ -21,7 +21,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/server/serverpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" - "github.com/cockroachdb/cockroach/pkg/testutils/testcluster" + "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" "github.com/cockroachdb/cockroach/pkg/ts" "github.com/cockroachdb/cockroach/pkg/ts/tspb" "github.com/cockroachdb/cockroach/pkg/ts/tsutil" @@ -49,26 +49,26 @@ func TestServerWithTimeseriesImport(t *testing.T) { var bytesDumped int64 func() { - srv := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{}) - defer srv.Stopper().Stop(ctx) + s := serverutils.StartServerOnly(t, base.TestServerArgs{}) + defer s.Stopper().Stop(ctx) - cc := srv.Server(0).RPCClientConn(t, username.RootUserName()) + cc := s.RPCClientConn(t, username.RootUserName()) bytesDumped = dumpTSNonempty(t, cc, path) t.Logf("dumped %s bytes", humanizeutil.IBytes(bytesDumped)) }() // Start a new server that will not write time series, and instruct it to // ingest the dump we just wrote. - args := base.TestClusterArgs{} - args.ServerArgs.Settings = cluster.MakeTestingClusterSettings() - ts.TimeseriesStorageEnabled.Override(ctx, &args.ServerArgs.Settings.SV, false) - args.ServerArgs.Knobs.Server = &server.TestingKnobs{ + args := base.TestServerArgs{} + args.Settings = cluster.MakeTestingClusterSettings() + ts.TimeseriesStorageEnabled.Override(ctx, &args.Settings.SV, false) + args.Knobs.Server = &server.TestingKnobs{ ImportTimeseriesFile: path, ImportTimeseriesMappingFile: path + ".yaml", } - srv := testcluster.StartTestCluster(t, 1, args) - defer srv.Stopper().Stop(ctx) - cc := srv.Server(0).RPCClientConn(t, username.RootUserName()) + s := serverutils.StartServerOnly(t, args) + defer s.Stopper().Stop(ctx) + cc := s.RPCClientConn(t, username.RootUserName()) // This would fail if we didn't supply a dump. Just the fact that it returns // successfully proves that we ingested at least some time series (or that we // failed to disable time series). 
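The server_import_ts_test.go hunk above also shows the single-node simplification used where a full cluster was never needed: a one-node StartTestCluster plus srv.Server(0) becomes a server started directly with serverutils.StartServerOnly. A rough sketch of that conversion, assuming the test only ever touches node 0 (the singleNodeSketch name is illustrative and not part of the patch):

package example

import (
	"context"
	"testing"

	"github.com/cockroachdb/cockroach/pkg/base"
	"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
)

// singleNodeSketch starts one test server directly instead of a one-node
// test cluster, mirroring the conversion in server_import_ts_test.go.
func singleNodeSketch(t *testing.T) {
	ctx := context.Background()
	// Before: srv := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{})
	// with every access going through srv.Server(0).
	s := serverutils.StartServerOnly(t, base.TestServerArgs{})
	defer s.Stopper().Stop(ctx)
	// The server is then used directly, e.g. s.RPCClientConn(t, ...) as in the hunk above.
}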
diff --git a/pkg/server/version_cluster_test.go b/pkg/server/version_cluster_test.go index d9df61fb24e3..7046833a5668 100644 --- a/pkg/server/version_cluster_test.go +++ b/pkg/server/version_cluster_test.go @@ -192,8 +192,8 @@ func TestClusterVersionPersistedOnJoin(t *testing.T) { tc := setupMixedCluster(t, knobs, versions, dir) defer tc.TestCluster.Stopper().Stop(ctx) - for i := 0; i < len(tc.TestCluster.Servers); i++ { - for _, engine := range tc.TestCluster.Servers[i].Engines() { + for i := 0; i < tc.TestCluster.NumServers(); i++ { + for _, engine := range tc.TestCluster.Server(i).Engines() { cv := engine.MinVersion() if cv != newVersion { t.Fatalf("n%d: expected version %v, got %v", i+1, newVersion, cv) @@ -273,7 +273,7 @@ func TestClusterVersionUpgrade(t *testing.T) { testutils.SucceedsWithin(t, func() error { for i := 0; i < tc.NumServers(); i++ { - st := tc.Servers[i].ClusterSettings() + st := tc.Server(i).ClusterSettings() v := st.Version.ActiveVersion(ctx) wantActive := isNoopUpdate if isActive := v.IsActiveVersion(newVersion); isActive != wantActive { @@ -310,7 +310,7 @@ func TestClusterVersionUpgrade(t *testing.T) { // already in the table. testutils.SucceedsWithin(t, func() error { for i := 0; i < tc.NumServers(); i++ { - vers := tc.Servers[i].ClusterSettings().Version.ActiveVersion(ctx) + vers := tc.Server(i).ClusterSettings().Version.ActiveVersion(ctx) if v := vers.String(); v == curVersion { if isNoopUpdate { continue @@ -325,7 +325,7 @@ func TestClusterVersionUpgrade(t *testing.T) { // Since the wrapped version setting exposes the new versions, it must // definitely be present on all stores on the first try. - if err := tc.Servers[1].GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { + if err := tc.Server(1).GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { cv := s.TODOEngine().MinVersion() if act := cv.String(); act != exp { t.Fatalf("%s: %s persisted, but should be %s", s, act, exp) @@ -358,7 +358,7 @@ func TestAllVersionsAgree(t *testing.T) { // comes. testutils.SucceedsSoon(tc, func() error { for i := 0; i < tc.NumServers(); i++ { - if version := tc.Servers[i].ClusterSettings().Version.ActiveVersion(ctx); version.String() != exp { + if version := tc.Server(i).ClusterSettings().Version.ActiveVersion(ctx); version.String() != exp { return fmt.Errorf("%d: incorrect version %s (wanted %s)", i, version, exp) } if version := tc.getVersionFromShow(i); version != exp { @@ -477,7 +477,7 @@ func TestClusterVersionMixedVersionTooOld(t *testing.T) { // Check that we can still talk to the first three nodes. for i := 0; i < tc.NumServers()-1; i++ { testutils.SucceedsSoon(tc, func() error { - if version := tc.Servers[i].ClusterSettings().Version.ActiveVersion(ctx).String(); version != v0s { + if version := tc.Server(i).ClusterSettings().Version.ActiveVersion(ctx).String(); version != v0s { return errors.Errorf("%d: incorrect version %s (wanted %s)", i, version, v0s) } if version := tc.getVersionFromShow(i); version != v0s { diff --git a/pkg/sql/ambiguous_commit_test.go b/pkg/sql/ambiguous_commit_test.go index 52fd6fd31df7..1cd91a2a2ffd 100644 --- a/pkg/sql/ambiguous_commit_test.go +++ b/pkg/sql/ambiguous_commit_test.go @@ -154,13 +154,14 @@ func TestAmbiguousCommit(t *testing.T) { // Avoid distSQL so we can reliably hydrate the intended dist // sender's cache below. 
- for _, server := range tc.Servers { + for serverIdx := 0; serverIdx < tc.NumServers(); serverIdx++ { + server := tc.Server(serverIdx) st := server.ClusterSettings() st.Manual.Store(true) sql.DistSQLClusterExecMode.Override(ctx, &st.SV, int64(sessiondatapb.DistSQLOff)) } - sqlDB := tc.Conns[0] + sqlDB := tc.ServerConn(0) if _, err := sqlDB.Exec(`CREATE DATABASE test`); err != nil { t.Fatal(err) diff --git a/pkg/sql/catalog/lease/lease_test.go b/pkg/sql/catalog/lease/lease_test.go index 8e4570cfd7b3..26bb6e5c42fa 100644 --- a/pkg/sql/catalog/lease/lease_test.go +++ b/pkg/sql/catalog/lease/lease_test.go @@ -2514,14 +2514,14 @@ func TestOutstandingLeasesMetric(t *testing.T) { tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{}) ctx := context.Background() defer tc.Stopper().Stop(ctx) - _, err := tc.Conns[0].ExecContext(ctx, "CREATE TABLE a (a INT PRIMARY KEY)") + _, err := tc.ServerConn(0).ExecContext(ctx, "CREATE TABLE a (a INT PRIMARY KEY)") assert.NoError(t, err) - _, err = tc.Conns[0].ExecContext(ctx, "CREATE TABLE b (a INT PRIMARY KEY)") + _, err = tc.ServerConn(0).ExecContext(ctx, "CREATE TABLE b (a INT PRIMARY KEY)") assert.NoError(t, err) - gauge := tc.Servers[0].LeaseManager().(*lease.Manager).TestingOutstandingLeasesGauge() + gauge := tc.Server(0).LeaseManager().(*lease.Manager).TestingOutstandingLeasesGauge() outstandingLeases := gauge.Value() - _, err = tc.Conns[0].ExecContext(ctx, "SELECT * FROM a") + _, err = tc.ServerConn(0).ExecContext(ctx, "SELECT * FROM a") assert.NoError(t, err) afterQuery := gauge.Value() @@ -2535,7 +2535,7 @@ func TestOutstandingLeasesMetric(t *testing.T) { } // Expect at least 3 leases: one for a, one for the default database, and one for b. - _, err = tc.Conns[0].ExecContext(ctx, "SELECT * FROM b") + _, err = tc.ServerConn(0).ExecContext(ctx, "SELECT * FROM b") assert.NoError(t, err) afterQuery = gauge.Value() @@ -3145,7 +3145,7 @@ ALTER TABLE t1 SPLIT AT VALUES (1); require.NoError(t, err) // Get the lease manager and table ID for acquiring a lease on. beforeExecute.Lock() - leaseManager = tc.Servers[0].LeaseManager().(*lease.Manager) + leaseManager = tc.Server(0).LeaseManager().(*lease.Manager) beforeExecute.Unlock() tempTableID := uint64(0) err = conn.QueryRow("SELECT table_id FROM crdb_internal.tables WHERE name = $1 AND database_name = current_database()", diff --git a/pkg/sql/colfetcher/bytes_read_test.go b/pkg/sql/colfetcher/bytes_read_test.go index 13d2e17f897e..6e6be5da5e9c 100644 --- a/pkg/sql/colfetcher/bytes_read_test.go +++ b/pkg/sql/colfetcher/bytes_read_test.go @@ -34,7 +34,7 @@ func TestBytesRead(t *testing.T) { ctx := context.Background() defer tc.Stopper().Stop(ctx) - conn := tc.Conns[0] + conn := tc.ServerConn(0) // Create the table with disabled automatic table stats collection. 
The // stats collection is disabled so that the ColBatchScan would read the diff --git a/pkg/sql/colfetcher/vectorized_batch_size_test.go b/pkg/sql/colfetcher/vectorized_batch_size_test.go index 2079c54a4d0b..e170e1f118a9 100644 --- a/pkg/sql/colfetcher/vectorized_batch_size_test.go +++ b/pkg/sql/colfetcher/vectorized_batch_size_test.go @@ -88,7 +88,7 @@ func TestScanBatchSize(t *testing.T) { ctx := context.Background() defer tc.Stopper().Stop(ctx) - conn := tc.Conns[0] + conn := tc.ServerConn(0) sqlDB := sqlutils.MakeSQLRunner(conn) // Until we propagate the estimated row count hint in the KV projection @@ -160,7 +160,7 @@ func TestCFetcherLimitsOutputBatch(t *testing.T) { tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{ReplicationMode: base.ReplicationAuto}) ctx := context.Background() defer tc.Stopper().Stop(ctx) - conn := tc.Conns[0] + conn := tc.ServerConn(0) // Until we propagate the estimated row count hint in the KV projection // pushdown case, this test is expected to fail if the direct scans are diff --git a/pkg/sql/colflow/vectorized_flow_deadlock_test.go b/pkg/sql/colflow/vectorized_flow_deadlock_test.go index bca95fde4cb6..f95506316e4f 100644 --- a/pkg/sql/colflow/vectorized_flow_deadlock_test.go +++ b/pkg/sql/colflow/vectorized_flow_deadlock_test.go @@ -50,7 +50,7 @@ func TestVectorizedFlowDeadlocksWhenSpilling(t *testing.T) { tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{ServerArgs: serverArgs}) ctx := context.Background() defer tc.Stopper().Stop(ctx) - conn := tc.Conns[0] + conn := tc.ServerConn(0) _, err := conn.ExecContext(ctx, "CREATE TABLE t (a, b) AS SELECT i, i FROM generate_series(1, 10000) AS g(i)") require.NoError(t, err) diff --git a/pkg/sql/colflow/vectorized_flow_planning_test.go b/pkg/sql/colflow/vectorized_flow_planning_test.go index e3b0bba531a1..cec094649f39 100644 --- a/pkg/sql/colflow/vectorized_flow_planning_test.go +++ b/pkg/sql/colflow/vectorized_flow_planning_test.go @@ -31,7 +31,7 @@ func TestVectorizedPlanning(t *testing.T) { tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{ReplicationMode: base.ReplicationAuto}) ctx := context.Background() defer tc.Stopper().Stop(ctx) - conn := tc.Conns[0] + conn := tc.ServerConn(0) t.Run("no columnarizer-materializer", func(t *testing.T) { if !buildutil.CrdbTestBuild { diff --git a/pkg/sql/crdb_internal_test.go b/pkg/sql/crdb_internal_test.go index e15d2463c7bd..f69c345e13c7 100644 --- a/pkg/sql/crdb_internal_test.go +++ b/pkg/sql/crdb_internal_test.go @@ -129,10 +129,10 @@ func TestRangeLocalityBasedOnNodeIDs(t *testing.T) { }, ) defer tc.Stopper().Stop(ctx) - assert.EqualValues(t, 1, tc.Servers[len(tc.Servers)-1].GetFirstStoreID()) + assert.EqualValues(t, 1, tc.Server(tc.NumServers()-1).GetFirstStoreID()) // Set to 2 so the next store id will be 3. - assert.NoError(t, tc.Servers[0].DB().Put(ctx, keys.StoreIDGenerator, 2)) + assert.NoError(t, tc.Server(0).DB().Put(ctx, keys.StoreIDGenerator, 2)) // NodeID=2, StoreID=3 tc.AddAndStartServer(t, @@ -140,10 +140,10 @@ func TestRangeLocalityBasedOnNodeIDs(t *testing.T) { Locality: roachpb.Locality{Tiers: []roachpb.Tier{{Key: "node", Value: "2"}}}, }, ) - assert.EqualValues(t, 3, tc.Servers[len(tc.Servers)-1].GetFirstStoreID()) + assert.EqualValues(t, 3, tc.Server(tc.NumServers()-1).GetFirstStoreID()) // Set to 1 so the next store id will be 2. 
- assert.NoError(t, tc.Servers[0].DB().Put(ctx, keys.StoreIDGenerator, 1)) + assert.NoError(t, tc.Server(0).DB().Put(ctx, keys.StoreIDGenerator, 1)) // NodeID=3, StoreID=2 tc.AddAndStartServer(t, @@ -151,10 +151,10 @@ func TestRangeLocalityBasedOnNodeIDs(t *testing.T) { Locality: roachpb.Locality{Tiers: []roachpb.Tier{{Key: "node", Value: "3"}}}, }, ) - assert.EqualValues(t, 2, tc.Servers[len(tc.Servers)-1].GetFirstStoreID()) + assert.EqualValues(t, 2, tc.Server(tc.NumServers()-1).GetFirstStoreID()) assert.NoError(t, tc.WaitForFullReplication()) - sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + sqlDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) var replicas, localities string sqlDB.QueryRow(t, `select replicas, replica_localities from crdb_internal.ranges limit 1`). Scan(&replicas, &localities) diff --git a/pkg/sql/create_test.go b/pkg/sql/create_test.go index a22259c1c6c7..a002256da3d2 100644 --- a/pkg/sql/create_test.go +++ b/pkg/sql/create_test.go @@ -157,7 +157,7 @@ func TestParallelCreateTables(t *testing.T) { t.Fatal(err) } // Get the id descriptor generator count. - s := tc.Servers[0].ApplicationLayer() + s := tc.Server(0).ApplicationLayer() idgen := descidgen.NewGenerator(s.ClusterSettings(), s.Codec(), s.DB()) descIDStart, err := idgen.PeekNextUniqueDescID(context.Background()) if err != nil { @@ -212,7 +212,7 @@ func TestParallelCreateConflictingTables(t *testing.T) { } // Get the id descriptor generator count. - s := tc.Servers[0].ApplicationLayer() + s := tc.Server(0).ApplicationLayer() idgen := descidgen.NewGenerator(s.ClusterSettings(), s.Codec(), s.DB()) descIDStart, err := idgen.PeekNextUniqueDescID(context.Background()) if err != nil { diff --git a/pkg/sql/importer/exportcsv_test.go b/pkg/sql/importer/exportcsv_test.go index 5d232389e971..7c1f83d3250d 100644 --- a/pkg/sql/importer/exportcsv_test.go +++ b/pkg/sql/importer/exportcsv_test.go @@ -160,7 +160,7 @@ func TestExportNullWithEmptyNullAs(t *testing.T) { t, 1, base.TestClusterArgs{ServerArgs: base.TestServerArgs{ExternalIODir: dir}}) defer tc.Stopper().Stop(ctx) - conn := tc.Conns[0] + conn := tc.ServerConn(0) db := sqlutils.MakeSQLRunner(conn) // Set up dummy accounts table with NULL value @@ -327,7 +327,7 @@ func TestExportUserDefinedTypes(t *testing.T) { t, 1, base.TestClusterArgs{ServerArgs: base.TestServerArgs{ExternalIODir: dir}}) defer tc.Stopper().Stop(ctx) - conn := tc.Conns[0] + conn := tc.ServerConn(0) sqlDB := sqlutils.MakeSQLRunner(conn) // Set up some initial state for the tests. 
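(Aside, not part of the patch.) Every test-side hunk above applies the same substitution: indexing the exported slices, tc.Conns[i] and tc.Servers[i], becomes a call to the accessors tc.ServerConn(i) and tc.Server(i). Below is a minimal sketch of the resulting pattern, assuming the usual CockroachDB test imports; the package and test names are hypothetical.

package example_test // hypothetical name, illustration only

import (
	"context"
	"testing"

	"github.com/cockroachdb/cockroach/pkg/base"
	"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
	"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
)

func TestAccessorPattern(t *testing.T) {
	ctx := context.Background()
	tc := testcluster.StartTestCluster(t, 3, base.TestClusterArgs{})
	defer tc.Stopper().Stop(ctx)

	// SQL connections come from the ServerConn accessor instead of tc.Conns[0].
	sqlDB := sqlutils.MakeSQLRunner(tc.ServerConn(0))
	sqlDB.Exec(t, `CREATE DATABASE d`)

	// Per-server handles likewise use Server(i) instead of tc.Servers[i].
	_ = tc.Server(0).ClusterSettings()
}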
@@ -516,7 +516,7 @@ func TestExportTargetFileSizeSetting(t *testing.T) { t, 1, base.TestClusterArgs{ServerArgs: base.TestServerArgs{ExternalIODir: dir}}) defer tc.Stopper().Stop(ctx) - conn := tc.Conns[0] + conn := tc.ServerConn(0) sqlDB := sqlutils.MakeSQLRunner(conn) sqlDB.Exec(t, `EXPORT INTO CSV 'nodelocal://1/foo' WITH chunk_size='10KB' FROM select i, gen_random_uuid() from generate_series(1, 4000) as i;`) diff --git a/pkg/sql/opt/exec/explain/output_test.go b/pkg/sql/opt/exec/explain/output_test.go index 3e8195763f58..945030ad8c5d 100644 --- a/pkg/sql/opt/exec/explain/output_test.go +++ b/pkg/sql/opt/exec/explain/output_test.go @@ -132,7 +132,7 @@ func TestMaxDiskSpillUsage(t *testing.T) { ctx := context.Background() defer tc.Stopper().Stop(ctx) - conn := tc.Conns[0] + conn := tc.ServerConn(0) _, err := conn.ExecContext(ctx, ` CREATE TABLE t (a PRIMARY KEY, b) AS SELECT i, i FROM generate_series(1, 10) AS g(i) @@ -183,7 +183,7 @@ func TestCPUTimeEndToEnd(t *testing.T) { ctx := context.Background() defer tc.Stopper().Stop(ctx) - db := sqlutils.MakeSQLRunner(tc.Conns[0]) + db := sqlutils.MakeSQLRunner(tc.ServerConn(0)) runQuery := func(query string, hideCPU bool) { rows := db.QueryStr(t, "EXPLAIN ANALYZE "+query) @@ -247,7 +247,7 @@ func TestContentionTimeOnWrites(t *testing.T) { ctx := context.Background() defer tc.Stopper().Stop(ctx) - runner := sqlutils.MakeSQLRunner(tc.Conns[0]) + runner := sqlutils.MakeSQLRunner(tc.ServerConn(0)) runner.Exec(t, "CREATE TABLE t (k INT PRIMARY KEY, v INT)") // The test involves three goroutines: @@ -276,7 +276,7 @@ func TestContentionTimeOnWrites(t *testing.T) { close(sem) } }() - txn, err := tc.Conns[0].Begin() + txn, err := tc.ServerConn(0).Begin() if err != nil { errCh <- err return diff --git a/pkg/sql/physicalplan/span_resolver_test.go b/pkg/sql/physicalplan/span_resolver_test.go index cb55e4aba1ed..4d205ef55c9c 100644 --- a/pkg/sql/physicalplan/span_resolver_test.go +++ b/pkg/sql/physicalplan/span_resolver_test.go @@ -51,7 +51,7 @@ func TestSpanResolverUsesCaches(t *testing.T) { defer tc.Stopper().Stop(context.Background()) rowRanges, _ := setupRanges( - tc.Conns[0], tc.Servers[0], tc.Servers[0].DB(), t) + tc.ServerConn(0), tc.Server(0), tc.Server(0).DB(), t) // Replicate the row ranges on all of the first 3 nodes. Save the 4th node in // a pristine state, with empty caches. @@ -83,7 +83,7 @@ func TestSpanResolverUsesCaches(t *testing.T) { } // Create a SpanResolver using the 4th node, with empty caches. - s3 := tc.Servers[3] + s3 := tc.Server(3) lr := physicalplan.NewSpanResolver( s3.ClusterSettings(), @@ -117,7 +117,7 @@ func TestSpanResolverUsesCaches(t *testing.T) { if len(replicas) != 3 { t.Fatalf("expected replies for 3 spans, got %d", len(replicas)) } - si := tc.Servers[0] + si := tc.Server(0) storeID := si.GetFirstStoreID() for i := 0; i < 3; i++ { @@ -134,7 +134,7 @@ func TestSpanResolverUsesCaches(t *testing.T) { // Now populate the cached on node 4 and query again. This time, we expect to see // each span on its own range. - if err := populateCache(tc.Conns[3], 3 /* expectedNumRows */); err != nil { + if err := populateCache(tc.ServerConn(3), 3 /* expectedNumRows */); err != nil { t.Fatal(err) } replicas, err = resolveSpans(context.Background(), lr.NewSpanResolverIterator(nil, nil), spans...) 
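(Aside, not part of the patch.) The loop conversions above follow a second recurring shape: code that previously ranged over tc.Servers, or sized slices with len(tc.Servers), now goes through NumServers() and Server(i). A sketch under the same assumptions; the package and helper names are hypothetical.

package example // hypothetical, illustration only

import (
	"github.com/cockroachdb/cockroach/pkg/settings/cluster"
	"github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
)

// collectSettings gathers each server's settings by index, the pattern that
// replaces `for _, s := range tc.Servers` now that the slice is unexported.
func collectSettings(tc *testcluster.TestCluster) []*cluster.Settings {
	// Slice sizing uses NumServers() where len(tc.Servers) was used before.
	out := make([]*cluster.Settings, tc.NumServers())
	for i := 0; i < tc.NumServers(); i++ {
		out[i] = tc.Server(i).ClusterSettings()
	}
	return out
}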
@@ -144,7 +144,7 @@ func TestSpanResolverUsesCaches(t *testing.T) { var expected [][]rngInfo for i := 0; i < 3; i++ { - expected = append(expected, []rngInfo{selectReplica(tc.Servers[i].NodeID(), rowRanges[i])}) + expected = append(expected, []rngInfo{selectReplica(tc.Server(i).NodeID(), rowRanges[i])}) } if err = expectResolved(replicas, expected...); err != nil { t.Fatal(err) diff --git a/pkg/sql/revert_test.go b/pkg/sql/revert_test.go index 932f7f08354e..aa55db6d8fe6 100644 --- a/pkg/sql/revert_test.go +++ b/pkg/sql/revert_test.go @@ -190,7 +190,7 @@ func TestRevertSpansFanout(t *testing.T) { }) defer tc.Stopper().Stop(context.Background()) s := tc.ApplicationLayer(0) - sqlDB := tc.Conns[0] + sqlDB := tc.ServerConn(0) execCfg := s.ExecutorConfig().(sql.ExecutorConfig) diff --git a/pkg/sql/show_ranges_test.go b/pkg/sql/show_ranges_test.go index e602d74a90f4..937bd1466c3a 100644 --- a/pkg/sql/show_ranges_test.go +++ b/pkg/sql/show_ranges_test.go @@ -39,7 +39,7 @@ func TestShowRangesWithLocality(t *testing.T) { tc := testcluster.StartTestCluster(t, numNodes, base.TestClusterArgs{}) defer tc.Stopper().Stop(ctx) - sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + sqlDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) sqlDB.Exec(t, `CREATE TABLE t (x INT PRIMARY KEY)`) sqlDB.Exec(t, `ALTER TABLE t SPLIT AT SELECT i FROM generate_series(0, 20) AS g(i)`) @@ -124,7 +124,7 @@ func TestShowRangesMultipleStores(t *testing.T) { assert.NoError(t, tc.WaitForFullReplication()) // Scatter a system table so that the lease is unlike to be on node 1. - sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + sqlDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) sqlDB.Exec(t, "ALTER TABLE system.jobs SCATTER") // Ensure that the localities line up. for _, q := range []string{ @@ -184,7 +184,7 @@ func TestShowRangesWithDetails(t *testing.T) { tc := testcluster.StartTestCluster(t, 3, base.TestClusterArgs{}) defer tc.Stopper().Stop(ctx) - sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + sqlDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) sqlDB.Exec(t, "CREATE DATABASE test") sqlDB.Exec(t, "USE test") sqlDB.Exec(t, ` diff --git a/pkg/sql/show_trace_replica_test.go b/pkg/sql/show_trace_replica_test.go index 7ce0973f4a18..dc650c43234a 100644 --- a/pkg/sql/show_trace_replica_test.go +++ b/pkg/sql/show_trace_replica_test.go @@ -65,7 +65,7 @@ func TestShowTraceReplica(t *testing.T) { tc := testcluster.StartTestCluster(t, numNodes, tcArgs) defer tc.Stopper().Stop(ctx) - sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + sqlDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) sqlDB.Exec(t, `ALTER RANGE "default" CONFIGURE ZONE USING constraints = '[+n4]'`) sqlDB.Exec(t, `ALTER DATABASE system CONFIGURE ZONE USING constraints = '[+n4]'`) sqlDB.Exec(t, `CREATE DATABASE d`) diff --git a/pkg/sql/sqlstats/persistedsqlstats/bench_test.go b/pkg/sql/sqlstats/persistedsqlstats/bench_test.go index b90993f3fcca..d899bf4eb806 100644 --- a/pkg/sql/sqlstats/persistedsqlstats/bench_test.go +++ b/pkg/sql/sqlstats/persistedsqlstats/bench_test.go @@ -143,8 +143,8 @@ func BenchmarkSqlStatsPersisted(b *testing.B) { SQLMemoryPoolSize: 512 << 20, }, }) - sqlRunner := sqlutils.MakeRoundRobinSQLRunner(tc.Conns[0], - tc.Conns[1], tc.Conns[2]) + sqlRunner := sqlutils.MakeRoundRobinSQLRunner(tc.ServerConn(0), + tc.ServerConn(1), tc.ServerConn(2)) return sqlRunner, tc }, }, @@ -159,9 +159,9 @@ func BenchmarkSqlStatsPersisted(b *testing.B) { SQLMemoryPoolSize: 512 << 20, }, }) - sqlRunner := sqlutils.MakeRoundRobinSQLRunner(tc.Conns[0], - tc.Conns[1], tc.Conns[2], tc.Conns[3], - 
tc.Conns[4], tc.Conns[5]) + sqlRunner := sqlutils.MakeRoundRobinSQLRunner(tc.ServerConn(0), + tc.ServerConn(1), tc.ServerConn(2), tc.ServerConn(3), + tc.ServerConn(4), tc.ServerConn(5)) return sqlRunner, tc }, }, diff --git a/pkg/sql/stats/create_stats_job_test.go b/pkg/sql/stats/create_stats_job_test.go index 55ea334a782e..754f813aeb5a 100644 --- a/pkg/sql/stats/create_stats_job_test.go +++ b/pkg/sql/stats/create_stats_job_test.go @@ -67,7 +67,7 @@ func TestCreateStatsControlJob(t *testing.T) { ctx := context.Background() tc := testcluster.StartTestCluster(t, nodes, params) defer tc.Stopper().Stop(ctx) - sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + sqlDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) sqlDB.Exec(t, `CREATE DATABASE d`) sqlDB.Exec(t, `CREATE TABLE d.t (x INT PRIMARY KEY)`) var tID descpb.ID @@ -140,7 +140,7 @@ func TestAtMostOneRunningCreateStats(t *testing.T) { const nodes = 1 tc := testcluster.StartTestCluster(t, nodes, params) defer tc.Stopper().Stop(ctx) - conn := tc.Conns[0] + conn := tc.ServerConn(0) sqlDB := sqlutils.MakeSQLRunner(conn) sqlDB.Exec(t, `CREATE DATABASE d`) @@ -235,7 +235,7 @@ func TestDeleteFailedJob(t *testing.T) { serverArgs := base.TestServerArgs{Knobs: base.TestingKnobs{JobsTestingKnobs: jobs.NewTestingKnobsWithShortIntervals()}} tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{ServerArgs: serverArgs}) defer tc.Stopper().Stop(ctx) - conn := tc.Conns[0] + conn := tc.ServerConn(0) sqlDB := sqlutils.MakeSQLRunner(conn) sqlDB.Exec(t, `SET CLUSTER SETTING sql.stats.automatic_collection.enabled = false`) @@ -309,7 +309,7 @@ func TestCreateStatsProgress(t *testing.T) { const nodes = 1 tc := testcluster.StartTestCluster(t, nodes, params) defer tc.Stopper().Stop(ctx) - conn := tc.Conns[0] + conn := tc.ServerConn(0) sqlDB := sqlutils.MakeSQLRunner(conn) sqlDB.Exec(t, `SET CLUSTER SETTING sql.stats.automatic_collection.enabled = false`) @@ -387,7 +387,7 @@ func TestCreateStatsProgress(t *testing.T) { // Invalidate the stats cache so that we can be sure to get the latest stats. 
var tableID descpb.ID sqlDB.QueryRow(t, `SELECT id FROM system.namespace WHERE name = 't'`).Scan(&tableID) - tc.Servers[0].ExecutorConfig().(sql.ExecutorConfig).TableStatsCache.InvalidateTableStats( + tc.Server(0).ExecutorConfig().(sql.ExecutorConfig).TableStatsCache.InvalidateTableStats( ctx, tableID, ) @@ -448,7 +448,7 @@ func TestCreateStatsAsOfTime(t *testing.T) { ctx := context.Background() tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{}) defer tc.Stopper().Stop(ctx) - sqlDB := sqlutils.MakeSQLRunner(tc.Conns[0]) + sqlDB := sqlutils.MakeSQLRunner(tc.ServerConn(0)) sqlDB.Exec(t, `CREATE DATABASE d`) sqlDB.Exec(t, `CREATE TABLE d.t (x INT PRIMARY KEY)`) diff --git a/pkg/sql/tests/monotonic_insert_test.go b/pkg/sql/tests/monotonic_insert_test.go index 607b434c0770..fa435ea13ee9 100644 --- a/pkg/sql/tests/monotonic_insert_test.go +++ b/pkg/sql/tests/monotonic_insert_test.go @@ -107,7 +107,8 @@ func testMonotonicInserts(t *testing.T, distSQLMode sessiondatapb.DistSQLExecMod ) defer tc.Stopper().Stop(ctx) - for _, server := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { + server := tc.Server(i) st := server.ClusterSettings() st.Manual.Store(true) sql.DistSQLClusterExecMode.Override(ctx, &st.SV, int64(distSQLMode)) @@ -118,8 +119,8 @@ func testMonotonicInserts(t *testing.T, distSQLMode sessiondatapb.DistSQLExecMod } var clients []mtClient - for i := range tc.Conns { - clients = append(clients, mtClient{ID: i, DB: tc.Conns[i]}) + for i := 0; i < tc.NumServers(); i++ { + clients = append(clients, mtClient{ID: i, DB: tc.ServerConn(i)}) } // We will insert into this table by selecting MAX(val) and increasing by // one and expect that val and sts (the commit timestamp) are both @@ -225,7 +226,7 @@ RETURNING val, sts, node, tb`, } } - sem := make(chan struct{}, 2*len(tc.Conns)) + sem := make(chan struct{}, 2*tc.NumServers()) timer := time.After(5 * time.Second) defer verify() diff --git a/pkg/sql/tests/system_table_test.go b/pkg/sql/tests/system_table_test.go index 3e3c58267510..eab99f5c9390 100644 --- a/pkg/sql/tests/system_table_test.go +++ b/pkg/sql/tests/system_table_test.go @@ -181,7 +181,7 @@ func TestSystemTableLiterals(t *testing.T) { }) defer tc.Stopper().Stop(ctx) - s := tc.Servers[0] + s := tc.Server(0) testcases := make(map[string]testcase) for _, table := range systemschema.MakeSystemTables() { diff --git a/pkg/testutils/testcluster/testcluster.go b/pkg/testutils/testcluster/testcluster.go index 28b1f28b090f..d54edcd91582 100644 --- a/pkg/testutils/testcluster/testcluster.go +++ b/pkg/testutils/testcluster/testcluster.go @@ -60,8 +60,8 @@ import ( // analogous to TestServer, but with control over range replication and join // flags. type TestCluster struct { - Servers []serverutils.TestServerInterface - Conns []*gosql.DB + servers []serverutils.TestServerInterface + conns []*gosql.DB // reusableListeners is populated if (and only if) TestClusterArgs.reusableListeners is set. reusableListeners map[int] /* idx */ *listenerutil.ReusableListener @@ -80,18 +80,18 @@ var _ serverutils.TestClusterInterface = &TestCluster{} // NumServers is part of TestClusterInterface. func (tc *TestCluster) NumServers() int { - return len(tc.Servers) + return len(tc.servers) } // Server is part of TestClusterInterface. func (tc *TestCluster) Server(idx int) serverutils.TestServerInterface { - return tc.Servers[idx] + return tc.servers[idx] } // NodeIDs is part of TestClusterInterface. 
func (tc *TestCluster) NodeIDs() []roachpb.NodeID { - nodeIds := make([]roachpb.NodeID, len(tc.Servers)) - for i, s := range tc.Servers { + nodeIds := make([]roachpb.NodeID, len(tc.servers)) + for i, s := range tc.servers { nodeIds[i] = s.NodeID() } return nodeIds @@ -99,7 +99,7 @@ func (tc *TestCluster) NodeIDs() []roachpb.NodeID { // ServerConn is part of TestClusterInterface. func (tc *TestCluster) ServerConn(idx int) *gosql.DB { - return tc.Conns[idx] + return tc.conns[idx] } // Stopper returns the stopper for this testcluster. @@ -110,7 +110,7 @@ func (tc *TestCluster) Stopper() *stop.Stopper { // StartedDefaultTestTenant returns whether this cluster started a default // test tenant. func (tc *TestCluster) StartedDefaultTestTenant() bool { - return tc.Servers[0].StartedDefaultTestTenant() + return tc.servers[0].StartedDefaultTestTenant() } // ApplicationLayer calls .ApplicationLayer() on the ith server in @@ -149,7 +149,7 @@ func (tc *TestCluster) stopServers(ctx context.Context) { go func(i int, s *stop.Stopper) { defer wg.Done() if s != nil { - quiesceCtx := logtags.AddTag(ctx, "n", tc.Servers[i].NodeID()) + quiesceCtx := logtags.AddTag(ctx, "n", tc.servers[i].NodeID()) s.Quiesce(quiesceCtx) } }(i, s) @@ -175,7 +175,7 @@ func (tc *TestCluster) stopServers(ctx context.Context) { // example of this. // // [1]: cleanupSessionTempObjects - tracer := tc.Servers[i].Tracer() + tracer := tc.servers[i].Tracer() testutils.SucceedsSoon(tc.t, func() error { var sps []tracing.RegistrySpan _ = tracer.VisitSpans(func(span tracing.RegistrySpan) error { @@ -359,7 +359,7 @@ func NewTestCluster( // in a separate thread and with ParallelStart enabled (otherwise it'll block // on waiting for init for the first server). func (tc *TestCluster) Start(t serverutils.TestFataler) { - nodes := len(tc.Servers) + nodes := len(tc.servers) var errCh chan error if tc.clusterArgs.ParallelStart { errCh = make(chan error, nodes) @@ -370,7 +370,7 @@ func (tc *TestCluster) Start(t serverutils.TestFataler) { // server in the cluster since they should all be set to the same value // (validated below). probabilisticallyStartTestTenant := false - if !tc.Servers[0].DefaultTestTenantDisabled() { + if !tc.servers[0].DefaultTestTenantDisabled() { probabilisticallyStartTestTenant = serverutils.ShouldStartDefaultTestTenant(t, tc.serverArgs[0]) } @@ -387,9 +387,9 @@ func (tc *TestCluster) Start(t serverutils.TestFataler) { // with two separate if checks because the DisableDefaultTestTenant flag // could have been set coming into this function by the caller. if !probabilisticallyStartTestTenant { - tc.Servers[i].DisableDefaultTestTenant() + tc.servers[i].DisableDefaultTestTenant() } - if tc.Servers[i].DefaultTestTenantDisabled() { + if tc.servers[i].DefaultTestTenantDisabled() { if startedTestTenant && i > 0 { t.Fatal(errors.Newf("starting only some nodes with a test tenant is not"+ "currently supported - attempted to disable SQL sever on node %d", i)) @@ -408,7 +408,7 @@ func (tc *TestCluster) Start(t serverutils.TestFataler) { // We want to wait for stores for each server in order to have predictable // store IDs. Otherwise, stores can be asynchronously bootstrapped in an // unexpected order (#22342). 
- tc.WaitForNStores(t, i+1, tc.Servers[0].GossipI().(*gossip.Gossip)) + tc.WaitForNStores(t, i+1, tc.servers[0].GossipI().(*gossip.Gossip)) } } @@ -423,12 +423,12 @@ func (tc *TestCluster) Start(t serverutils.TestFataler) { } } - tc.WaitForNStores(t, tc.NumServers(), tc.Servers[0].GossipI().(*gossip.Gossip)) + tc.WaitForNStores(t, tc.NumServers(), tc.servers[0].GossipI().(*gossip.Gossip)) } // Now that we have started all the servers on the bootstrap version, let us // run the migrations up to the overridden BinaryVersion. - s := tc.Servers[0] + s := tc.servers[0] if v := s.BinaryVersionOverride(); v != (roachpb.Version{}) { for _, layer := range []serverutils.ApplicationLayerInterface{s.SystemLayer(), s.ApplicationLayer()} { ie := layer.InternalExecutor().(isql.Executor) @@ -480,8 +480,8 @@ func (tc *TestCluster) Start(t serverutils.TestFataler) { tc.WaitForNodeStatuses(t) testutils.SucceedsSoon(t, func() error { var err error - for _, ssrv := range tc.Servers { - for _, dsrv := range tc.Servers { + for _, ssrv := range tc.servers { + for _, dsrv := range tc.servers { stl := dsrv.StorageLayer() // Note: we avoid using .RPCClientConn() here to avoid accumulating // stopper closures in RAM during the SucceedsSoon iterations. @@ -544,14 +544,14 @@ func (tc *TestCluster) AddAndStartServer( // The new Server's copy of serverArgs might be changed according to the // cluster's ReplicationMode. func (tc *TestCluster) AddAndStartServerE(serverArgs base.TestServerArgs) error { - if serverArgs.JoinAddr == "" && len(tc.Servers) > 0 { - serverArgs.JoinAddr = tc.Servers[0].AdvRPCAddr() + if serverArgs.JoinAddr == "" && len(tc.servers) > 0 { + serverArgs.JoinAddr = tc.servers[0].AdvRPCAddr() } if _, err := tc.AddServer(serverArgs); err != nil { return err } - return tc.startServer(len(tc.Servers)-1, serverArgs) + return tc.startServer(len(tc.servers)-1, serverArgs) } // AddServer is like AddAndStartServer, except it does not start it. @@ -613,7 +613,7 @@ func (tc *TestCluster) AddServer( s.DisableStartTenant(serverutils.PreventStartTenantError) } - tc.Servers = append(tc.Servers, s) + tc.servers = append(tc.servers, s) tc.serverArgs = append(tc.serverArgs, serverArgs) tc.mu.Lock() @@ -625,7 +625,7 @@ func (tc *TestCluster) AddServer( // startServer is the companion method to AddServer, and is responsible for // actually starting the server. func (tc *TestCluster) startServer(idx int, serverArgs base.TestServerArgs) error { - server := tc.Servers[idx] + server := tc.servers[idx] if err := server.Start(context.Background()); err != nil { return err } @@ -637,7 +637,7 @@ func (tc *TestCluster) startServer(idx int, serverArgs base.TestServerArgs) erro tc.mu.Lock() defer tc.mu.Unlock() - tc.Conns = append(tc.Conns, dbConn) + tc.conns = append(tc.conns, dbConn) return nil } @@ -690,7 +690,7 @@ func (tc *TestCluster) WaitForNStores(t serverutils.TestFataler, n int, g *gossi // LookupRange is part of TestClusterInterface. func (tc *TestCluster) LookupRange(key roachpb.Key) (roachpb.RangeDescriptor, error) { - return tc.Servers[0].LookupRange(key) + return tc.servers[0].LookupRange(key) } // LookupRangeOrFatal is part of TestClusterInterface.
@@ -716,7 +716,7 @@ func (tc *TestCluster) LookupRangeOrFatal( func (tc *TestCluster) SplitRangeWithExpiration( splitKey roachpb.Key, expirationTime hlc.Timestamp, ) (roachpb.RangeDescriptor, roachpb.RangeDescriptor, error) { - return tc.Servers[0].SplitRangeWithExpiration(splitKey, expirationTime) + return tc.servers[0].SplitRangeWithExpiration(splitKey, expirationTime) } // SplitRange splits the range containing splitKey. @@ -729,14 +729,14 @@ func (tc *TestCluster) SplitRangeWithExpiration( func (tc *TestCluster) SplitRange( splitKey roachpb.Key, ) (roachpb.RangeDescriptor, roachpb.RangeDescriptor, error) { - return tc.Servers[0].SplitRange(splitKey) + return tc.servers[0].SplitRange(splitKey) } // SplitRangeOrFatal is the same as SplitRange but will Fatal the test on error. func (tc *TestCluster) SplitRangeOrFatal( t serverutils.TestFataler, splitKey roachpb.Key, ) (roachpb.RangeDescriptor, roachpb.RangeDescriptor) { - lhsDesc, rhsDesc, err := tc.Servers[0].SplitRange(splitKey) + lhsDesc, rhsDesc, err := tc.servers[0].SplitRange(splitKey) if err != nil { t.Fatalf(`splitting at %s: %+v`, splitKey, err) } @@ -745,7 +745,7 @@ func (tc *TestCluster) SplitRangeOrFatal( // MergeRanges merges the range containing leftKey with the range to its right. func (tc *TestCluster) MergeRanges(leftKey roachpb.Key) (roachpb.RangeDescriptor, error) { - return tc.Servers[0].MergeRanges(leftKey) + return tc.servers[0].MergeRanges(leftKey) } // MergeRangesOrFatal is the same as MergeRanges but will Fatal the test on @@ -762,7 +762,7 @@ func (tc *TestCluster) MergeRangesOrFatal( // Target returns a ReplicationTarget for the specified server. func (tc *TestCluster) Target(serverIdx int) roachpb.ReplicationTarget { - s := tc.Servers[serverIdx] + s := tc.servers[serverIdx] return roachpb.ReplicationTarget{ NodeID: s.NodeID(), StoreID: s.GetFirstStoreID(), @@ -790,13 +790,13 @@ func (tc *TestCluster) changeReplicas( if err := testutils.SucceedsSoonError(func() error { tc.t.Helper() var beforeDesc roachpb.RangeDescriptor - if err := tc.Servers[0].DB().GetProto( + if err := tc.servers[0].DB().GetProto( ctx, keys.RangeDescriptorKey(startKey), &beforeDesc, ); err != nil { return errors.Wrap(err, "range descriptor lookup error") } var err error - desc, err = tc.Servers[0].DB().AdminChangeReplicas( + desc, err = tc.servers[0].DB().AdminChangeReplicas( ctx, startKey.AsRawKey(), beforeDesc, kvpb.MakeReplicationChanges(changeType, targets...), ) if kvserver.IsRetriableReplicationChangeError(err) { @@ -1022,7 +1022,7 @@ func (tc *TestCluster) SwapVoterWithNonVoter( ctx := context.Background() key := keys.MustAddr(startKey) var beforeDesc roachpb.RangeDescriptor - if err := tc.Servers[0].DB().GetProto( + if err := tc.servers[0].DB().GetProto( ctx, keys.RangeDescriptorKey(key), &beforeDesc, ); err != nil { return nil, errors.Wrap(err, "range descriptor lookup error") @@ -1034,7 +1034,7 @@ func (tc *TestCluster) SwapVoterWithNonVoter( {ChangeType: roachpb.REMOVE_VOTER, Target: voterTarget}, } - return tc.Servers[0].DB().AdminChangeReplicas(ctx, key, beforeDesc, changes) + return tc.servers[0].DB().AdminChangeReplicas(ctx, key, beforeDesc, changes) } // SwapVoterWithNonVoterOrFatal is part of TestClusterInterface. 
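(Aside, not part of the patch.) For orientation while reading the testcluster.go hunks: the net effect is that both slices become unexported and all access funnels through the TestClusterInterface accessors. The stand-in type below is a deliberately trimmed, hypothetical mirror of that delegation, not the real struct.

package example // hypothetical, illustration only

import (
	gosql "database/sql"

	"github.com/cockroachdb/cockroach/pkg/testutils/serverutils"
)

// miniCluster mirrors only the two fields this diff unexports and the
// accessors callers are expected to use instead of touching the slices.
type miniCluster struct {
	servers []serverutils.TestServerInterface // formerly the exported Servers field
	conns   []*gosql.DB                       // formerly the exported Conns field
}

func (tc *miniCluster) NumServers() int { return len(tc.servers) }

func (tc *miniCluster) Server(idx int) serverutils.TestServerInterface { return tc.servers[idx] }

func (tc *miniCluster) ServerConn(idx int) *gosql.DB { return tc.conns[idx] }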
@@ -1063,7 +1063,7 @@ func (tc *TestCluster) RebalanceVoter( ) (*roachpb.RangeDescriptor, error) { key := keys.MustAddr(startKey) var beforeDesc roachpb.RangeDescriptor - if err := tc.Servers[0].DB().GetProto( + if err := tc.servers[0].DB().GetProto( ctx, keys.RangeDescriptorKey(key), &beforeDesc, ); err != nil { return nil, errors.Wrap(err, "range descriptor lookup error") @@ -1072,7 +1072,7 @@ func (tc *TestCluster) RebalanceVoter( {ChangeType: roachpb.REMOVE_VOTER, Target: src}, {ChangeType: roachpb.ADD_VOTER, Target: dest}, } - return tc.Servers[0].DB().AdminChangeReplicas(ctx, key, beforeDesc, changes) + return tc.servers[0].DB().AdminChangeReplicas(ctx, key, beforeDesc, changes) } // RebalanceVoterOrFatal is part of TestClusterInterface. @@ -1093,7 +1093,7 @@ func (tc *TestCluster) RebalanceVoterOrFatal( func (tc *TestCluster) TransferRangeLease( rangeDesc roachpb.RangeDescriptor, dest roachpb.ReplicationTarget, ) error { - err := tc.Servers[0].DB().AdminTransferLease(context.TODO(), + err := tc.servers[0].DB().AdminTransferLease(context.TODO(), rangeDesc.StartKey.AsRawKey(), dest.StoreID) if err != nil { return errors.Wrapf(err, "%q: transfer lease unexpected error", rangeDesc.StartKey) @@ -1338,7 +1338,7 @@ func (tc *TestCluster) FindRangeLeaseHolder( // kv scratch space (it doesn't overlap system spans or SQL tables). The range // is lazily split off on the first call to ScratchRange. func (tc *TestCluster) ScratchRange(t serverutils.TestFataler) roachpb.Key { - scratchKey, err := tc.Servers[0].ScratchRange() + scratchKey, err := tc.servers[0].ScratchRange() if err != nil { t.Fatal(err) } @@ -1349,7 +1349,7 @@ func (tc *TestCluster) ScratchRange(t serverutils.TestFataler) roachpb.Key { // suitable for use as kv scratch space and that has an expiration based lease. // The range is lazily split off on the first call to ScratchRangeWithExpirationLease. func (tc *TestCluster) ScratchRangeWithExpirationLease(t serverutils.TestFataler) roachpb.Key { - scratchKey, err := tc.Servers[0].ScratchRangeWithExpirationLease() + scratchKey, err := tc.servers[0].ScratchRangeWithExpirationLease() if err != nil { t.Fatal(err) } @@ -1398,7 +1398,7 @@ func (tc *TestCluster) WaitForSplitAndInitialization(startKey roachpb.Key) error func (tc *TestCluster) FindMemberServer( storeID roachpb.StoreID, ) (serverutils.TestServerInterface, error) { - for _, server := range tc.Servers { + for _, server := range tc.servers { if server.GetStores().(*kvserver.Stores).HasStore(storeID) { return server, nil } @@ -1428,7 +1428,7 @@ func (tc *TestCluster) WaitForFullReplication() error { log.Infof(context.TODO(), "WaitForFullReplication took: %s", end.Sub(start)) }() - if len(tc.Servers) < 3 { + if len(tc.servers) < 3 { // If we have less than three nodes, we will never have full replication. 
return nil } @@ -1442,10 +1442,10 @@ func (tc *TestCluster) WaitForFullReplication() error { notReplicated := true for r := retry.Start(opts); r.Next() && notReplicated; { notReplicated = false - for _, s := range tc.Servers { + for _, s := range tc.servers { err := s.GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { - if n := s.ClusterNodeCount(); n != len(tc.Servers) { - log.Infof(context.TODO(), "%s only sees %d/%d available nodes", s, n, len(tc.Servers)) + if n := s.ClusterNodeCount(); n != len(tc.servers) { + log.Infof(context.TODO(), "%s only sees %d/%d available nodes", s, n, len(tc.servers)) notReplicated = true return nil } @@ -1490,7 +1490,7 @@ func (tc *TestCluster) WaitForFullReplication() error { // This bug currently prevents LastUpdated to tick in metamorphic tests // with kv.expiration_leases_only.enabled = true. func (tc *TestCluster) WaitFor5NodeReplication() error { - if len(tc.Servers) > 4 && tc.ReplicationMode() == base.ReplicationAuto { + if len(tc.servers) > 4 && tc.ReplicationMode() == base.ReplicationAuto { // We need to wait for zone config propagations before we could check // conformance since zone configs are propagated synchronously. // Generous timeout is added to allow rangefeeds to catch up. On startup @@ -1509,7 +1509,7 @@ func (tc *TestCluster) WaitFor5NodeReplication() error { // are applied. func (tc *TestCluster) WaitForZoneConfigPropagation() error { now := tc.Server(0).Clock().Now() - for _, s := range tc.Servers { + for _, s := range tc.servers { scs := s.SpanConfigKVSubscriber().(spanconfig.KVSubscriber) if err := testutils.SucceedsSoonError(func() error { if scs.LastUpdated().Less(now) { @@ -1554,7 +1554,7 @@ func (tc *TestCluster) WaitForNodeStatuses(t serverutils.TestFataler) { } nodeIDs[node.Desc.NodeID] = true } - for _, s := range tc.Servers { + for _, s := range tc.servers { // Not using s.NodeID() here, on purpose. s.NodeID() uses the // in-RAM version in the RPC context, which is set earlier than // the node descriptor. @@ -1570,8 +1570,8 @@ func (tc *TestCluster) WaitForNodeStatuses(t serverutils.TestFataler) { // node in the cluster. func (tc *TestCluster) WaitForNodeLiveness(t serverutils.TestFataler) { testutils.SucceedsSoon(t, func() error { - db := tc.Servers[0].DB() - for _, s := range tc.Servers { + db := tc.servers[0].DB() + for _, s := range tc.servers { key := keys.NodeLivenessKey(s.NodeID()) var liveness livenesspb.Liveness if err := db.GetProto(context.Background(), key, &liveness); err != nil { @@ -1596,7 +1596,7 @@ func (tc *TestCluster) ReplicationMode() base.TestClusterReplicationMode { // ToggleReplicateQueues implements TestClusterInterface. func (tc *TestCluster) ToggleReplicateQueues(active bool) { - for _, s := range tc.Servers { + for _, s := range tc.servers { _ = s.GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { store.SetReplicateQueueActive(active) return nil @@ -1608,8 +1608,8 @@ func (tc *TestCluster) ToggleReplicateQueues(active bool) { // from all configured engines, filling in zeros when the value is not // found. 
func (tc *TestCluster) ReadIntFromStores(key roachpb.Key) []int64 { - results := make([]int64, len(tc.Servers)) - for i, server := range tc.Servers { + results := make([]int64, len(tc.servers)) + for i, server := range tc.servers { err := server.GetStores().(*kvserver.Stores).VisitStores(func(s *kvserver.Store) error { valRes, err := storage.MVCCGet(context.Background(), s.TODOEngine(), key, server.Clock().Now(), storage.MVCCGetOptions{}) @@ -1650,7 +1650,7 @@ func (tc *TestCluster) WaitForValues(t serverutils.TestFataler, key roachpb.Key, func (tc *TestCluster) GetFirstStoreFromServer( t serverutils.TestFataler, server int, ) *kvserver.Store { - ts := tc.Servers[server] + ts := tc.servers[server] store, pErr := ts.GetStores().(*kvserver.Stores).GetStore(ts.GetFirstStoreID()) if pErr != nil { t.Fatal(pErr) @@ -1660,7 +1660,7 @@ func (tc *TestCluster) GetFirstStoreFromServer( // Restart stops and then starts all the servers in the cluster. func (tc *TestCluster) Restart() error { - for i := range tc.Servers { + for i := range tc.servers { tc.StopServer(i) if err := tc.RestartServer(i); err != nil { return err @@ -1705,9 +1705,9 @@ func (tc *TestCluster) RestartServerWithInspect( } else { serverArgs.Addr = "" // Try and point the server to a live server in the cluster to join. - for i := range tc.Servers { + for i := range tc.servers { if !tc.ServerStopped(i) { - serverArgs.JoinAddr = tc.Servers[i].AdvRPCAddr() + serverArgs.JoinAddr = tc.servers[i].AdvRPCAddr() } } } @@ -1740,7 +1740,7 @@ func (tc *TestCluster) RestartServerWithInspect( // This ensures that the stopper's Stop() method can abort an async Start() call. tc.mu.Lock() defer tc.mu.Unlock() - tc.Servers[idx] = s + tc.servers[idx] = s tc.mu.serverStoppers[idx] = s.Stopper() if inspect != nil { @@ -1756,7 +1756,7 @@ func (tc *TestCluster) RestartServerWithInspect( if err != nil { return err } - tc.Conns[idx] = dbConn + tc.conns[idx] = dbConn return nil }(); err != nil { return err @@ -1776,7 +1776,7 @@ func (tc *TestCluster) RestartServerWithInspect( var err error for r.Next() { err = func() error { - for idx, s := range tc.Servers { + for idx, s := range tc.servers { if tc.ServerStopped(idx) { continue } @@ -1818,8 +1818,8 @@ func (tc *TestCluster) GetRaftLeader( var raftLeaderRepl *kvserver.Replica testutils.SucceedsSoon(t, func() error { var latestTerm uint64 - for i := range tc.Servers { - err := tc.Servers[i].GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { + for i := range tc.servers { + err := tc.servers[i].GetStores().(*kvserver.Stores).VisitStores(func(store *kvserver.Store) error { repl := store.LookupReplica(key) if repl == nil { // Replica does not exist on this store or there is no raft @@ -1924,7 +1924,7 @@ func (tc *TestCluster) SplitTable( func (tc *TestCluster) WaitForTenantCapabilities( t serverutils.TestFataler, tenID roachpb.TenantID, targetCaps map[tenantcapabilities.ID]string, ) { - for i, ts := range tc.Servers { + for i, ts := range tc.servers { serverutils.WaitForTenantCapabilities(t, ts, tenID, targetCaps, fmt.Sprintf("server %d", i)) } } diff --git a/pkg/testutils/testcluster/testcluster_test.go b/pkg/testutils/testcluster/testcluster_test.go index 82ecc584c63e..b3ab4df9485a 100644 --- a/pkg/testutils/testcluster/testcluster_test.go +++ b/pkg/testutils/testcluster/testcluster_test.go @@ -54,9 +54,9 @@ func TestManualReplication(t *testing.T) { }) defer tc.Stopper().Stop(context.Background()) - s0 := sqlutils.MakeSQLRunner(tc.Conns[0]) - s1 := sqlutils.MakeSQLRunner(tc.Conns[1]) 
- s2 := sqlutils.MakeSQLRunner(tc.Conns[2]) + s0 := sqlutils.MakeSQLRunner(tc.ServerConn(0)) + s1 := sqlutils.MakeSQLRunner(tc.ServerConn(1)) + s2 := sqlutils.MakeSQLRunner(tc.ServerConn(2)) s0.Exec(t, `CREATE DATABASE t`) s0.Exec(t, `CREATE TABLE test (k INT PRIMARY KEY, v INT)`) @@ -71,7 +71,7 @@ func TestManualReplication(t *testing.T) { s2.ExecRowsAffected(t, 3, `DELETE FROM test`) // Split the table to a new range. - kvDB := tc.Servers[0].DB() + kvDB := tc.Server(0).DB() tableDesc := desctestutils.TestingGetPublicTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") tableStartKey := keys.SystemSQLCodec.TablePrefix(uint32(tableDesc.GetID())) @@ -101,9 +101,9 @@ func TestManualReplication(t *testing.T) { } for i := 0; i < 3; i++ { if _, ok := tableRangeDesc.GetReplicaDescriptor( - tc.Servers[i].GetFirstStoreID()); !ok { + tc.Server(i).GetFirstStoreID()); !ok { t.Fatalf("expected replica on store %d, got %+v", - tc.Servers[i].GetFirstStoreID(), tableRangeDesc.InternalReplicas) + tc.Server(i).GetFirstStoreID(), tableRangeDesc.InternalReplicas) } } @@ -113,7 +113,7 @@ func TestManualReplication(t *testing.T) { if err != nil { t.Fatal(err) } - if leaseHolder.StoreID != tc.Servers[0].GetFirstStoreID() { + if leaseHolder.StoreID != tc.Server(0).GetFirstStoreID() { t.Fatalf("expected initial lease on server idx 0, but is on node: %+v", leaseHolder) } @@ -131,7 +131,7 @@ func TestManualReplication(t *testing.T) { if err != nil { t.Fatal(err) } - if leaseHolder.StoreID != tc.Servers[1].GetFirstStoreID() { + if leaseHolder.StoreID != tc.Server(1).GetFirstStoreID() { t.Fatalf("expected lease on server idx 1 (node: %d store: %d), but is on node: %+v", tc.Server(1).NodeID(), tc.Server(1).GetFirstStoreID(), @@ -297,7 +297,7 @@ func TestRestart(t *testing.T) { require.NoError(t, tc.WaitForFullReplication()) ids := make([]roachpb.ReplicationTarget, numServers) - for i := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { ids[i] = tc.Target(i) } @@ -322,7 +322,7 @@ func TestRestart(t *testing.T) { require.NoError(t, tc.Restart()) // Validates that the NodeID and StoreID remain the same after a restart. - for i := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { require.Equal(t, ids[i], tc.Target(i)) } diff --git a/pkg/upgrade/upgrademanager/manager_external_test.go b/pkg/upgrade/upgrademanager/manager_external_test.go index f965243f8fc3..bed48d892b26 100644 --- a/pkg/upgrade/upgrademanager/manager_external_test.go +++ b/pkg/upgrade/upgrademanager/manager_external_test.go @@ -180,7 +180,7 @@ RETURNING id;`, firstID).Scan(&secondID)) fakeJobBlockChan := <-ch // Ensure that we see the assertion error. - _, err = tc.Conns[0].ExecContext(ctx, `SET CLUSTER SETTING version = $1`, endCV.String()) + _, err = tc.ServerConn(0).ExecContext(ctx, `SET CLUSTER SETTING version = $1`, endCV.String()) require.Regexp(t, "found multiple non-terminal jobs for version", err) // Let the fake, erroneous job finish with an error. @@ -188,7 +188,7 @@ RETURNING id;`, firstID).Scan(&secondID)) require.Regexp(t, "boom", <-runErr) // See the TODO below for why we need this. - _, err = tc.Conns[0].ExecContext(ctx, `SET CLUSTER SETTING sql.txn_stats.sample_rate = 0`) + _, err = tc.ServerConn(0).ExecContext(ctx, `SET CLUSTER SETTING sql.txn_stats.sample_rate = 0`) require.NoError(t, err) // Launch a second upgrade which later we'll ensure does not kick off @@ -305,7 +305,8 @@ func TestMigrateUpdatesReplicaVersion(t *testing.T) { // Wait until all nodes have are considered live. 
nl := tc.Server(0).NodeLiveness().(*liveness.NodeLiveness) testutils.SucceedsSoon(t, func() error { - for _, s := range tc.Servers { + for i := 0; i < tc.NumServers(); i++ { + s := tc.Server(i) id := s.NodeID() if !nl.GetNodeVitalityFromCache(id).IsLive(livenesspb.Upgrade) { return errors.Newf("n%s not live yet", id) @@ -315,7 +316,7 @@ func TestMigrateUpdatesReplicaVersion(t *testing.T) { }) // Kick off the upgrade process. - _, err = tc.Conns[0].ExecContext(ctx, `SET CLUSTER SETTING version = $1`, endCV.String()) + _, err = tc.ServerConn(0).ExecContext(ctx, `SET CLUSTER SETTING version = $1`, endCV.String()) require.NoError(t, err) if got := repl.Version(); got != endCV { diff --git a/pkg/upgrade/upgrades/database_role_settings_table_user_id_migration_test.go b/pkg/upgrade/upgrades/database_role_settings_table_user_id_migration_test.go index 0433426864f8..9800cc4c1c29 100644 --- a/pkg/upgrade/upgrades/database_role_settings_table_user_id_migration_test.go +++ b/pkg/upgrade/upgrades/database_role_settings_table_user_id_migration_test.go @@ -81,10 +81,10 @@ func runTestDatabaseRoleSettingsUserIDMigration(t *testing.T, numUsers int) { tdb.CheckQueryResults(t, "SELECT count(*) FROM system.database_role_settings", [][]string{{strconv.Itoa(numUsers + 1)}}) // Run migrations. - _, err := tc.Conns[0].ExecContext(ctx, `SET CLUSTER SETTING version = $1`, + _, err := tc.ServerConn(0).ExecContext(ctx, `SET CLUSTER SETTING version = $1`, clusterversion.ByKey(clusterversion.V23_1DatabaseRoleSettingsHasRoleIDColumn).String()) require.NoError(t, err) - _, err = tc.Conns[0].ExecContext(ctx, `SET CLUSTER SETTING version = $1`, + _, err = tc.ServerConn(0).ExecContext(ctx, `SET CLUSTER SETTING version = $1`, clusterversion.ByKey(clusterversion.V23_1DatabaseRoleSettingsRoleIDColumnBackfilled).String()) require.NoError(t, err) diff --git a/pkg/upgrade/upgrades/external_connections_table_user_id_migration_test.go b/pkg/upgrade/upgrades/external_connections_table_user_id_migration_test.go index 07f6fc976db0..57b223912681 100644 --- a/pkg/upgrade/upgrades/external_connections_table_user_id_migration_test.go +++ b/pkg/upgrade/upgrades/external_connections_table_user_id_migration_test.go @@ -84,10 +84,10 @@ func runTestExternalConnectionsUserIDMigration(t *testing.T, numUsers int) { tdb.CheckQueryResults(t, "SELECT count(*) FROM system.external_connections", [][]string{{strconv.Itoa(numUsers)}}) // Run migrations. - _, err := tc.Conns[0].ExecContext(ctx, `SET CLUSTER SETTING version = $1`, + _, err := tc.ServerConn(0).ExecContext(ctx, `SET CLUSTER SETTING version = $1`, clusterversion.ByKey(clusterversion.V23_1ExternalConnectionsTableHasOwnerIDColumn).String()) require.NoError(t, err) - _, err = tc.Conns[0].ExecContext(ctx, `SET CLUSTER SETTING version = $1`, + _, err = tc.ServerConn(0).ExecContext(ctx, `SET CLUSTER SETTING version = $1`, clusterversion.ByKey(clusterversion.V23_1ExternalConnectionsTableOwnerIDColumnBackfilled).String()) require.NoError(t, err) diff --git a/pkg/upgrade/upgrades/json_forward_indexes_test.go b/pkg/upgrade/upgrades/json_forward_indexes_test.go index a3ceddc965fe..5abc20ffa0a9 100644 --- a/pkg/upgrade/upgrades/json_forward_indexes_test.go +++ b/pkg/upgrade/upgrades/json_forward_indexes_test.go @@ -54,7 +54,7 @@ func TestJSONForwardingIndexes(t *testing.T) { // Set the cluster version to 22.2 to test that with the legacy schema changer // we cannot create forward indexes on JSON columns. 
- _, err = tc.Conns[0].ExecContext(ctx, `SET CLUSTER SETTING version = $1`, + _, err = tc.ServerConn(0).ExecContext(ctx, `SET CLUSTER SETTING version = $1`, clusterversion.ByKey(clusterversion.V22_2).String()) require.NoError(t, err) @@ -100,7 +100,7 @@ func TestJSONForwardingIndexes(t *testing.T) { // Set the cluster version to 23.1 to test that with the declarative schema // changer we cannot create forward indexes on JSON columns. - _, err = tc.Conns[0].ExecContext(ctx, `SET CLUSTER SETTING version = $1`, + _, err = tc.ServerConn(0).ExecContext(ctx, `SET CLUSTER SETTING version = $1`, clusterversion.ByKey(clusterversion.V23_1).String()) require.NoError(t, err) @@ -142,7 +142,7 @@ func TestJSONForwardingIndexes(t *testing.T) { // Setting a cluster version that supports forward indexes on JSON // columns and expecting success when creating forward indexes. - _, err = tc.Conns[0].ExecContext(ctx, `SET CLUSTER SETTING version = $1`, + _, err = tc.ServerConn(0).ExecContext(ctx, `SET CLUSTER SETTING version = $1`, clusterversion.ByKey(clusterversion.V23_2).String()) require.NoError(t, err) diff --git a/pkg/upgrade/upgrades/role_members_ids_migration_test.go b/pkg/upgrade/upgrades/role_members_ids_migration_test.go index adc8c796e7bd..2546da81ce81 100644 --- a/pkg/upgrade/upgrades/role_members_ids_migration_test.go +++ b/pkg/upgrade/upgrades/role_members_ids_migration_test.go @@ -99,10 +99,10 @@ func runTestRoleMembersIDMigration(t *testing.T, numUsers int) { }) // Run migrations. - _, err := tc.Conns[0].ExecContext(ctx, `SET CLUSTER SETTING version = $1`, + _, err := tc.ServerConn(0).ExecContext(ctx, `SET CLUSTER SETTING version = $1`, clusterversion.ByKey(clusterversion.V23_1RoleMembersTableHasIDColumns).String()) require.NoError(t, err) - _, err = tc.Conns[0].ExecContext(ctx, `SET CLUSTER SETTING version = $1`, + _, err = tc.ServerConn(0).ExecContext(ctx, `SET CLUSTER SETTING version = $1`, clusterversion.ByKey(clusterversion.V23_1RoleMembersIDColumnsBackfilled).String()) require.NoError(t, err) @@ -133,7 +133,7 @@ func runTestRoleMembersIDMigration(t *testing.T, numUsers int) { FAMILY fam_4_role_id (role_id), FAMILY fam_5_member_id (member_id) )` - r := tc.Conns[0].QueryRow("SELECT create_statement FROM [SHOW CREATE TABLE system.role_members]") + r := tc.ServerConn(0).QueryRow("SELECT create_statement FROM [SHOW CREATE TABLE system.role_members]") var actualSchema string require.NoError(t, r.Scan(&actualSchema)) require.Equal(t, expectedSchema, actualSchema) diff --git a/pkg/upgrade/upgrades/system_privileges_index_migration_test.go b/pkg/upgrade/upgrades/system_privileges_index_migration_test.go index d169e068e690..86e3b317e1c3 100644 --- a/pkg/upgrade/upgrades/system_privileges_index_migration_test.go +++ b/pkg/upgrade/upgrades/system_privileges_index_migration_test.go @@ -45,7 +45,7 @@ func TestSystemPrivilegesIndexMigration(t *testing.T) { tdb := sqlutils.MakeSQLRunner(db) // Run migration. 
- _, err := tc.Conns[0].ExecContext( + _, err := tc.ServerConn(0).ExecContext( ctx, `SET CLUSTER SETTING version = $1`, clusterversion.ByKey(clusterversion.V23_1AlterSystemPrivilegesAddIndexOnPathAndUsername).String(), diff --git a/pkg/upgrade/upgrades/system_privileges_user_id_migration_test.go b/pkg/upgrade/upgrades/system_privileges_user_id_migration_test.go index 09bcb101a3bf..c0ca77d1ea9d 100644 --- a/pkg/upgrade/upgrades/system_privileges_user_id_migration_test.go +++ b/pkg/upgrade/upgrades/system_privileges_user_id_migration_test.go @@ -81,10 +81,10 @@ func runTestSystemPrivilegesUserIDMigration(t *testing.T, numUsers int) { tdb.CheckQueryResults(t, "SELECT count(*) FROM system.privileges", [][]string{{strconv.Itoa(numUsers + 1)}}) // Run migrations. - _, err := tc.Conns[0].ExecContext(ctx, `SET CLUSTER SETTING version = $1`, + _, err := tc.ServerConn(0).ExecContext(ctx, `SET CLUSTER SETTING version = $1`, clusterversion.ByKey(clusterversion.V23_1SystemPrivilegesTableHasUserIDColumn).String()) require.NoError(t, err) - _, err = tc.Conns[0].ExecContext(ctx, `SET CLUSTER SETTING version = $1`, + _, err = tc.ServerConn(0).ExecContext(ctx, `SET CLUSTER SETTING version = $1`, clusterversion.ByKey(clusterversion.V23_1SystemPrivilegesTableUserIDColumnBackfilled).String()) require.NoError(t, err) diff --git a/pkg/upgrade/upgrades/web_sessions_table_user_id_migration_test.go b/pkg/upgrade/upgrades/web_sessions_table_user_id_migration_test.go index 24007faee458..ffe60b0ef2ec 100644 --- a/pkg/upgrade/upgrades/web_sessions_table_user_id_migration_test.go +++ b/pkg/upgrade/upgrades/web_sessions_table_user_id_migration_test.go @@ -92,10 +92,10 @@ VALUES ( } // Run migrations. - _, err := tc.Conns[0].ExecContext(ctx, `SET CLUSTER SETTING version = $1`, + _, err := tc.ServerConn(0).ExecContext(ctx, `SET CLUSTER SETTING version = $1`, clusterversion.ByKey(clusterversion.V23_1WebSessionsTableHasUserIDColumn).String()) require.NoError(t, err) - _, err = tc.Conns[0].ExecContext(ctx, `SET CLUSTER SETTING version = $1`, + _, err = tc.ServerConn(0).ExecContext(ctx, `SET CLUSTER SETTING version = $1`, clusterversion.ByKey(clusterversion.V23_1WebSessionsTableUserIDColumnBackfilled).String()) require.NoError(t, err) diff --git a/pkg/util/tracing/collector/collector_test.go b/pkg/util/tracing/collector/collector_test.go index 890f33a288af..d66e9b0ef51e 100644 --- a/pkg/util/tracing/collector/collector_test.go +++ b/pkg/util/tracing/collector/collector_test.go @@ -132,8 +132,8 @@ func TestTracingCollectorGetSpanRecordings(t *testing.T) { traceCollector := collector.New( localTracer, func(ctx context.Context) ([]sqlinstance.InstanceInfo, error) { - instanceIDs := make([]sqlinstance.InstanceInfo, len(tc.Servers)) - for i := range tc.Servers { + instanceIDs := make([]sqlinstance.InstanceInfo, tc.NumServers()) + for i := 0; i < tc.NumServers(); i++ { instanceIDs[i].InstanceID = tc.Server(i).SQLInstanceID() } return instanceIDs, nil @@ -231,8 +231,9 @@ func TestClusterInflightTraces(t *testing.T) { tc.SystemLayer(0), tc.SystemLayer(1), } - systemDBs := make([]*gosql.DB, len(tc.Servers)) - for i, s := range tc.Servers { + systemDBs := make([]*gosql.DB, tc.NumServers()) + for i := 0; i < tc.NumServers(); i++ { + s := tc.Server(i) systemDBs[i] = s.SQLConn(t, "") } @@ -248,14 +249,15 @@ func TestClusterInflightTraces(t *testing.T) { switch config { case "single-tenant": testCases = []testCase{{ - servers: []serverutils.ApplicationLayerInterface{tc.Servers[0], tc.Servers[1]}, + servers: 
[]serverutils.ApplicationLayerInterface{tc.Server(0), tc.Server(1)}, dbs: systemDBs, }} case "shared-process": - tenants := make([]serverutils.ApplicationLayerInterface, len(tc.Servers)) - dbs := make([]*gosql.DB, len(tc.Servers)) - for i, s := range tc.Servers { + tenants := make([]serverutils.ApplicationLayerInterface, tc.NumServers()) + dbs := make([]*gosql.DB, tc.NumServers()) + for i := 0; i < tc.NumServers(); i++ { + s := tc.Server(i) tenant, db, err := s.StartSharedProcessTenant(ctx, base.TestSharedProcessTenantArgs{TenantName: "app"}) require.NoError(t, err) tenants[i] = tenant @@ -276,10 +278,10 @@ func TestClusterInflightTraces(t *testing.T) { case "separate-process": tenantID := roachpb.MustMakeTenantID(10) - tenants := make([]serverutils.ApplicationLayerInterface, len(tc.Servers)) - dbs := make([]*gosql.DB, len(tc.Servers)) - for i := range tc.Servers { - tenant, err := tc.Servers[i].StartTenant(ctx, base.TestTenantArgs{TenantID: tenantID}) + tenants := make([]serverutils.ApplicationLayerInterface, tc.NumServers()) + dbs := make([]*gosql.DB, tc.NumServers()) + for i := 0; i < tc.NumServers(); i++ { + tenant, err := tc.Server(i).StartTenant(ctx, base.TestTenantArgs{TenantID: tenantID}) require.NoError(t, err) tenants[i] = tenant dbs[i] = tenant.SQLConn(t, "")