diff --git a/pkg/base/node_id_test.go b/pkg/base/node_id_test.go index 485f4276a01f..f82f08f09037 100644 --- a/pkg/base/node_id_test.go +++ b/pkg/base/node_id_test.go @@ -30,7 +30,7 @@ func TestNodeIDContainer(t *testing.T) { } for i := 0; i < 2; i++ { - n.Set(context.TODO(), 5) + n.Set(context.Background(), 5) if val := n.Get(); val != 5 { t.Errorf("value should be 5, not %d", val) } diff --git a/pkg/bench/bench_test.go b/pkg/bench/bench_test.go index 534a6d179053..439806721e14 100644 --- a/pkg/bench/bench_test.go +++ b/pkg/bench/bench_test.go @@ -884,7 +884,7 @@ CREATE TABLE bench.insert_distinct ( fmt.Fprintf(&buf, "(%d, %d)", zipf.Uint64(), n) } - if _, err := db.DB.ExecContext(context.TODO(), buf.String()); err != nil { + if _, err := db.DB.ExecContext(context.Background(), buf.String()); err != nil { return err } } diff --git a/pkg/bench/pgbench_test.go b/pkg/bench/pgbench_test.go index 16bf627e77ff..b0dd8cd28e5b 100644 --- a/pkg/bench/pgbench_test.go +++ b/pkg/bench/pgbench_test.go @@ -109,7 +109,7 @@ func BenchmarkPgbenchExec(b *testing.B) { defer log.Scope(b).Close(b) b.Run("Cockroach", func(b *testing.B) { s, _, _ := serverutils.StartServer(b, base.TestServerArgs{Insecure: true}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) pgURL, cleanupFn := sqlutils.PGUrl( b, s.ServingSQLAddr(), "benchmarkCockroach", url.User(security.RootUser)) diff --git a/pkg/blobs/bench_test.go b/pkg/blobs/bench_test.go index 755a9e73bdd3..a168c4a6c213 100644 --- a/pkg/blobs/bench_test.go +++ b/pkg/blobs/bench_test.go @@ -62,7 +62,7 @@ func BenchmarkStreamingReadFile(b *testing.B) { rpcContext.TestingAllowNamedRPCToAnonymousServer = true factory := setUpService(b, rpcContext, localNodeID, remoteNodeID, localExternalDir, remoteExternalDir) - blobClient, err := factory(context.TODO(), remoteNodeID) + blobClient, err := factory(context.Background(), remoteNodeID) if err != nil { b.Fatal(err) } @@ -84,7 +84,7 @@ func benchmarkStreamingReadFile(b *testing.B, tc *benchmarkTestCase) { b.ResetTimer() b.SetBytes(tc.fileSize) for i := 0; i < b.N; i++ { - reader, err := tc.blobClient.ReadFile(context.TODO(), tc.fileName) + reader, err := tc.blobClient.ReadFile(context.Background(), tc.fileName) if err != nil { b.Fatal(err) } @@ -113,7 +113,7 @@ func BenchmarkStreamingWriteFile(b *testing.B) { rpcContext.TestingAllowNamedRPCToAnonymousServer = true factory := setUpService(b, rpcContext, localNodeID, remoteNodeID, localExternalDir, remoteExternalDir) - blobClient, err := factory(context.TODO(), remoteNodeID) + blobClient, err := factory(context.Background(), remoteNodeID) if err != nil { b.Fatal(err) } @@ -134,7 +134,7 @@ func benchmarkStreamingWriteFile(b *testing.B, tc *benchmarkTestCase) { b.ResetTimer() b.SetBytes(tc.fileSize) for i := 0; i < b.N; i++ { - err := tc.blobClient.WriteFile(context.TODO(), tc.fileName, bytes.NewReader(content)) + err := tc.blobClient.WriteFile(context.Background(), tc.fileName, bytes.NewReader(content)) if err != nil { b.Fatal(err) } diff --git a/pkg/blobs/client_test.go b/pkg/blobs/client_test.go index d16ee278320a..ce3a356fbcfb 100644 --- a/pkg/blobs/client_test.go +++ b/pkg/blobs/client_test.go @@ -41,7 +41,7 @@ func createTestResources(t testing.TB) (string, string, *stop.Stopper, func()) { return localExternalDir, remoteExternalDir, stopper, func() { cleanupFn() cleanupFn2() - stopper.Stop(context.TODO()) + stopper.Stop(context.Background()) leaktest.AfterTest(t)() } } @@ -173,7 +173,7 @@ func TestBlobClientReadFile(t *testing.T) { }, } { 
t.Run(tc.name, func(t *testing.T) { - ctx := context.TODO() + ctx := context.Background() blobClient, err := blobClientFactory(ctx, tc.nodeID) if err != nil { t.Fatal(err) @@ -244,7 +244,7 @@ func TestBlobClientWriteFile(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - ctx := context.TODO() + ctx := context.Background() blobClient, err := blobClientFactory(ctx, tc.nodeID) if err != nil { t.Fatal(err) @@ -359,7 +359,7 @@ func TestBlobClientList(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - ctx := context.TODO() + ctx := context.Background() blobClient, err := blobClientFactory(ctx, tc.nodeID) if err != nil { t.Fatal(err) @@ -441,7 +441,7 @@ func TestBlobClientDeleteFrom(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - ctx := context.TODO() + ctx := context.Background() blobClient, err := blobClientFactory(ctx, tc.nodeID) if err != nil { t.Fatal(err) @@ -517,7 +517,7 @@ func TestBlobClientStat(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - ctx := context.TODO() + ctx := context.Background() blobClient, err := blobClientFactory(ctx, tc.nodeID) if err != nil { t.Fatal(err) diff --git a/pkg/blobs/service_test.go b/pkg/blobs/service_test.go index 7da1a1ff1ec1..8a8288514368 100644 --- a/pkg/blobs/service_test.go +++ b/pkg/blobs/service_test.go @@ -34,7 +34,7 @@ func TestBlobServiceList(t *testing.T) { if err != nil { t.Fatal(err) } - ctx := context.TODO() + ctx := context.Background() t.Run("list-correct-files", func(t *testing.T) { resp, err := service.List(ctx, &blobspb.GlobRequest{ @@ -78,7 +78,7 @@ func TestBlobServiceDelete(t *testing.T) { if err != nil { t.Fatal(err) } - ctx := context.TODO() + ctx := context.Background() t.Run("delete-correct-file", func(t *testing.T) { _, err := service.Delete(ctx, &blobspb.DeleteRequest{ @@ -127,7 +127,7 @@ func TestBlobServiceStat(t *testing.T) { if err != nil { t.Fatal(err) } - ctx := context.TODO() + ctx := context.Background() t.Run("get-correct-file-size", func(t *testing.T) { resp, err := service.Stat(ctx, &blobspb.StatRequest{ diff --git a/pkg/ccl/backupccl/backup_test.go b/pkg/ccl/backupccl/backup_test.go index d4e73257ccf9..f87dffdc9c8e 100644 --- a/pkg/ccl/backupccl/backup_test.go +++ b/pkg/ccl/backupccl/backup_test.go @@ -90,7 +90,7 @@ func backupRestoreTestSetupEmptyWithParams( sqlDB = sqlutils.MakeSQLRunner(tc.Conns[0]) cleanupFn := func() { - tc.Stopper().Stop(context.TODO()) // cleans up in memory storage's auxiliary dirs + tc.Stopper().Stop(context.Background()) // cleans up in memory storage's auxiliary dirs } return ctx, tc, sqlDB, cleanupFn @@ -136,8 +136,8 @@ func backupRestoreTestSetupWithParams( } cleanupFn := func() { - tc.Stopper().Stop(context.TODO()) // cleans up in memory storage's auxiliary dirs - dirCleanupFn() // cleans up dir, which is the nodelocal:// storage + tc.Stopper().Stop(context.Background()) // cleans up in memory storage's auxiliary dirs + dirCleanupFn() // cleans up dir, which is the nodelocal:// storage } return ctx, tc, sqlDB, dir, cleanupFn @@ -1203,7 +1203,7 @@ func TestBackupRestoreControlJob(t *testing.T) { // than make a huge table, dial down the zone config for the bank table. 
init := func(tc *testcluster.TestCluster) { config.TestingSetupZoneConfigHook(tc.Stopper()) - v, err := tc.Servers[0].DB().Get(context.TODO(), keys.SystemSQLCodec.DescIDSequenceKey()) + v, err := tc.Servers[0].DB().Get(context.Background(), keys.SystemSQLCodec.DescIDSequenceKey()) if err != nil { t.Fatal(err) } @@ -1452,7 +1452,7 @@ func TestBackupRestoreInterleaved(t *testing.T) { t.Run("all tables in interleave hierarchy", func(t *testing.T) { tcRestore := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) - defer tcRestore.Stopper().Stop(context.TODO()) + defer tcRestore.Stopper().Stop(context.Background()) sqlDBRestore := sqlutils.MakeSQLRunner(tcRestore.Conns[0]) // Create a dummy database to verify rekeying is correctly performed. sqlDBRestore.Exec(t, `CREATE DATABASE ignored`) @@ -1490,7 +1490,7 @@ func TestBackupRestoreInterleaved(t *testing.T) { sqlDB.ExpectErr(t, "without interleave parent", `BACKUP data.i0 TO $1`, localFoo) tcRestore := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) - defer tcRestore.Stopper().Stop(context.TODO()) + defer tcRestore.Stopper().Stop(context.Background()) sqlDBRestore := sqlutils.MakeSQLRunner(tcRestore.Conns[0]) sqlDBRestore.Exec(t, `CREATE DATABASE data`) sqlDBRestore.ExpectErr( @@ -1503,7 +1503,7 @@ func TestBackupRestoreInterleaved(t *testing.T) { sqlDB.ExpectErr(t, "without interleave child", `BACKUP data.bank TO $1`, localFoo) tcRestore := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) - defer tcRestore.Stopper().Stop(context.TODO()) + defer tcRestore.Stopper().Stop(context.Background()) sqlDBRestore := sqlutils.MakeSQLRunner(tcRestore.Conns[0]) sqlDBRestore.Exec(t, `CREATE DATABASE data`) sqlDBRestore.ExpectErr(t, "without interleave child", `RESTORE TABLE data.bank FROM $1`, localFoo) @@ -1591,7 +1591,7 @@ func TestBackupRestoreCrossTableReferences(t *testing.T) { t.Run("restore everything to new cluster", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) db := sqlutils.MakeSQLRunner(tc.Conns[0]) db.Exec(t, createStore) @@ -1629,7 +1629,7 @@ func TestBackupRestoreCrossTableReferences(t *testing.T) { t.Run("restore customers to new cluster", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) db := sqlutils.MakeSQLRunner(tc.Conns[0]) db.Exec(t, createStore) db.Exec(t, `RESTORE store.customers, store.orders FROM $1`, localFoo) @@ -1647,7 +1647,7 @@ func TestBackupRestoreCrossTableReferences(t *testing.T) { t.Run("restore orders to new cluster", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) db := sqlutils.MakeSQLRunner(tc.Conns[0]) db.Exec(t, createStore) @@ -1667,7 +1667,7 @@ func TestBackupRestoreCrossTableReferences(t *testing.T) { t.Run("restore receipts to new cluster", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) db := sqlutils.MakeSQLRunner(tc.Conns[0]) db.Exec(t, createStore) db.Exec(t, `RESTORE store.receipts FROM $1 WITH OPTIONS 
('skip_missing_foreign_keys')`, localFoo) @@ -1685,7 +1685,7 @@ func TestBackupRestoreCrossTableReferences(t *testing.T) { t.Run("restore receipts and customers to new cluster", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) db := sqlutils.MakeSQLRunner(tc.Conns[0]) db.Exec(t, createStore) db.Exec(t, `RESTORE store.receipts, store.customers FROM $1 WITH OPTIONS ('skip_missing_foreign_keys')`, localFoo) @@ -1715,7 +1715,7 @@ func TestBackupRestoreCrossTableReferences(t *testing.T) { t.Run("restore simple view", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) db := sqlutils.MakeSQLRunner(tc.Conns[0]) db.Exec(t, createStore) db.ExpectErr( @@ -1746,7 +1746,7 @@ func TestBackupRestoreCrossTableReferences(t *testing.T) { t.Run("restore multi-table view", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) db := sqlutils.MakeSQLRunner(tc.Conns[0]) db.ExpectErr( @@ -1806,7 +1806,7 @@ func TestBackupRestoreCrossTableReferences(t *testing.T) { t.Run("restore and skip missing views", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) db := sqlutils.MakeSQLRunner(tc.Conns[0]) // Test cases where, after filtering out views that can't be restored, there are no other tables to restore @@ -1923,7 +1923,7 @@ func TestBackupRestoreIncremental(t *testing.T) { // Start a new cluster to restore into. { restoreTC := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) - defer restoreTC.Stopper().Stop(context.TODO()) + defer restoreTC.Stopper().Stop(context.Background()) sqlDBRestore := sqlutils.MakeSQLRunner(restoreTC.Conns[0]) sqlDBRestore.Exec(t, `CREATE DATABASE data`) @@ -2015,7 +2015,7 @@ func TestBackupRestorePartitionedIncremental(t *testing.T) { // Start a new cluster to restore into. { restoreTC := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) - defer restoreTC.Stopper().Stop(context.TODO()) + defer restoreTC.Stopper().Stop(context.Background()) sqlDBRestore := sqlutils.MakeSQLRunner(restoreTC.Conns[0]) sqlDBRestore.Exec(t, `CREATE DATABASE data`) @@ -2098,7 +2098,7 @@ func TestBackupRestoreWithConcurrentWrites(t *testing.T) { var allowErrors int32 for task := 0; task < numBackgroundTasks; task++ { taskNum := task - tc.Stopper().RunWorker(context.TODO(), func(context.Context) { + tc.Stopper().RunWorker(context.Background(), func(context.Context) { conn := tc.Conns[taskNum%len(tc.Conns)] // Use different sql gateways to make sure leasing is right. 
if err := startBackgroundWrites(tc.Stopper(), conn, rows, bgActivity, &allowErrors); err != nil { @@ -2797,7 +2797,7 @@ func TestRestoredPrivileges(t *testing.T) { t.Run("into fresh db", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) sqlDBRestore := sqlutils.MakeSQLRunner(tc.Conns[0]) sqlDBRestore.Exec(t, `CREATE DATABASE data`) sqlDBRestore.Exec(t, `RESTORE data.bank FROM $1`, localFoo) @@ -2806,7 +2806,7 @@ func TestRestoredPrivileges(t *testing.T) { t.Run("into db with added grants", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) sqlDBRestore := sqlutils.MakeSQLRunner(tc.Conns[0]) sqlDBRestore.Exec(t, `CREATE DATABASE data`) sqlDBRestore.Exec(t, `CREATE USER someone`) @@ -2817,7 +2817,7 @@ func TestRestoredPrivileges(t *testing.T) { t.Run("into db on db grants", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) sqlDBRestore := sqlutils.MakeSQLRunner(tc.Conns[0]) sqlDBRestore.Exec(t, `CREATE USER someone`) sqlDBRestore.Exec(t, `RESTORE DATABASE data2 FROM $1`, localFoo) @@ -2948,7 +2948,7 @@ func TestRestoreDatabaseVersusTable(t *testing.T) { t.Run("incomplete-db", func(t *testing.T) { tcRestore := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) - defer tcRestore.Stopper().Stop(context.TODO()) + defer tcRestore.Stopper().Stop(context.Background()) sqlDB := sqlutils.MakeSQLRunner(tcRestore.Conns[0]) sqlDB.Exec(t, `create database d5`) @@ -2984,14 +2984,14 @@ func TestRestoreDatabaseVersusTable(t *testing.T) { t.Run("db", func(t *testing.T) { tcRestore := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) - defer tcRestore.Stopper().Stop(context.TODO()) + defer tcRestore.Stopper().Stop(context.Background()) sqlDB := sqlutils.MakeSQLRunner(tcRestore.Conns[0]) sqlDB.Exec(t, `RESTORE DATABASE data, d2, d3 FROM $1`, localFoo) }) t.Run("db-exists", func(t *testing.T) { tcRestore := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) - defer tcRestore.Stopper().Stop(context.TODO()) + defer tcRestore.Stopper().Stop(context.Background()) sqlDB := sqlutils.MakeSQLRunner(tcRestore.Conns[0]) sqlDB.Exec(t, `CREATE DATABASE data`) @@ -3000,7 +3000,7 @@ func TestRestoreDatabaseVersusTable(t *testing.T) { t.Run("tables", func(t *testing.T) { tcRestore := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) - defer tcRestore.Stopper().Stop(context.TODO()) + defer tcRestore.Stopper().Stop(context.Background()) sqlDB := sqlutils.MakeSQLRunner(tcRestore.Conns[0]) sqlDB.Exec(t, `CREATE DATABASE data`) @@ -3009,7 +3009,7 @@ func TestRestoreDatabaseVersusTable(t *testing.T) { t.Run("tables-needs-db", func(t *testing.T) { tcRestore := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) - defer tcRestore.Stopper().Stop(context.TODO()) + defer tcRestore.Stopper().Stop(context.Background()) sqlDB := sqlutils.MakeSQLRunner(tcRestore.Conns[0]) sqlDB.ExpectErr(t, "needs to exist", `RESTORE data.*, d4.* FROM $1`, localFoo) @@ -3017,7 +3017,7 @@ func TestRestoreDatabaseVersusTable(t *testing.T) { 
t.Run("into_db", func(t *testing.T) { tcRestore := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) - defer tcRestore.Stopper().Stop(context.TODO()) + defer tcRestore.Stopper().Stop(context.Background()) sqlDB := sqlutils.MakeSQLRunner(tcRestore.Conns[0]) sqlDB.ExpectErr( @@ -3341,7 +3341,7 @@ func TestBackupRestoreSequence(t *testing.T) { t.Run("restore both table & sequence to a new cluster", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) newDB := sqlutils.MakeSQLRunner(tc.Conns[0]) newDB.Exec(t, `RESTORE DATABASE data FROM $1`, backupLoc) @@ -3369,7 +3369,7 @@ func TestBackupRestoreSequence(t *testing.T) { t.Run("restore just the table to a new cluster", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) newDB := sqlutils.MakeSQLRunner(tc.Conns[0]) newDB.Exec(t, `CREATE DATABASE data`) @@ -3402,7 +3402,7 @@ func TestBackupRestoreSequence(t *testing.T) { t.Run("restore just the sequence to a new cluster", func(t *testing.T) { tc := testcluster.StartTestCluster(t, singleNode, base.TestClusterArgs{ServerArgs: args}) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) newDB := sqlutils.MakeSQLRunner(tc.Conns[0]) newDB.Exec(t, `CREATE DATABASE data`) diff --git a/pkg/ccl/backupccl/targets_test.go b/pkg/ccl/backupccl/targets_test.go index 57f14a17d6f6..7888fbaaf694 100644 --- a/pkg/ccl/backupccl/targets_test.go +++ b/pkg/ccl/backupccl/targets_test.go @@ -118,7 +118,7 @@ func TestDescriptorsMatchingTargets(t *testing.T) { } targets := stmt.AST.(*tree.Grant).Targets - matched, err := descriptorsMatchingTargets(context.TODO(), + matched, err := descriptorsMatchingTargets(context.Background(), test.sessionDatabase, searchPath, descriptors, targets) if test.err != "" { if !testutils.IsError(err, test.err) { diff --git a/pkg/ccl/changefeedccl/changefeed_test.go b/pkg/ccl/changefeedccl/changefeed_test.go index 7fcc6947f39f..052d6596ee00 100644 --- a/pkg/ccl/changefeedccl/changefeed_test.go +++ b/pkg/ccl/changefeedccl/changefeed_test.go @@ -984,7 +984,7 @@ func TestChangefeedStopOnSchemaChange(t *testing.T) { t.Helper() for { if ev, err := f.Next(); err != nil { - log.Infof(context.TODO(), "got event %v %v", ev, err) + log.Infof(context.Background(), "got event %v %v", ev, err) tsStr = timestampStrFromError(t, err) _ = f.Close() return tsStr @@ -2412,7 +2412,7 @@ func TestChangefeedProtectedTimestampsVerificationFails(t *testing.T) { args.Knobs.Store = storeKnobs }, func(t *testing.T, db *gosql.DB, f cdctest.TestFeedFactory) { - ctx := context.TODO() + ctx := context.Background() sqlDB := sqlutils.MakeSQLRunner(db) sqlDB.Exec(t, `CREATE TABLE foo (a INT PRIMARY KEY, b STRING)`) _, err := f.Feed(`CREATE CHANGEFEED FOR foo WITH resolved`) diff --git a/pkg/ccl/changefeedccl/encoder_test.go b/pkg/ccl/changefeedccl/encoder_test.go index 8809cf83f8de..512e56c4f9ea 100644 --- a/pkg/ccl/changefeedccl/encoder_test.go +++ b/pkg/ccl/changefeedccl/encoder_test.go @@ -226,10 +226,10 @@ func TestEncoders(t *testing.T) { prevDatums: nil, prevTableDesc: tableDesc, } - keyInsert, err := e.EncodeKey(context.TODO(), rowInsert) + keyInsert, err := e.EncodeKey(context.Background(), rowInsert) require.NoError(t, err) keyInsert = append([]byte(nil), 
keyInsert...) - valueInsert, err := e.EncodeValue(context.TODO(), rowInsert) + valueInsert, err := e.EncodeValue(context.Background(), rowInsert) require.NoError(t, err) require.Equal(t, expected.insert, rowStringFn(keyInsert, valueInsert)) @@ -241,14 +241,14 @@ func TestEncoders(t *testing.T) { tableDesc: tableDesc, prevTableDesc: tableDesc, } - keyDelete, err := e.EncodeKey(context.TODO(), rowDelete) + keyDelete, err := e.EncodeKey(context.Background(), rowDelete) require.NoError(t, err) keyDelete = append([]byte(nil), keyDelete...) - valueDelete, err := e.EncodeValue(context.TODO(), rowDelete) + valueDelete, err := e.EncodeValue(context.Background(), rowDelete) require.NoError(t, err) require.Equal(t, expected.delete, rowStringFn(keyDelete, valueDelete)) - resolved, err := e.EncodeResolvedTimestamp(context.TODO(), tableDesc.Name, ts) + resolved, err := e.EncodeResolvedTimestamp(context.Background(), tableDesc.Name, ts) require.NoError(t, err) require.Equal(t, expected.resolved, resolvedStringFn(resolved)) }) diff --git a/pkg/ccl/changefeedccl/helpers_test.go b/pkg/ccl/changefeedccl/helpers_test.go index d1385ba17d45..4a130142b671 100644 --- a/pkg/ccl/changefeedccl/helpers_test.go +++ b/pkg/ccl/changefeedccl/helpers_test.go @@ -60,7 +60,7 @@ func readNextMessages(t testing.TB, f cdctest.TestFeed, numMessages int, stripTs for len(actual) < numMessages { m, err := f.Next() if log.V(1) { - log.Infof(context.TODO(), `%v %s: %s->%s`, err, m.Topic, m.Key, m.Value) + log.Infof(context.Background(), `%v %s: %s->%s`, err, m.Topic, m.Key, m.Value) } if err != nil { t.Fatal(err) @@ -352,7 +352,7 @@ func forceTableGC( database, table string, ) { t.Helper() - if err := tsi.ForceTableGC(context.TODO(), database, table, tsi.Clock().Now()); err != nil { + if err := tsi.ForceTableGC(context.Background(), database, table, tsi.Clock().Now()); err != nil { t.Fatal(err) } } diff --git a/pkg/ccl/followerreadsccl/followerreads_test.go b/pkg/ccl/followerreadsccl/followerreads_test.go index 1b1ecc6a9cbb..e1610df71b1d 100644 --- a/pkg/ccl/followerreadsccl/followerreads_test.go +++ b/pkg/ccl/followerreadsccl/followerreads_test.go @@ -152,13 +152,13 @@ func TestOracleFactory(t *testing.T) { kvserver.FollowerReadsEnabled.Override(&st.SV, true) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) c := kv.NewDB(log.AmbientContext{ Tracer: tracing.NewTracer(), }, kv.MockTxnSenderFactory{}, hlc.NewClock(hlc.UnixNano, time.Nanosecond)) - txn := kv.NewTxn(context.TODO(), c, 0) + txn := kv.NewTxn(context.Background(), c, 0) of := replicaoracle.NewOracleFactory(followerReadAwareChoice, replicaoracle.Config{ Settings: st, RPCContext: rpcContext, @@ -167,7 +167,7 @@ func TestOracleFactory(t *testing.T) { old := hlc.Timestamp{ WallTime: timeutil.Now().Add(2 * expectedFollowerReadOffset).UnixNano(), } - txn.SetFixedTimestamp(context.TODO(), old) + txn.SetFixedTimestamp(context.Background(), old) followerReadOracle := of.Oracle(txn) if reflect.TypeOf(followerReadOracle) == reflect.TypeOf(noFollowerReadOracle) { t.Fatalf("expected types of %T and %T to differ", followerReadOracle, diff --git a/pkg/ccl/importccl/exportcsv_test.go b/pkg/ccl/importccl/exportcsv_test.go index 504ed3600933..56e4a5a12a44 100644 --- a/pkg/ccl/importccl/exportcsv_test.go +++ b/pkg/ccl/importccl/exportcsv_test.go @@ -51,7 +51,7 @@ func setupExportableBank(t *testing.T, nodes, rows int) 
(*sqlutils.SQLRunner, st } config.TestingSetupZoneConfigHook(tc.Stopper()) - v, err := tc.Servers[0].DB().Get(context.TODO(), keys.SystemSQLCodec.DescIDSequenceKey()) + v, err := tc.Servers[0].DB().Get(context.Background(), keys.SystemSQLCodec.DescIDSequenceKey()) if err != nil { t.Fatal(err) } diff --git a/pkg/ccl/importccl/import_processor_test.go b/pkg/ccl/importccl/import_processor_test.go index a6e521d0e506..76c716287950 100644 --- a/pkg/ccl/importccl/import_processor_test.go +++ b/pkg/ccl/importccl/import_processor_test.go @@ -526,7 +526,7 @@ func queryJob(db sqlutils.DBHandle, jobID int64) (js jobState) { } var progressBytes, payloadBytes []byte js.err = db.QueryRowContext( - context.TODO(), "SELECT status, payload, progress FROM system.jobs WHERE id = $1", jobID).Scan( + context.Background(), "SELECT status, payload, progress FROM system.jobs WHERE id = $1", jobID).Scan( &js.status, &payloadBytes, &progressBytes) if js.err != nil { return @@ -584,7 +584,7 @@ func TestCSVImportCanBeResumed(t *testing.T) { }, }) registry := s.JobRegistry().(*jobs.Registry) - ctx := context.TODO() + ctx := context.Background() defer s.Stopper().Stop(ctx) sqlDB := sqlutils.MakeSQLRunner(db) @@ -690,7 +690,7 @@ func TestCSVImportMarksFilesFullyProcessed(t *testing.T) { }, }) registry := s.JobRegistry().(*jobs.Registry) - ctx := context.TODO() + ctx := context.Background() defer s.Stopper().Stop(ctx) sqlDB := sqlutils.MakeSQLRunner(db) diff --git a/pkg/ccl/importccl/import_stmt_test.go b/pkg/ccl/importccl/import_stmt_test.go index f13a753c5550..d7ad7fcaf1ba 100644 --- a/pkg/ccl/importccl/import_stmt_test.go +++ b/pkg/ccl/importccl/import_stmt_test.go @@ -2596,7 +2596,7 @@ var _ importRowProducer = &csvBenchmarkStream{} // BenchmarkConvertRecord-16 500000 2376 ns/op 50.49 MB/s 3606 B/op 101 allocs/op // BenchmarkConvertRecord-16 500000 2390 ns/op 50.20 MB/s 3606 B/op 101 allocs/op func BenchmarkCSVConvertRecord(b *testing.B) { - ctx := context.TODO() + ctx := context.Background() tpchLineItemDataRows := [][]string{ {"1", "155190", "7706", "1", "17", "21168.23", "0.04", "0.02", "N", "O", "1996-03-13", "1996-02-12", "1996-03-22", "DELIVER IN PERSON", "TRUCK", "egular courts above the"}, @@ -2691,7 +2691,7 @@ func BenchmarkCSVConvertRecord(b *testing.B) { // BenchmarkDelimitedConvertRecord-16 500000 3004 ns/op 39.94 MB/s // BenchmarkDelimitedConvertRecord-16 500000 2966 ns/op 40.45 MB/s func BenchmarkDelimitedConvertRecord(b *testing.B) { - ctx := context.TODO() + ctx := context.Background() tpchLineItemDataRows := [][]string{ {"1", "155190", "7706", "1", "17", "21168.23", "0.04", "0.02", "N", "O", "1996-03-13", "1996-02-12", "1996-03-22", "DELIVER IN PERSON", "TRUCK", "egular courts above the"}, diff --git a/pkg/ccl/importccl/read_import_avro_test.go b/pkg/ccl/importccl/read_import_avro_test.go index bd55b62a2754..3af2074deec8 100644 --- a/pkg/ccl/importccl/read_import_avro_test.go +++ b/pkg/ccl/importccl/read_import_avro_test.go @@ -536,7 +536,7 @@ func BenchmarkBinaryJSONImport(b *testing.B) { } func benchmarkAvroImport(b *testing.B, avroOpts roachpb.AvroOptions, testData string) { - ctx := context.TODO() + ctx := context.Background() b.SetBytes(120) // Raw input size. With 8 indexes, expect more on output side. 
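The mechanical change running through this diff, above and below, is the replacement of `context.TODO()` with `context.Background()` in tests and benchmarks. Both return the same empty, non-nil root `Context`; the difference is intent: `TODO` flags a call site where the right context has not been decided yet, while `Background` states that the call site deliberately has no parent context, which is exactly the situation at the top of a test or in a deferred teardown. A minimal, standard-library-only sketch of the shape the diff converges on (the helper names here are illustrative, not from the repo):

```go
package example

import (
	"context"
	"testing"
	"time"
)

// TestDeliberateRootContext: tests start from context.Background() and
// derive bounded children from it, rather than leaving context.TODO()
// placeholders behind.
func TestDeliberateRootContext(t *testing.T) {
	ctx := context.Background() // deliberate root, not a placeholder

	// Derive a bounded child context for an individual operation.
	opCtx, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()

	if err := doSomething(opCtx); err != nil {
		t.Fatal(err)
	}
}

// doSomething stands in for any context-accepting call under test.
func doSomething(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		return nil
	}
}
```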
diff --git a/pkg/ccl/importccl/read_import_mysql_test.go b/pkg/ccl/importccl/read_import_mysql_test.go index 60b38d0be2a0..2e70d7fd2f37 100644 --- a/pkg/ccl/importccl/read_import_mysql_test.go +++ b/pkg/ccl/importccl/read_import_mysql_test.go @@ -37,7 +37,7 @@ func TestMysqldumpDataReader(t *testing.T) { files := getMysqldumpTestdata(t) - ctx := context.TODO() + ctx := context.Background() table := descForTable(t, `CREATE TABLE simple (i INT PRIMARY KEY, s text, b bytea)`, 10, 20, NoFKs) tables := map[string]*execinfrapb.ReadImportDataSpec_ImportTable{"simple": {Desc: table}} @@ -110,7 +110,7 @@ func readMysqlCreateFrom( } defer f.Close() - tbl, err := readMysqlCreateTable(context.TODO(), f, testEvalCtx, nil, id, expectedParent, name, fks, map[sqlbase.ID]int64{}) + tbl, err := readMysqlCreateTable(context.Background(), f, testEvalCtx, nil, id, expectedParent, name, fks, map[sqlbase.ID]int64{}) if err != nil { t.Fatal(err) } diff --git a/pkg/ccl/importccl/testutils_test.go b/pkg/ccl/importccl/testutils_test.go index 24c76f109fcf..3bcbd09829a1 100644 --- a/pkg/ccl/importccl/testutils_test.go +++ b/pkg/ccl/importccl/testutils_test.go @@ -69,7 +69,7 @@ func descForTable( } else { stmt = parsed[0].AST.(*tree.CreateTable) } - table, err := MakeSimpleTableDescriptor(context.TODO(), settings, stmt, parent, id, fks, nanos) + table, err := MakeSimpleTableDescriptor(context.Background(), settings, stmt, parent, id, fks, nanos) if err != nil { t.Fatalf("could not interpret %q: %v", create, err) } diff --git a/pkg/ccl/partitionccl/drop_test.go b/pkg/ccl/partitionccl/drop_test.go index a3f1e96e8fc3..3f2da720b5c5 100644 --- a/pkg/ccl/partitionccl/drop_test.go +++ b/pkg/ccl/partitionccl/drop_test.go @@ -122,7 +122,7 @@ func TestDropIndexWithZoneConfigCCL(t *testing.T) { // Wait for index drop to complete so zone configs are updated. 
testutils.SucceedsSoon(t, func() error { - if kvs, err := kvDB.Scan(context.TODO(), indexSpan.Key, indexSpan.EndKey, 0); err != nil { + if kvs, err := kvDB.Scan(context.Background(), indexSpan.Key, indexSpan.EndKey, 0); err != nil { return err } else if l := 0; len(kvs) != l { return errors.Errorf("expected %d key value pairs, but got %d", l, len(kvs)) diff --git a/pkg/ccl/partitionccl/zone_test.go b/pkg/ccl/partitionccl/zone_test.go index 090249316803..6e21b08869b7 100644 --- a/pkg/ccl/partitionccl/zone_test.go +++ b/pkg/ccl/partitionccl/zone_test.go @@ -34,7 +34,7 @@ func TestValidIndexPartitionSetShowZones(t *testing.T) { defer leaktest.AfterTest(t)() s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) sqlDB := sqlutils.MakeSQLRunner(db) sqlDB.Exec(t, ` @@ -234,7 +234,7 @@ func TestInvalidIndexPartitionSetShowZones(t *testing.T) { defer leaktest.AfterTest(t)() s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) for i, tc := range []struct { query string diff --git a/pkg/ccl/serverccl/role_authentication_test.go b/pkg/ccl/serverccl/role_authentication_test.go index 23d52c31201c..7fe62378e2ba 100644 --- a/pkg/ccl/serverccl/role_authentication_test.go +++ b/pkg/ccl/serverccl/role_authentication_test.go @@ -33,7 +33,7 @@ func TestVerifyPassword(t *testing.T) { defer s.Stopper().Stop(ctx) ie := sql.MakeInternalExecutor( - context.TODO(), + context.Background(), s.(*server.TestServer).Server.PGServer().SQLServer, sql.MemoryMetrics{}, s.ExecutorConfig().(sql.ExecutorConfig).Settings, @@ -112,7 +112,7 @@ func TestVerifyPassword(t *testing.T) { {"cthon98", "12345", true, ""}, } { t.Run("", func(t *testing.T) { - exists, canLogin, pwRetrieveFn, validUntilFn, err := sql.GetUserHashedPassword(context.TODO(), &ie, tc.username) + exists, canLogin, pwRetrieveFn, validUntilFn, err := sql.GetUserHashedPassword(context.Background(), &ie, tc.username) if err != nil { t.Errorf( diff --git a/pkg/cli/cli_test.go b/pkg/cli/cli_test.go index 64d2d1e57f71..cda8d70c5de3 100644 --- a/pkg/cli/cli_test.go +++ b/pkg/cli/cli_test.go @@ -142,8 +142,8 @@ func newCLITest(params cliTestParams) cliTest { } c.TestServer = s.(*server.TestServer) - log.Infof(context.TODO(), "server started at %s", c.ServingRPCAddr()) - log.Infof(context.TODO(), "SQL listener at %s", c.ServingSQLAddr()) + log.Infof(context.Background(), "server started at %s", c.ServingRPCAddr()) + log.Infof(context.Background(), "SQL listener at %s", c.ServingSQLAddr()) } baseCfg.User = security.NodeUser @@ -171,7 +171,7 @@ func setCLIDefaultsForTests() { // stopServer stops the test server. func (c *cliTest) stopServer() { if c.TestServer != nil { - log.Infof(context.TODO(), "stopping server at %s / %s", + log.Infof(context.Background(), "stopping server at %s / %s", c.ServingRPCAddr(), c.ServingSQLAddr()) select { case <-c.Stopper().ShouldStop(): @@ -179,7 +179,7 @@ func (c *cliTest) stopServer() { // called Stop(). We just need to wait. <-c.Stopper().IsStopped() default: - c.Stopper().Stop(context.TODO()) + c.Stopper().Stop(context.Background()) } } } @@ -188,7 +188,7 @@ func (c *cliTest) stopServer() { // have changed after this method returns. 
func (c *cliTest) restartServer(params cliTestParams) { c.stopServer() - log.Info(context.TODO(), "restarting server") + log.Info(context.Background(), "restarting server") s, err := serverutils.StartServerRaw(base.TestServerArgs{ Insecure: params.insecure, SSLCertsDir: c.certsDir, @@ -198,7 +198,7 @@ func (c *cliTest) restartServer(params cliTestParams) { c.fail(err) } c.TestServer = s.(*server.TestServer) - log.Infof(context.TODO(), "restarted server at %s / %s", + log.Infof(context.Background(), "restarted server at %s / %s", c.ServingRPCAddr(), c.ServingSQLAddr()) } @@ -212,7 +212,7 @@ func (c *cliTest) cleanup() { // Restore stderr. stderr = log.OrigStderr - log.Info(context.TODO(), "stopping server and cleaning up CLI test") + log.Info(context.Background(), "stopping server and cleaning up CLI test") c.stopServer() diff --git a/pkg/cli/flags_test.go b/pkg/cli/flags_test.go index f2c0323586c7..8b51c8eb4cdb 100644 --- a/pkg/cli/flags_test.go +++ b/pkg/cli/flags_test.go @@ -167,7 +167,7 @@ func TestSQLMemoryPoolFlagValue(t *testing.T) { } // Check fractional values. - maxMem, err := status.GetTotalMemory(context.TODO()) + maxMem, err := status.GetTotalMemory(context.Background()) if err != nil { t.Logf("total memory unknown: %v", err) return diff --git a/pkg/cmd/roachtest/tpchbench.go b/pkg/cmd/roachtest/tpchbench.go index 0665d2256d3e..b56839f6bc96 100644 --- a/pkg/cmd/roachtest/tpchbench.go +++ b/pkg/cmd/roachtest/tpchbench.go @@ -23,27 +23,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/workload/querybench" ) -// tpchBench is a benchmark run on tpch data. There are different groups of -// queries we run against tpch data, represented by different tpchBench values. -type tpchBench int - -//go:generate stringer -type=tpchBench - -const ( - sql20 tpchBench = iota - tpch -) - -var urlMap = map[tpchBench]string{ - sql20: `https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/workload/querybench/2.1-sql-20`, - tpch: `https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/workload/querybench/tpch-queries`, -} - type tpchBenchSpec struct { Nodes int CPUs int ScaleFactor int - benchType tpchBench + benchType string + url string numRunsPerQuery int // minVersion specifies the minimum version of CRDB nodes. If omitted, it // will default to maybeMinVersionForFixturesImport. 
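The tpchbench.go hunk above replaces the `tpchBench` enum, its `go:generate stringer` output, and the `urlMap` lookup with two plain fields on `tpchBenchSpec`: `benchType string` and `url string`. Since the spec now carries the name and query-file URL directly, the generated `tpchbench_string.go` file deleted below is no longer needed. A sketch of the before/after shape, reconstructed from the hunks in this diff:

```go
// Before: a typed enum, a generated String() method, and a URL map.
type tpchBench int

const (
	sql20 tpchBench = iota
	tpch
)

var urlMap = map[tpchBench]string{
	sql20: `https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/workload/querybench/2.1-sql-20`,
	tpch:  `https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/workload/querybench/tpch-queries`,
}

// After: the spec names the benchmark and its query file directly, so
// callers read b.benchType and b.url and no generated code is required.
type tpchBenchSpec struct {
	Nodes           int
	CPUs            int
	ScaleFactor     int
	benchType       string
	url             string
	numRunsPerQuery int
	// minVersion and maxLatency omitted here for brevity.
}
```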
@@ -71,10 +56,9 @@ func runTPCHBench(ctx context.Context, t *test, c *cluster, b tpchBenchSpec) { c.Put(ctx, cockroach, "./cockroach", roachNodes) c.Put(ctx, workload, "./workload", loadNode) - url := urlMap[b.benchType] - filename := b.benchType.String() - t.Status(fmt.Sprintf("downloading %s query file from %s", filename, url)) - if err := c.RunE(ctx, loadNode, fmt.Sprintf("curl %s > %s", url, filename)); err != nil { + filename := b.benchType + t.Status(fmt.Sprintf("downloading %s query file from %s", filename, b.url)) + if err := c.RunE(ctx, loadNode, fmt.Sprintf("curl %s > %s", b.url, filename)); err != nil { t.Fatal(err) } @@ -91,7 +75,7 @@ func runTPCHBench(ctx context.Context, t *test, c *cluster, b tpchBenchSpec) { t.l.Printf("running %s benchmark on tpch scale-factor=%d", filename, b.ScaleFactor) - numQueries, err := getNumQueriesInFile(filename, url) + numQueries, err := getNumQueriesInFile(filename, b.url) if err != nil { t.Fatal(err) } @@ -165,7 +149,7 @@ func downloadFile(filename string, url string) (*os.File, error) { func registerTPCHBenchSpec(r *testRegistry, b tpchBenchSpec) { nameParts := []string{ "tpchbench", - b.benchType.String(), + b.benchType, fmt.Sprintf("nodes=%d", b.Nodes), fmt.Sprintf("cpu=%d", b.CPUs), fmt.Sprintf("sf=%d", b.ScaleFactor), @@ -195,7 +179,8 @@ func registerTPCHBench(r *testRegistry) { Nodes: 3, CPUs: 4, ScaleFactor: 1, - benchType: sql20, + benchType: `sql20`, + url: `https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/workload/querybench/2.1-sql-20`, numRunsPerQuery: 3, maxLatency: 100 * time.Second, }, @@ -203,7 +188,8 @@ func registerTPCHBench(r *testRegistry) { Nodes: 3, CPUs: 4, ScaleFactor: 1, - benchType: tpch, + benchType: `tpch`, + url: `https://raw.githubusercontent.com/cockroachdb/cockroach/master/pkg/workload/querybench/tpch-queries`, numRunsPerQuery: 3, minVersion: `v19.2.0`, maxLatency: 500 * time.Second, diff --git a/pkg/cmd/roachtest/tpchbench_string.go b/pkg/cmd/roachtest/tpchbench_string.go deleted file mode 100644 index 3a1d01e3f6ec..000000000000 --- a/pkg/cmd/roachtest/tpchbench_string.go +++ /dev/null @@ -1,24 +0,0 @@ -// Code generated by "stringer -type=tpchBench"; DO NOT EDIT. - -package main - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[sql20-0] - _ = x[tpch-1] -} - -const _tpchBench_name = "sql20tpch" - -var _tpchBench_index = [...]uint8{0, 5, 9} - -func (i tpchBench) String() string { - if i < 0 || i >= tpchBench(len(_tpchBench_index)-1) { - return "tpchBench(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _tpchBench_name[_tpchBench_index[i]:_tpchBench_index[i+1]] -} diff --git a/pkg/cmd/roachtest/tpchvec.go b/pkg/cmd/roachtest/tpchvec.go index d7728a306e1b..637b49d0d085 100644 --- a/pkg/cmd/roachtest/tpchvec.go +++ b/pkg/cmd/roachtest/tpchvec.go @@ -25,12 +25,12 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/binfetcher" "github.com/cockroachdb/cockroach/pkg/util/randutil" + "github.com/cockroachdb/cockroach/pkg/workload/tpch" "github.com/cockroachdb/errors" ) const ( - tpchVecNodeCount = 3 - tpchVecNumQueries = 22 + tpchVecNodeCount = 3 ) type crdbVersion int @@ -53,13 +53,19 @@ func toCRDBVersion(v string) (crdbVersion, error) { } } -var ( - vectorizeOnOptionByVersion = map[crdbVersion]string{ - tpchVecVersion19_2: "experimental_on", - tpchVecVersion20_1: "on", - tpchVecVersion20_2: "on", +func vectorizeOptionToSetting(vectorize bool, version crdbVersion) string { + if !vectorize { + return "off" + } + switch version { + case tpchVecVersion19_2: + return "experimental_on" + default: + return "on" } +} +var ( // queriesToSkipByVersion is a map keyed by version that contains query numbers // to be skipped for the given version (as well as the reasons for why they are skipped). queriesToSkipByVersion = map[crdbVersion]map[int]string{ @@ -209,10 +215,10 @@ func (p *tpchVecPerfTest) postQueryRunHook(t *test, output []byte, vectorized bo } } -func (p *tpchVecPerfTest) postTestRunHook(t *test, _ *gosql.DB, version crdbVersion) { +func (p *tpchVecPerfTest) postTestRunHook(t *test, conn *gosql.DB, version crdbVersion) { queriesToSkip := queriesToSkipByVersion[version] t.Status("comparing the runtimes (only median values for each query are compared)") - for queryNum := 1; queryNum <= tpchVecNumQueries; queryNum++ { + for queryNum := 1; queryNum <= tpch.NumQueries; queryNum++ { if _, skipped := queriesToSkip[queryNum]; skipped { continue } @@ -248,6 +254,33 @@ func (p *tpchVecPerfTest) postTestRunHook(t *test, _ *gosql.DB, version crdbVers vecOnTime, vecOffTime, vecOnTimes, vecOffTimes)) } if vecOnTime >= slownessThresholdByVersion[version]*vecOffTime { + // For some reason, the vectorized engine executed the query a lot + // slower than the row-by-row engine which is unexpected. In order + // to understand where the slowness comes from, we will run EXPLAIN + // ANALYZE of the query with all `vectorize` options + // tpchPerfTestNumRunsPerQuery times (hoping at least one will + // "catch" the slowness). 
+ for _, vectorize := range p.vectorizeOptions() { + vectorizeSetting := vectorizeOptionToSetting(vectorize, version) + if _, err := conn.Exec(fmt.Sprintf("SET vectorize=%s;", vectorizeSetting)); err != nil { + t.Fatal(err) + } + for i := 0; i < tpchPerfTestNumRunsPerQuery; i++ { + rows, err := conn.Query(fmt.Sprintf( + "SELECT url FROM [EXPLAIN ANALYZE %s];", tpch.QueriesByNumber[queryNum], + )) + if err != nil { + t.Fatal(err) + } + defer rows.Close() + var url string + rows.Next() + if err = rows.Scan(&url); err != nil { + t.Fatal(err) + } + t.Status(fmt.Sprintf("EXPLAIN ANALYZE with vectorize=%s url:\n%s", vectorizeSetting, url)) + } + } t.Fatal(fmt.Sprintf( "[q%d] vec ON is slower by %.2f%% than vec OFF\n"+ "vec ON times: %v\nvec OFF times: %v", @@ -303,16 +336,13 @@ func baseTestRun( ) { firstNode := c.Node(1) queriesToSkip := queriesToSkipByVersion[version] - for queryNum := 1; queryNum <= tpchVecNumQueries; queryNum++ { + for queryNum := 1; queryNum <= tpch.NumQueries; queryNum++ { for _, vectorize := range tc.vectorizeOptions() { if reason, skip := queriesToSkip[queryNum]; skip { t.Status(fmt.Sprintf("skipping q%d because of %q", queryNum, reason)) continue } - vectorizeSetting := "off" - if vectorize { - vectorizeSetting = vectorizeOnOptionByVersion[version] - } + vectorizeSetting := vectorizeOptionToSetting(vectorize, version) cmd := fmt.Sprintf("./workload run tpch --concurrency=1 --db=tpch "+ "--max-ops=%d --queries=%d --vectorize=%s {pgurl:1-%d}", tc.numRunsPerQuery(), queryNum, vectorizeSetting, tpchVecNodeCount) diff --git a/pkg/gossip/client_test.go b/pkg/gossip/client_test.go index 121932716bfd..4dd560f5983d 100644 --- a/pkg/gossip/client_test.go +++ b/pkg/gossip/client_test.go @@ -58,7 +58,7 @@ func startGossipAtAddr( ) *Gossip { clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContextWithClusterID(clock, stopper, clusterID) - rpcContext.NodeID.Set(context.TODO(), nodeID) + rpcContext.NodeID.Set(context.Background(), nodeID) server := rpc.NewServer(rpcContext) g := NewTest(nodeID, rpcContext, server, stopper, registry, zonepb.DefaultZoneConfigRef()) @@ -188,7 +188,7 @@ func TestClientGossip(t *testing.T) { c := newClient(log.AmbientContext{Tracer: tracing.NewTracer()}, remote.GetNodeAddr(), makeMetrics()) defer func() { - stopper.Stop(context.TODO()) + stopper.Stop(context.Background()) if c != <-disconnected { t.Errorf("expected client disconnect after remote close") } @@ -218,7 +218,7 @@ func TestClientGossip(t *testing.T) { func TestClientGossipMetrics(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) // Shared cluster ID by all gossipers (this ensures that the gossipers // don't talk to servers from unrelated tests by accident). 
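Returning to the tpchvec.go changes above: the version-keyed `vectorizeOnOptionByVersion` map becomes the `vectorizeOptionToSetting(vectorize, version)` helper, and when a query regresses past the slowness threshold the perf test now re-runs it under `EXPLAIN ANALYZE` for every vectorize setting and logs the returned URL for later inspection. A hedged sketch of that debugging step, using the same calls as the diff but closing the result rows eagerly inside the loop instead of deferring:

```go
for _, vectorize := range p.vectorizeOptions() {
	vectorizeSetting := vectorizeOptionToSetting(vectorize, version)
	if _, err := conn.Exec(fmt.Sprintf("SET vectorize=%s;", vectorizeSetting)); err != nil {
		t.Fatal(err)
	}
	for i := 0; i < tpchPerfTestNumRunsPerQuery; i++ {
		// Each run logs the URL returned by EXPLAIN ANALYZE so the slow
		// execution can be inspected after the test fails.
		var url string
		if err := conn.QueryRow(fmt.Sprintf(
			"SELECT url FROM [EXPLAIN ANALYZE %s];", tpch.QueriesByNumber[queryNum],
		)).Scan(&url); err != nil {
			t.Fatal(err)
		}
		t.Status(fmt.Sprintf("EXPLAIN ANALYZE with vectorize=%s url:\n%s", vectorizeSetting, url))
	}
}
```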
@@ -298,7 +298,7 @@ func TestClientNodeID(t *testing.T) { disconnected <- c defer func() { - stopper.Stop(context.TODO()) + stopper.Stop(context.Background()) if c != <-disconnected { t.Errorf("expected client disconnect after remote close") } @@ -336,7 +336,7 @@ func verifyServerMaps(g *Gossip, expCount int) bool { func TestClientDisconnectLoopback(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) local := startGossip(uuid.Nil, 1, stopper, t, metric.NewRegistry()) local.mu.Lock() lAddr := local.mu.is.NodeAddr @@ -358,7 +358,7 @@ func TestClientDisconnectLoopback(t *testing.T) { func TestClientDisconnectRedundant(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) // Shared cluster ID by all gossipers (this ensures that the gossipers // don't talk to servers from unrelated tests by accident). @@ -417,7 +417,7 @@ func TestClientDisconnectRedundant(t *testing.T) { func TestClientDisallowMultipleConns(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) // Shared cluster ID by all gossipers (this ensures that the gossipers // don't talk to servers from unrelated tests by accident). @@ -459,7 +459,7 @@ func TestClientDisallowMultipleConns(t *testing.T) { func TestClientRegisterWithInitNodeID(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) // Shared cluster ID by all gossipers (this ensures that the gossipers @@ -537,7 +537,7 @@ func (tr *testResolver) GetAddress() (net.Addr, error) { func TestClientRetryBootstrap(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) // Shared cluster ID by all gossipers (this ensures that the gossipers // don't talk to servers from unrelated tests by accident). 
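The gossip tests above and below all share the same teardown idiom, which this diff touches in dozens of places: create a `stop.Stopper`, defer a deliberate `Stop(context.Background())`, and build the test clock, RPC context, and gossip instance on top of it so one deferred call tears everything down. A minimal sketch of that shape, assembled only from calls that appear verbatim in this diff (written as it would appear inside the gossip package's own tests):

```go
stopper := stop.NewStopper()
defer stopper.Stop(context.Background()) // deliberate teardown, not a TODO placeholder

clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond)
rpcContext := rpc.NewInsecureTestingContext(clock, stopper)

// The gossip instance hangs off the same stopper, so the single deferred
// Stop above shuts it down along with the RPC server.
g := NewTest(1, rpcContext, rpc.NewServer(rpcContext), stopper, metric.NewRegistry(), zonepb.DefaultZoneConfigRef())
_ = g
```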
@@ -568,7 +568,7 @@ func TestClientRetryBootstrap(t *testing.T) { func TestClientForwardUnresolved(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) const nodeID = 1 local := startGossip(uuid.Nil, nodeID, stopper, t, metric.NewRegistry()) addr := local.GetNodeAddr() @@ -589,7 +589,7 @@ func TestClientForwardUnresolved(t *testing.T) { local.outgoing.addPlaceholder() // so that the resolvePlaceholder in handleResponse doesn't fail local.mu.Unlock() if err := client.handleResponse( - context.TODO(), local, reply, + context.Background(), local, reply, ); !testutils.IsError(err, "received forward") { t.Fatal(err) } diff --git a/pkg/gossip/convergence_test.go b/pkg/gossip/convergence_test.go index 34e922ea16f9..178c1522fc59 100644 --- a/pkg/gossip/convergence_test.go +++ b/pkg/gossip/convergence_test.go @@ -58,7 +58,7 @@ func TestConvergence(t *testing.T) { } stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) network := simulation.NewNetwork(stopper, testConvergenceSize, true, zonepb.DefaultZoneConfigRef()) @@ -91,7 +91,7 @@ func TestNetworkReachesEquilibrium(t *testing.T) { } stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) network := simulation.NewNetwork(stopper, testReachesEquilibriumSize, true, zonepb.DefaultZoneConfigRef()) diff --git a/pkg/gossip/gossip_test.go b/pkg/gossip/gossip_test.go index 172eacffad87..3fbbbae4ed66 100644 --- a/pkg/gossip/gossip_test.go +++ b/pkg/gossip/gossip_test.go @@ -43,7 +43,7 @@ import ( func TestGossipInfoStore(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) g := NewTest(1, rpcContext, rpc.NewServer(rpcContext), stopper, metric.NewRegistry(), zonepb.DefaultZoneConfigRef()) @@ -64,7 +64,7 @@ func TestGossipInfoStore(t *testing.T) { func TestGossipMoveNode(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) g := NewTest(1, rpcContext, rpc.NewServer(rpcContext), stopper, metric.NewRegistry(), zonepb.DefaultZoneConfigRef()) @@ -108,7 +108,7 @@ func TestGossipMoveNode(t *testing.T) { func TestGossipGetNextBootstrapAddress(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) resolverSpecs := []string{ "127.0.0.1:9000", @@ -153,7 +153,7 @@ func TestGossipGetNextBootstrapAddress(t *testing.T) { func TestGossipLocalityResolver(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -241,7 +241,7 @@ func TestGossipRaceLogStatus(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) // Shared cluster ID by all gossipers (this ensures that the gossipers // don't talk to servers from unrelated tests by accident). 
clusterID := uuid.MakeV4() @@ -281,7 +281,7 @@ func TestGossipOutgoingLimitEnforced(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) // This test has an implicit dependency on the maxPeers logic deciding that // maxPeers is 3 for a 5-node cluster, so let's go ahead and make that @@ -336,7 +336,7 @@ func TestGossipOutgoingLimitEnforced(t *testing.T) { t.Fatal(err) } for range peers { - local.tightenNetwork(context.TODO()) + local.tightenNetwork(context.Background()) } if outgoing := local.outgoing.gauge.Value(); outgoing > int64(maxPeers) { @@ -353,7 +353,7 @@ func TestGossipMostDistant(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) connect := func(from, to *Gossip) { to.mu.Lock() @@ -473,7 +473,7 @@ func TestGossipNoForwardSelf(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) // Shared cluster ID by all gossipers (this ensures that the gossipers // don't talk to servers from unrelated tests by accident). @@ -558,7 +558,7 @@ func TestGossipCullNetwork(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) // Shared cluster ID by all gossipers (this ensures that the gossipers // don't talk to servers from unrelated tests by accident). @@ -602,7 +602,7 @@ func TestGossipOrphanedStallDetection(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) // Shared cluster ID by all gossipers (this ensures that the gossipers // don't talk to servers from unrelated tests by accident). @@ -649,7 +649,7 @@ func TestGossipOrphanedStallDetection(t *testing.T) { local.bootstrap() local.manage() - peerStopper.Stop(context.TODO()) + peerStopper.Stop(context.Background()) testutils.SucceedsSoon(t, func() error { for _, peerID := range local.Outgoing() { @@ -661,7 +661,7 @@ func TestGossipOrphanedStallDetection(t *testing.T) { }) peerStopper = stop.NewStopper() - defer peerStopper.Stop(context.TODO()) + defer peerStopper.Stop(context.Background()) startGossipAtAddr(clusterID, peerNodeID, peerAddr, peerStopper, t, metric.NewRegistry()) testutils.SucceedsSoon(t, func() error { @@ -702,7 +702,7 @@ func TestGossipJoinTwoClusters(t *testing.T) { select { case <-stopper.ShouldQuiesce(): default: - stopper.Stop(context.TODO()) + stopper.Stop(context.Background()) } }() @@ -725,7 +725,7 @@ func TestGossipJoinTwoClusters(t *testing.T) { g = append(g, gnode) gnode.SetStallInterval(interval) gnode.SetBootstrapInterval(interval) - gnode.clusterID.Set(context.TODO(), clusterIDs[i]) + gnode.clusterID.Set(context.Background(), clusterIDs[i]) ln, err := netutil.ListenAndServeGRPC(stopper, server, util.IsolatedTestAddr) if err != nil { @@ -767,7 +767,7 @@ func TestGossipJoinTwoClusters(t *testing.T) { }) // Kill node 0 to force node 2 to bootstrap with node 1. - stoppers[0].Stop(context.TODO()) + stoppers[0].Stop(context.Background()) // Wait for twice the bootstrap interval, and verify that // node 2 still has not connected to node 1. 
 		time.Sleep(2 * interval)
@@ -784,7 +784,7 @@ func TestGossipJoinTwoClusters(t *testing.T) {
 func TestGossipPropagation(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 
 	// Shared cluster ID by all gossipers (this ensures that the gossipers
 	// don't talk to servers from unrelated tests by accident).
@@ -878,7 +878,7 @@ func TestGossipLoopbackInfoPropagation(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	t.Skipf("#34494")
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 
 	// Shared cluster ID by all gossipers (this ensures that the gossipers
 	// don't talk to servers from unrelated tests by accident).
diff --git a/pkg/gossip/infostore_test.go b/pkg/gossip/infostore_test.go
index 4566b7110eed..6b9e5eff77d3 100644
--- a/pkg/gossip/infostore_test.go
+++ b/pkg/gossip/infostore_test.go
@@ -37,7 +37,7 @@ var emptyAddr = util.MakeUnresolvedAddr("test", "")
 func newTestInfoStore() (*infoStore, *stop.Stopper) {
 	stopper := stop.NewStopper()
 	nc := &base.NodeIDContainer{}
-	nc.Set(context.TODO(), 1)
+	nc.Set(context.Background(), 1)
 	is := newInfoStore(log.AmbientContext{Tracer: tracing.NewTracer()}, nc, emptyAddr, stopper)
 	return is, stopper
 }
@@ -47,7 +47,7 @@ func newTestInfoStore() (*infoStore, *stop.Stopper) {
 func TestZeroDuration(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	is, stopper := newTestInfoStore()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	info := is.newInfo(nil, 0)
 	if info.TTLStamp != math.MaxInt64 {
 		t.Errorf("expected zero duration to get max TTLStamp: %d", info.TTLStamp)
@@ -58,7 +58,7 @@ func TestZeroDuration(t *testing.T) {
 func TestNewInfo(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	is, stopper := newTestInfoStore()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	info1 := is.newInfo(nil, time.Second)
 	info2 := is.newInfo(nil, time.Second)
 	if err := is.addInfo("a", info1); err != nil {
@@ -77,7 +77,7 @@ func TestNewInfo(t *testing.T) {
 func TestInfoStoreGetInfo(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	is, stopper := newTestInfoStore()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	i := is.newInfo(nil, time.Second)
 	i.NodeID = 1
 	if err := is.addInfo("a", i); err != nil {
@@ -101,7 +101,7 @@ func TestInfoStoreGetInfo(t *testing.T) {
 func TestInfoStoreGetInfoTTL(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	is, stopper := newTestInfoStore()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	i := is.newInfo(nil, time.Nanosecond)
 	if err := is.addInfo("a", i); err != nil {
 		t.Error(err)
@@ -117,7 +117,7 @@ func TestInfoStoreGetInfoTTL(t *testing.T) {
 func TestAddInfoSameKeyLessThanEqualTimestamp(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	is, stopper := newTestInfoStore()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	info1 := is.newInfo(nil, time.Second)
 	if err := is.addInfo("a", info1); err != nil {
 		t.Error(err)
@@ -141,7 +141,7 @@ func TestAddInfoSameKeyLessThanEqualTimestamp(t *testing.T) {
 func TestAddInfoSameKeyGreaterTimestamp(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	is, stopper := newTestInfoStore()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	info1 := is.newInfo(nil, time.Second)
 	info2 := is.newInfo(nil, time.Second)
 	if err1, err2 := is.addInfo("a", info1), is.addInfo("a", info2); err1 != nil || err2 != nil {
@@ -154,7 +154,7 @@ func TestAddInfoSameKeyGreaterTimestamp(t *testing.T) {
 func TestAddInfoSameKeyDifferentHops(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	is, stopper := newTestInfoStore()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	info1 := is.newInfo(nil, time.Second)
 	info1.Hops = 1
 	info2 := is.newInfo(nil, time.Second)
@@ -240,7 +240,7 @@ func TestCombineInfosRatchetMonotonic(t *testing.T) {
 // Helper method creates an infostore with 10 infos.
 func createTestInfoStore(t *testing.T) *infoStore {
 	is, stopper := newTestInfoStore()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 
 	for i := 0; i < 10; i++ {
 		infoA := is.newInfo(nil, time.Second)
@@ -316,7 +316,7 @@ func TestInfoStoreMostDistant(t *testing.T) {
 		roachpb.NodeID(3),
 	}
 	is, stopper := newTestInfoStore()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 
 	// Start with one very distant info that shouldn't affect mostDistant
 	// calculations because it isn't a node ID key.
@@ -376,7 +376,7 @@ func TestLeastUseful(t *testing.T) {
 		roachpb.NodeID(2),
 	}
 	is, stopper := newTestInfoStore()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 
 	set := makeNodeSet(3, metric.NewGauge(metric.Metadata{Name: ""}))
 	if is.leastUseful(set) != 0 {
@@ -446,7 +446,7 @@ func (cr *callbackRecord) Keys() []string {
 func TestCallbacks(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	is, stopper := newTestInfoStore()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	wg := &sync.WaitGroup{}
 	cb1 := callbackRecord{wg: wg}
 	cb2 := callbackRecord{wg: wg}
@@ -555,7 +555,7 @@ func TestCallbacks(t *testing.T) {
 func TestRegisterCallback(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	is, stopper := newTestInfoStore()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 
 	wg := &sync.WaitGroup{}
 	cb := callbackRecord{wg: wg}
diff --git a/pkg/gossip/resolver/resolver_test.go b/pkg/gossip/resolver/resolver_test.go
index 63af3ae28459..66bdbf662765 100644
--- a/pkg/gossip/resolver/resolver_test.go
+++ b/pkg/gossip/resolver/resolver_test.go
@@ -138,7 +138,7 @@ func TestSRV(t *testing.T) {
 		func() {
 			defer TestingOverrideSRVLookupFn(tc.lookuper)()
 
-			resolvers, err := SRV(context.TODO(), tc.address)
+			resolvers, err := SRV(context.Background(), tc.address)
 
 			if err != nil {
 				t.Errorf("#%d: expected success, got err=%v", tcNum, err)
diff --git a/pkg/gossip/storage_test.go b/pkg/gossip/storage_test.go
index 31141922c88d..ef4538fcede6 100644
--- a/pkg/gossip/storage_test.go
+++ b/pkg/gossip/storage_test.go
@@ -95,7 +95,7 @@ func (s unresolvedAddrSlice) Swap(i, j int) {
 func TestGossipStorage(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 
 	defaultZoneConfig := zonepb.DefaultZoneConfigRef()
 	network := simulation.NewNetwork(stopper, 3, true, defaultZoneConfig)
@@ -213,7 +213,7 @@ func TestGossipStorage(t *testing.T) {
 func TestGossipStorageCleanup(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 
 	const numNodes = 3
 	network := simulation.NewNetwork(stopper, numNodes, false, zonepb.DefaultZoneConfigRef())
diff --git a/pkg/jobs/jobs_test.go b/pkg/jobs/jobs_test.go
index 05328fad91d6..2687e532ec4f 100644
--- a/pkg/jobs/jobs_test.go
+++ b/pkg/jobs/jobs_test.go
@@ -125,7 +125,7 @@ func (expected *expectation) verify(id *int64, expectedStatus jobs.Status) error
 
 func TestJobsTableProgressFamily(t *testing.T) {
 	defer leaktest.AfterTest(t)()
-	ctx := context.TODO()
+	ctx := context.Background()
 	s, db, _ := serverutils.StartServer(t, base.TestServerArgs{})
 	defer s.Stopper().Stop(ctx)
 
@@ -826,7 +826,7 @@ func TestJobLifecycle(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	defer jobs.ResetConstructors()()
 
-	ctx := context.TODO()
+	ctx := context.Background()
 	s, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{})
 	defer s.Stopper().Stop(ctx)
 
@@ -1386,7 +1386,7 @@ func TestShowJobs(t *testing.T) {
 	params, _ := tests.CreateTestServerParams()
 	s, rawSQLDB, _ := serverutils.StartServer(t, params)
 	sqlDB := sqlutils.MakeSQLRunner(rawSQLDB)
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 
 	// row represents a row returned from crdb_internal.jobs, but
 	// *not* a row in system.jobs.
@@ -1530,7 +1530,7 @@ func TestShowAutomaticJobs(t *testing.T) {
 	params, _ := tests.CreateTestServerParams()
 	s, rawSQLDB, _ := serverutils.StartServer(t, params)
 	sqlDB := sqlutils.MakeSQLRunner(rawSQLDB)
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 
 	// row represents a row returned from crdb_internal.jobs, but
 	// *not* a row in system.jobs.
@@ -1621,7 +1621,7 @@ func TestShowJobsWithError(t *testing.T) {
 
 	params, _ := tests.CreateTestServerParams()
 	s, sqlDB, _ := serverutils.StartServer(t, params)
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 
 	// Create at least 6 rows, ensuring 3 rows are corrupted.
 	// Ensure there is at least one row in system.jobs.
@@ -1773,7 +1773,7 @@ func TestShowJobWhenComplete(t *testing.T) {
 		jobs.DefaultAdoptInterval = oldInterval
 	}(jobs.DefaultAdoptInterval)
 	jobs.DefaultAdoptInterval = 10 * time.Millisecond
-	ctx := context.TODO()
+	ctx := context.Background()
 	s, db, _ := serverutils.StartServer(t, base.TestServerArgs{})
 	defer s.Stopper().Stop(ctx)
 	registry := s.JobRegistry().(*jobs.Registry)
@@ -2260,7 +2260,7 @@ func TestUnmigratedSchemaChangeJobs(t *testing.T) {
 	}(jobs.DefaultAdoptInterval)
 	jobs.DefaultAdoptInterval = 10 * time.Millisecond
 
-	ctx := context.TODO()
+	ctx := context.Background()
 	s, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{})
 	defer s.Stopper().Stop(ctx)
 
diff --git a/pkg/jobs/registry_test.go b/pkg/jobs/registry_test.go
index 524dd35c1f4a..d53f33437daf 100644
--- a/pkg/jobs/registry_test.go
+++ b/pkg/jobs/registry_test.go
@@ -211,7 +211,7 @@ func TestRegistryGC(t *testing.T) {
 		desc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "to_be_mutated")
 		desc.Mutations = mutations
 		if err := kvDB.Put(
-			context.TODO(),
+			context.Background(),
 			sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()),
 			sqlbase.WrapDescriptor(desc),
 		); err != nil {
@@ -224,7 +224,7 @@ func TestRegistryGC(t *testing.T) {
 		desc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "to_be_mutated")
 		desc.GCMutations = gcMutations
 		if err := kvDB.Put(
-			context.TODO(),
+			context.Background(),
 			sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()),
 			sqlbase.WrapDescriptor(desc),
 		); err != nil {
@@ -242,7 +242,7 @@ func TestRegistryGC(t *testing.T) {
 			desc.DropJobID = 0
 		}
 		if err := kvDB.Put(
-			context.TODO(),
+			context.Background(),
 			sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, desc.GetID()),
 			sqlbase.WrapDescriptor(desc),
 		); err != nil {
diff --git a/pkg/kv/bulk/sst_batcher_test.go b/pkg/kv/bulk/sst_batcher_test.go
index fcabeb154693..18705b9f1510
100644 --- a/pkg/kv/bulk/sst_batcher_test.go +++ b/pkg/kv/bulk/sst_batcher_test.go @@ -332,7 +332,7 @@ func TestAddBigSpanningSSTWithSplits(t *testing.T) { t.Logf("Adding %dkb sst spanning %d splits from %v to %v", len(sst)/kb, len(splits), start, end) if _, err := bulk.AddSSTable( - context.TODO(), mock, start, end, sst, false /* disallowShadowing */, enginepb.MVCCStats{}, cluster.MakeTestingClusterSettings(), + context.Background(), mock, start, end, sst, false /* disallowShadowing */, enginepb.MVCCStats{}, cluster.MakeTestingClusterSettings(), ); err != nil { t.Fatal(err) } diff --git a/pkg/kv/client_test.go b/pkg/kv/client_test.go index 5e6936fee53f..6c2800bbc423 100644 --- a/pkg/kv/client_test.go +++ b/pkg/kv/client_test.go @@ -118,7 +118,7 @@ func TestClientRetryNonTxn(t *testing.T) { }, } s, _, _ := serverutils.StartServer(t, args) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) testCases := []struct { args roachpb.Request @@ -142,7 +142,7 @@ func TestClientRetryNonTxn(t *testing.T) { // doneCall signals when the non-txn read or write has completed. doneCall := make(chan error) count := 0 // keeps track of retries - err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + err := db.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { if test.canPush { if err := txn.SetUserPriority(roachpb.MinUserPriority); err != nil { t.Fatal(err) @@ -155,7 +155,7 @@ func TestClientRetryNonTxn(t *testing.T) { } // On the first iteration, send the non-txn put or get. if count == 1 { - nonTxnCtx := context.TODO() + nonTxnCtx := context.Background() // The channel lets us pause txn until after the non-txn // method has run once. Use a channel length of size 1 to @@ -173,7 +173,7 @@ func TestClientRetryNonTxn(t *testing.T) { } notify <- struct{}{} if err != nil { - log.Errorf(context.TODO(), "error on non-txn request: %s", err) + log.Errorf(context.Background(), "error on non-txn request: %s", err) } doneCall <- errors.Wrapf( err, "%d: expected success on non-txn call to %s", @@ -208,7 +208,7 @@ func TestClientRetryNonTxn(t *testing.T) { } // Get the current value to verify whether the txn happened first. - gr, err := db.Get(context.TODO(), key) + gr, err := db.Get(context.Background(), key) if err != nil { t.Fatalf("%d: expected success getting %q: %s", i, key, err) } @@ -233,14 +233,14 @@ func TestClientRetryNonTxn(t *testing.T) { func TestClientRunTransaction(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) db := createTestClient(t, s) for _, commit := range []bool{true, false} { value := []byte("value") key := []byte(fmt.Sprintf("%s/key-%t", testUser, commit)) - err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + err := db.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { // Put transactional value. if err := txn.Put(ctx, key, value); err != nil { return err @@ -272,7 +272,7 @@ func TestClientRunTransaction(t *testing.T) { } // Verify the value is now visible on commit == true, and not visible otherwise. 
- gr, err := db.Get(context.TODO(), key) + gr, err := db.Get(context.Background(), key) if commit { if err != nil || gr.Value == nil || !bytes.Equal(gr.ValueBytes(), value) { t.Errorf("expected success reading value: %+v, %v", gr.Value, err) @@ -290,7 +290,7 @@ func TestClientRunTransaction(t *testing.T) { func TestClientGetAndPutProto(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) db := createTestClient(t, s) zoneConfig := zonepb.ZoneConfig{ @@ -301,12 +301,12 @@ func TestClientGetAndPutProto(t *testing.T) { } key := roachpb.Key(testUser + "/zone-config") - if err := db.Put(context.TODO(), key, &zoneConfig); err != nil { + if err := db.Put(context.Background(), key, &zoneConfig); err != nil { t.Fatalf("unable to put proto: %s", err) } var readZoneConfig zonepb.ZoneConfig - if err := db.GetProto(context.TODO(), key, &readZoneConfig); err != nil { + if err := db.GetProto(context.Background(), key, &readZoneConfig); err != nil { t.Fatalf("unable to get proto: %s", err) } if !proto.Equal(&zoneConfig, &readZoneConfig) { @@ -319,14 +319,14 @@ func TestClientGetAndPutProto(t *testing.T) { func TestClientGetAndPut(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) db := createTestClient(t, s) value := []byte("value") - if err := db.Put(context.TODO(), testUser+"/key", value); err != nil { + if err := db.Put(context.Background(), testUser+"/key", value); err != nil { t.Fatalf("unable to put value: %s", err) } - gr, err := db.Get(context.TODO(), testUser+"/key") + gr, err := db.Get(context.Background(), testUser+"/key") if err != nil { t.Fatalf("unable to get value: %s", err) } @@ -341,14 +341,14 @@ func TestClientGetAndPut(t *testing.T) { func TestClientPutInline(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) db := createTestClient(t, s) value := []byte("value") - if err := db.PutInline(context.TODO(), testUser+"/key", value); err != nil { + if err := db.PutInline(context.Background(), testUser+"/key", value); err != nil { t.Fatalf("unable to put value: %s", err) } - gr, err := db.Get(context.TODO(), testUser+"/key") + gr, err := db.Get(context.Background(), testUser+"/key") if err != nil { t.Fatalf("unable to get value: %s", err) } @@ -368,22 +368,22 @@ func TestClientPutInline(t *testing.T) { func TestClientEmptyValues(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) db := createTestClient(t, s) - if err := db.Put(context.TODO(), testUser+"/a", []byte{}); err != nil { + if err := db.Put(context.Background(), testUser+"/a", []byte{}); err != nil { t.Error(err) } - if gr, err := db.Get(context.TODO(), testUser+"/a"); err != nil { + if gr, err := db.Get(context.Background(), testUser+"/a"); err != nil { t.Error(err) } else if bytes := gr.ValueBytes(); bytes == nil || len(bytes) != 0 { t.Errorf("expected non-nil empty byte slice; got %q", bytes) } - if _, err := db.Inc(context.TODO(), testUser+"/b", 0); err != nil { + if _, err := db.Inc(context.Background(), testUser+"/b", 0); err != nil { t.Error(err) } - if gr, err := 
db.Get(context.TODO(), testUser+"/b"); err != nil { + if gr, err := db.Get(context.Background(), testUser+"/b"); err != nil { t.Error(err) } else if gr.Value == nil { t.Errorf("expected non-nil integer") @@ -397,9 +397,9 @@ func TestClientEmptyValues(t *testing.T) { func TestClientBatch(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) db := createTestClient(t, s) - ctx := context.TODO() + ctx := context.Background() keys := []roachpb.Key{} { @@ -526,7 +526,7 @@ func TestClientBatch(t *testing.T) { // Induce a non-transactional failure. { key := roachpb.Key("conditionalPut") - if err := db.Put(context.TODO(), key, "hello"); err != nil { + if err := db.Put(context.Background(), key, "hello"); err != nil { t.Fatal(err) } @@ -551,7 +551,7 @@ func TestClientBatch(t *testing.T) { // Induce a transactional failure. { key := roachpb.Key("conditionalPut") - if err := db.Put(context.TODO(), key, "hello"); err != nil { + if err := db.Put(context.Background(), key, "hello"); err != nil { t.Fatal(err) } @@ -597,7 +597,7 @@ func concurrentIncrements(db *kv.DB, t *testing.T) { // Wait until the other goroutines are running. wgStart.Wait() - if err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + if err := db.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { txn.SetDebugName(fmt.Sprintf("test-%d", i)) // Retrieve the other key. @@ -630,7 +630,7 @@ func concurrentIncrements(db *kv.DB, t *testing.T) { results := []int64(nil) for i := 0; i < 2; i++ { readKey := []byte(fmt.Sprintf(testUser+"/value-%d", i)) - gr, err := db.Get(context.TODO(), readKey) + gr, err := db.Get(context.Background(), readKey) if err != nil { t.Fatal(err) } @@ -653,13 +653,13 @@ func concurrentIncrements(db *kv.DB, t *testing.T) { func TestConcurrentIncrements(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) db := createTestClient(t, s) // Convenience loop: Crank up this number for testing this // more often. It'll increase test duration though. for k := 0; k < 5; k++ { - if err := db.DelRange(context.TODO(), testUser+"/value-0", testUser+"/value-1x"); err != nil { + if err := db.DelRange(context.Background(), testUser+"/value-0", testUser+"/value-1x"); err != nil { t.Fatalf("%d: unable to clean up: %s", k, err) } concurrentIncrements(db, t) @@ -691,7 +691,7 @@ func TestReadConsistencyTypes(t *testing.T) { clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) db := kv.NewDB(testutils.MakeAmbientCtx(), factory, clock) - ctx := context.TODO() + ctx := context.Background() prepWithRC := func() *kv.Batch { b := &kv.Batch{} @@ -736,7 +736,7 @@ func TestReadConsistencyTypes(t *testing.T) { func TestTxn_ReverseScan(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) db := createTestClient(t, s) keys := []roachpb.Key{} @@ -746,12 +746,12 @@ func TestTxn_ReverseScan(t *testing.T) { keys = append(keys, key) b.Put(key, i) } - if err := db.Run(context.TODO(), b); err != nil { + if err := db.Run(context.Background(), b); err != nil { t.Error(err) } // Try reverse scans for all keys. 
- if err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + if err := db.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { rows, err := txn.ReverseScan(ctx, testUser+"/key/00", testUser+"/key/10", 100) if err != nil { return err @@ -765,7 +765,7 @@ func TestTxn_ReverseScan(t *testing.T) { } // Try reverse scans for half of the keys. - if err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + if err := db.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { rows, err := txn.ReverseScan(ctx, testUser+"/key/00", testUser+"/key/05", 100) if err != nil { return err @@ -777,7 +777,7 @@ func TestTxn_ReverseScan(t *testing.T) { } // Try limit maximum rows. - if err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + if err := db.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { rows, err := txn.ReverseScan(ctx, testUser+"/key/00", testUser+"/key/05", 3) if err != nil { return err @@ -789,7 +789,7 @@ func TestTxn_ReverseScan(t *testing.T) { } // Try reverse scan with the same start and end key. - if err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + if err := db.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { rows, err := txn.ReverseScan(ctx, testUser+"/key/00", testUser+"/key/00", 100) if len(rows) > 0 { t.Errorf("expected empty, got %v", rows) @@ -802,7 +802,7 @@ func TestTxn_ReverseScan(t *testing.T) { } // Try reverse scan with non-existent key. - if err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + if err := db.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { rows, err := txn.ReverseScan(ctx, testUser+"/key/aa", testUser+"/key/bb", 100) if err != nil { return err @@ -895,9 +895,9 @@ func TestIntentCleanupUnblocksReaders(t *testing.T) { defer leaktest.AfterTest(t)() s, _, db := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) - ctx := context.TODO() + ctx := context.Background() key := roachpb.Key("a") // We're going to repeatedly lay down an intent and then race a reader with @@ -977,7 +977,7 @@ func TestRollbackWithCanceledContextBasic(t *testing.T) { // weren't blocked on an intent or, if we were, that intent was cleaned up by // someone else than the would-be pusher fast. Similar in // TestSessionFinishRollsBackTxn. - if _, err := db.Get(context.TODO(), key); err != nil { + if _, err := db.Get(context.Background(), key); err != nil { t.Fatal(err) } dur := timeutil.Since(start) @@ -1035,7 +1035,7 @@ func TestRollbackWithCanceledContextInsidious(t *testing.T) { // weren't blocked on an intent or, if we were, that intent was cleaned up by // someone else than the would-be pusher fast. Similar in // TestSessionFinishRollsBackTxn. 
- if _, err := db.Get(context.TODO(), key); err != nil { + if _, err := db.Get(context.Background(), key); err != nil { t.Fatal(err) } dur := timeutil.Since(start) diff --git a/pkg/kv/db_test.go b/pkg/kv/db_test.go index 362fa8aa5559..859f4ef8edb3 100644 --- a/pkg/kv/db_test.go +++ b/pkg/kv/db_test.go @@ -75,9 +75,9 @@ func checkLen(t *testing.T, expected, count int) { func TestDB_Get(t *testing.T) { defer leaktest.AfterTest(t)() s, db := setup(t) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) - result, err := db.Get(context.TODO(), "aa") + result, err := db.Get(context.Background(), "aa") if err != nil { t.Fatal(err) } @@ -87,10 +87,10 @@ func TestDB_Get(t *testing.T) { func TestDB_Put(t *testing.T) { defer leaktest.AfterTest(t)() s, db := setup(t) - defer s.Stopper().Stop(context.TODO()) - ctx := context.TODO() + defer s.Stopper().Stop(context.Background()) + ctx := context.Background() - if err := db.Put(context.TODO(), "aa", "1"); err != nil { + if err := db.Put(context.Background(), "aa", "1"); err != nil { t.Fatal(err) } result, err := db.Get(ctx, "aa") @@ -103,8 +103,8 @@ func TestDB_Put(t *testing.T) { func TestDB_CPut(t *testing.T) { defer leaktest.AfterTest(t)() s, db := setup(t) - defer s.Stopper().Stop(context.TODO()) - ctx := context.TODO() + defer s.Stopper().Stop(context.Background()) + ctx := context.Background() if err := db.Put(ctx, "aa", "1"); err != nil { t.Fatal(err) @@ -149,8 +149,8 @@ func TestDB_CPut(t *testing.T) { func TestDB_InitPut(t *testing.T) { defer leaktest.AfterTest(t)() s, db := setup(t) - defer s.Stopper().Stop(context.TODO()) - ctx := context.TODO() + defer s.Stopper().Stop(context.Background()) + ctx := context.Background() if err := db.InitPut(ctx, "aa", "1", false); err != nil { t.Fatal(err) @@ -180,8 +180,8 @@ func TestDB_InitPut(t *testing.T) { func TestDB_Inc(t *testing.T) { defer leaktest.AfterTest(t)() s, db := setup(t) - defer s.Stopper().Stop(context.TODO()) - ctx := context.TODO() + defer s.Stopper().Stop(context.Background()) + ctx := context.Background() if _, err := db.Inc(ctx, "aa", 100); err != nil { t.Fatal(err) @@ -196,12 +196,12 @@ func TestDB_Inc(t *testing.T) { func TestBatch(t *testing.T) { defer leaktest.AfterTest(t)() s, db := setup(t) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) b := &kv.Batch{} b.Get("aa") b.Put("bb", "2") - if err := db.Run(context.TODO(), b); err != nil { + if err := db.Run(context.Background(), b); err != nil { t.Fatal(err) } @@ -215,16 +215,16 @@ func TestBatch(t *testing.T) { func TestDB_Scan(t *testing.T) { defer leaktest.AfterTest(t)() s, db := setup(t) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) b := &kv.Batch{} b.Put("aa", "1") b.Put("ab", "2") b.Put("bb", "3") - if err := db.Run(context.TODO(), b); err != nil { + if err := db.Run(context.Background(), b); err != nil { t.Fatal(err) } - rows, err := db.Scan(context.TODO(), "a", "b", 100) + rows, err := db.Scan(context.Background(), "a", "b", 100) if err != nil { t.Fatal(err) } @@ -240,16 +240,16 @@ func TestDB_Scan(t *testing.T) { func TestDB_ScanForUpdate(t *testing.T) { defer leaktest.AfterTest(t)() s, db := setup(t) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) b := &kv.Batch{} b.Put("aa", "1") b.Put("ab", "2") b.Put("bb", "3") - if err := db.Run(context.TODO(), b); err != nil { + if err := db.Run(context.Background(), b); err != nil { t.Fatal(err) } - rows, err := 
db.ScanForUpdate(context.TODO(), "a", "b", 100) + rows, err := db.ScanForUpdate(context.Background(), "a", "b", 100) if err != nil { t.Fatal(err) } @@ -265,16 +265,16 @@ func TestDB_ScanForUpdate(t *testing.T) { func TestDB_ReverseScan(t *testing.T) { defer leaktest.AfterTest(t)() s, db := setup(t) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) b := &kv.Batch{} b.Put("aa", "1") b.Put("ab", "2") b.Put("bb", "3") - if err := db.Run(context.TODO(), b); err != nil { + if err := db.Run(context.Background(), b); err != nil { t.Fatal(err) } - rows, err := db.ReverseScan(context.TODO(), "ab", "c", 100) + rows, err := db.ReverseScan(context.Background(), "ab", "c", 100) if err != nil { t.Fatal(err) } @@ -290,16 +290,16 @@ func TestDB_ReverseScan(t *testing.T) { func TestDB_ReverseScanForUpdate(t *testing.T) { defer leaktest.AfterTest(t)() s, db := setup(t) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) b := &kv.Batch{} b.Put("aa", "1") b.Put("ab", "2") b.Put("bb", "3") - if err := db.Run(context.TODO(), b); err != nil { + if err := db.Run(context.Background(), b); err != nil { t.Fatal(err) } - rows, err := db.ReverseScanForUpdate(context.TODO(), "ab", "c", 100) + rows, err := db.ReverseScanForUpdate(context.Background(), "ab", "c", 100) if err != nil { t.Fatal(err) } @@ -315,13 +315,13 @@ func TestDB_ReverseScanForUpdate(t *testing.T) { func TestDB_TxnIterate(t *testing.T) { defer leaktest.AfterTest(t)() s, db := setup(t) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) b := &kv.Batch{} b.Put("aa", "1") b.Put("ab", "2") b.Put("bb", "3") - if err := db.Run(context.TODO(), b); err != nil { + if err := db.Run(context.Background(), b); err != nil { t.Fatal(err) } @@ -332,10 +332,10 @@ func TestDB_TxnIterate(t *testing.T) { var rows []kv.KeyValue = nil var p int for _, c := range tc { - if err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + if err := db.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { p = 0 rows = make([]kv.KeyValue, 0) - return txn.Iterate(context.TODO(), "a", "b", c.pageSize, + return txn.Iterate(context.Background(), "a", "b", c.pageSize, func(rs []kv.KeyValue) error { p++ rows = append(rows, rs...) 
@@ -360,19 +360,19 @@ func TestDB_TxnIterate(t *testing.T) { func TestDB_Del(t *testing.T) { defer leaktest.AfterTest(t)() s, db := setup(t) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) b := &kv.Batch{} b.Put("aa", "1") b.Put("ab", "2") b.Put("ac", "3") - if err := db.Run(context.TODO(), b); err != nil { + if err := db.Run(context.Background(), b); err != nil { t.Fatal(err) } - if err := db.Del(context.TODO(), "ab"); err != nil { + if err := db.Del(context.Background(), "ab"); err != nil { t.Fatal(err) } - rows, err := db.Scan(context.TODO(), "a", "b", 100) + rows, err := db.Scan(context.Background(), "a", "b", 100) if err != nil { t.Fatal(err) } @@ -387,9 +387,9 @@ func TestDB_Del(t *testing.T) { func TestTxn_Commit(t *testing.T) { defer leaktest.AfterTest(t)() s, db := setup(t) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) - err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + err := db.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() b.Put("aa", "1") b.Put("ab", "2") @@ -402,7 +402,7 @@ func TestTxn_Commit(t *testing.T) { b := &kv.Batch{} b.Get("aa") b.Get("ab") - if err := db.Run(context.TODO(), b); err != nil { + if err := db.Run(context.Background(), b); err != nil { t.Fatal(err) } expected := map[string][]byte{ @@ -415,10 +415,10 @@ func TestTxn_Commit(t *testing.T) { func TestDB_Put_insecure(t *testing.T) { defer leaktest.AfterTest(t)() s, _, db := serverutils.StartServer(t, base.TestServerArgs{Insecure: true}) - defer s.Stopper().Stop(context.TODO()) - ctx := context.TODO() + defer s.Stopper().Stop(context.Background()) + ctx := context.Background() - if err := db.Put(context.TODO(), "aa", "1"); err != nil { + if err := db.Put(context.Background(), "aa", "1"); err != nil { t.Fatal(err) } result, err := db.Get(ctx, "aa") diff --git a/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go b/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go index 9908d380da05..27416c71a419 100644 --- a/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go +++ b/pkg/kv/kvclient/kvcoord/dist_sender_server_test.go @@ -72,7 +72,7 @@ func startNoSplitMergeServer(t *testing.T) (serverutils.TestServerInterface, *kv func TestRangeLookupWithOpenTransaction(t *testing.T) { defer leaktest.AfterTest(t)() s, _ := startNoSplitMergeServer(t) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) // Create an intent on the meta1 record by writing directly to the // engine. @@ -115,7 +115,7 @@ func TestRangeLookupWithOpenTransaction(t *testing.T) { // intent error. If it did, it would go into a deadloop attempting // to push the transaction, which in turn requires another range // lookup, etc, ad nauseam. 
- if _, err := db.Get(context.TODO(), "a"); err != nil { + if _, err := db.Get(context.Background(), "a"); err != nil { t.Fatal(err) } } @@ -328,8 +328,8 @@ func checkReverseScanResults( func TestMultiRangeBoundedBatchScan(t *testing.T) { defer leaktest.AfterTest(t)() s, _ := startNoSplitMergeServer(t) - defer s.Stopper().Stop(context.TODO()) - ctx := context.TODO() + defer s.Stopper().Stop(context.Background()) + ctx := context.Background() db := s.DB() splits := []string{"a", "b", "c", "d", "e", "f"} @@ -519,7 +519,7 @@ func TestMultiRangeBoundedBatchScan(t *testing.T) { func TestMultiRangeBoundedBatchScanPartialResponses(t *testing.T) { defer leaktest.AfterTest(t)() s, _ := startNoSplitMergeServer(t) - ctx := context.TODO() + ctx := context.Background() defer s.Stopper().Stop(ctx) db := s.DB() @@ -763,7 +763,7 @@ func checkResumeSpanDelRangeResults( func TestMultiRangeBoundedBatchDelRange(t *testing.T) { defer leaktest.AfterTest(t)() s, _ := startNoSplitMergeServer(t) - ctx := context.TODO() + ctx := context.Background() defer s.Stopper().Stop(ctx) db := s.DB() @@ -824,7 +824,7 @@ func TestMultiRangeBoundedBatchDelRange(t *testing.T) { func TestMultiRangeBoundedBatchDelRangePartialResponses(t *testing.T) { defer leaktest.AfterTest(t)() s, _ := startNoSplitMergeServer(t) - ctx := context.TODO() + ctx := context.Background() defer s.Stopper().Stop(ctx) db := s.DB() @@ -1012,7 +1012,7 @@ func TestMultiRangeBoundedBatchDelRangePartialResponses(t *testing.T) { func TestMultiRangeBoundedBatchDelRangeBoundary(t *testing.T) { defer leaktest.AfterTest(t)() s, _ := startNoSplitMergeServer(t) - ctx := context.TODO() + ctx := context.Background() defer s.Stopper().Stop(ctx) db := s.DB() @@ -1058,7 +1058,7 @@ func TestMultiRangeBoundedBatchDelRangeBoundary(t *testing.T) { func TestMultiRangeEmptyAfterTruncate(t *testing.T) { defer leaktest.AfterTest(t)() s, _ := startNoSplitMergeServer(t) - ctx := context.TODO() + ctx := context.Background() defer s.Stopper().Stop(ctx) db := s.DB() if err := setupMultipleRanges(ctx, db, "c", "d"); err != nil { @@ -1081,7 +1081,7 @@ func TestMultiRangeEmptyAfterTruncate(t *testing.T) { func TestMultiRequestBatchWithFwdAndReverseRequests(t *testing.T) { defer leaktest.AfterTest(t)() s, _ := startNoSplitMergeServer(t) - ctx := context.TODO() + ctx := context.Background() defer s.Stopper().Stop(ctx) db := s.DB() if err := setupMultipleRanges(ctx, db, "a", "b"); err != nil { @@ -1103,7 +1103,7 @@ func TestMultiRequestBatchWithFwdAndReverseRequests(t *testing.T) { func TestMultiRangeScanReverseScanDeleteResolve(t *testing.T) { defer leaktest.AfterTest(t)() s, _ := startNoSplitMergeServer(t) - ctx := context.TODO() + ctx := context.Background() defer s.Stopper().Stop(ctx) db := s.DB() if err := setupMultipleRanges(ctx, db, "b"); err != nil { @@ -1167,7 +1167,7 @@ func TestMultiRangeScanReverseScanInconsistent(t *testing.T) { } { t.Run(rc.String(), func(t *testing.T) { s, _ := startNoSplitMergeServer(t) - ctx := context.TODO() + ctx := context.Background() defer s.Stopper().Stop(ctx) db := s.DB() if err := setupMultipleRanges(ctx, db, "b"); err != nil { @@ -1255,13 +1255,13 @@ func TestMultiRangeScanReverseScanInconsistent(t *testing.T) { func TestParallelSender(t *testing.T) { defer leaktest.AfterTest(t)() s, db := startNoSplitMergeServer(t) - defer s.Stopper().Stop(context.TODO()) - ctx := context.TODO() + defer s.Stopper().Stop(context.Background()) + ctx := context.Background() // Split into multiple ranges. 
splitKeys := []string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j"} for _, key := range splitKeys { - if err := db.AdminSplit(context.TODO(), key, key, hlc.MaxTimestamp /* expirationTime */); err != nil { + if err := db.AdminSplit(context.Background(), key, key, hlc.MaxTimestamp /* expirationTime */); err != nil { t.Fatal(err) } } @@ -1288,7 +1288,7 @@ func TestParallelSender(t *testing.T) { psCount = newPSCount // Scan across all rows. - if rows, err := db.Scan(context.TODO(), "a", "z", 0); err != nil { + if rows, err := db.Scan(context.Background(), "a", "z", 0); err != nil { t.Fatalf("unexpected error on Scan: %s", err) } else if l := len(rows); l != len(splitKeys) { t.Fatalf("expected %d rows; got %d", len(splitKeys), l) @@ -1306,13 +1306,13 @@ func initReverseScanTestEnv(s serverutils.TestServerInterface, t *testing.T) *kv // ["", "b"),["b", "e") ,["e", "g") and ["g", "\xff\xff"). for _, key := range []string{"b", "e", "g"} { // Split the keyspace at the given key. - if err := db.AdminSplit(context.TODO(), key, key, hlc.MaxTimestamp /* expirationTime */); err != nil { + if err := db.AdminSplit(context.Background(), key, key, hlc.MaxTimestamp /* expirationTime */); err != nil { t.Fatal(err) } } // Write keys before, at, and after the split key. for _, key := range []string{"a", "b", "c", "d", "e", "f", "g", "h"} { - if err := db.Put(context.TODO(), key, "value"); err != nil { + if err := db.Put(context.Background(), key, "value"); err != nil { t.Fatal(err) } } @@ -1324,9 +1324,9 @@ func initReverseScanTestEnv(s serverutils.TestServerInterface, t *testing.T) *kv func TestSingleRangeReverseScan(t *testing.T) { defer leaktest.AfterTest(t)() s, _ := startNoSplitMergeServer(t) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) db := initReverseScanTestEnv(s, t) - ctx := context.TODO() + ctx := context.Background() // Case 1: Request.EndKey is in the middle of the range. if rows, err := db.ReverseScan(ctx, "b", "d", 0); err != nil { @@ -1369,9 +1369,9 @@ func TestSingleRangeReverseScan(t *testing.T) { func TestMultiRangeReverseScan(t *testing.T) { defer leaktest.AfterTest(t)() s, _ := startNoSplitMergeServer(t) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) db := initReverseScanTestEnv(s, t) - ctx := context.TODO() + ctx := context.Background() // Case 1: Request.EndKey is in the middle of the range. if rows, pErr := db.ReverseScan(ctx, "a", "d", 0); pErr != nil { @@ -1399,16 +1399,16 @@ func TestMultiRangeReverseScan(t *testing.T) { func TestBatchPutWithConcurrentSplit(t *testing.T) { defer leaktest.AfterTest(t)() s, db := startNoSplitMergeServer(t) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) // Split first using the default client and scan to make sure that // the range descriptor cache reflects the split. 
for _, key := range []string{"b", "f"} { - if err := db.AdminSplit(context.TODO(), key, key, hlc.MaxTimestamp /* expirationTime */); err != nil { + if err := db.AdminSplit(context.Background(), key, key, hlc.MaxTimestamp /* expirationTime */); err != nil { t.Fatal(err) } } - if rows, err := db.Scan(context.TODO(), "a", "z", 0); err != nil { + if rows, err := db.Scan(context.Background(), "a", "z", 0); err != nil { t.Fatal(err) } else if l := len(rows); l != 0 { t.Fatalf("expected empty keyspace; got %d rows", l) @@ -1445,7 +1445,7 @@ func TestBatchPutWithConcurrentSplit(t *testing.T) { for i, key := range []string{"a1", "b1", "c1", "d1", "f1"} { b.Put(key, fmt.Sprintf("value-%d", i)) } - if err := db.Run(context.TODO(), b); err != nil { + if err := db.Run(context.Background(), b); err != nil { t.Fatal(err) } } @@ -1455,17 +1455,17 @@ func TestBatchPutWithConcurrentSplit(t *testing.T) { func TestReverseScanWithSplitAndMerge(t *testing.T) { defer leaktest.AfterTest(t)() s, _ := startNoSplitMergeServer(t) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) db := initReverseScanTestEnv(s, t) // Case 1: An encounter with a range split. // Split the range ["b", "e") at "c". - if err := db.AdminSplit(context.TODO(), "c", "c", hlc.MaxTimestamp /* expirationTime */); err != nil { + if err := db.AdminSplit(context.Background(), "c", "c", hlc.MaxTimestamp /* expirationTime */); err != nil { t.Fatal(err) } // The ReverseScan will run into a stale descriptor. - if rows, err := db.ReverseScan(context.TODO(), "a", "d", 0); err != nil { + if rows, err := db.ReverseScan(context.Background(), "a", "d", 0); err != nil { t.Fatalf("unexpected error on ReverseScan: %s", err) } else if l := len(rows); l != 3 { t.Errorf("expected 3 rows; got %d", l) @@ -1473,10 +1473,10 @@ func TestReverseScanWithSplitAndMerge(t *testing.T) { // Case 2: encounter with range merge . // Merge the range ["e", "g") and ["g", "\xff\xff") . - if err := db.AdminMerge(context.TODO(), "e"); err != nil { + if err := db.AdminMerge(context.Background(), "e"); err != nil { t.Fatal(err) } - if rows, err := db.ReverseScan(context.TODO(), "d", "g", 0); err != nil { + if rows, err := db.ReverseScan(context.Background(), "d", "g", 0); err != nil { t.Fatalf("unexpected error on ReverseScan: %s", err) } else if l := len(rows); l != 3 { t.Errorf("expected 3 rows; got %d", l) @@ -1489,8 +1489,8 @@ func TestBadRequest(t *testing.T) { "I suspect the reason is that there is no longer a single Range " + "that spans [KeyMin, z), so we're not hitting the error.") s, db := startNoSplitMergeServer(t) - defer s.Stopper().Stop(context.TODO()) - ctx := context.TODO() + defer s.Stopper().Stop(context.Background()) + ctx := context.Background() // Write key "a". if err := db.Put(ctx, "a", "value"); err != nil { @@ -1568,7 +1568,7 @@ func TestPropagateTxnOnError(t *testing.T) { s, _, _ := serverutils.StartServer(t, base.TestServerArgs{Knobs: base.TestingKnobs{Store: &storeKnobs}}) - ctx := context.TODO() + ctx := context.Background() defer s.Stopper().Stop(ctx) db := s.DB() @@ -1734,7 +1734,7 @@ func TestAsyncAbortPoisons(t *testing.T) { } // Run a high-priority txn that will abort the previous one. 
- if err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + if err := db.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { if err := txn.SetUserPriority(roachpb.MaxUserPriority); err != nil { return err } diff --git a/pkg/kv/kvclient/kvcoord/dist_sender_test.go b/pkg/kv/kvclient/kvcoord/dist_sender_test.go index 3e16002ab10a..65ff53e147d4 100644 --- a/pkg/kv/kvclient/kvcoord/dist_sender_test.go +++ b/pkg/kv/kvclient/kvcoord/dist_sender_test.go @@ -200,7 +200,7 @@ func newNodeDesc(nodeID roachpb.NodeID) *roachpb.NodeDescriptor { func TestSendRPCOrder(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -359,7 +359,7 @@ func TestSendRPCOrder(t *testing.T) { ds := NewDistSender(cfg, g) for n, tc := range testCases { - log.Infof(context.TODO(), "testcase %d", n) + log.Infof(context.Background(), "testcase %d", n) verifyCall = makeVerifier(tc.expReplica) { @@ -378,11 +378,11 @@ func TestSendRPCOrder(t *testing.T) { } ds.leaseHolderCache.Update( - context.TODO(), rangeID, roachpb.StoreID(0), + context.Background(), rangeID, roachpb.StoreID(0), ) if tc.leaseHolder > 0 { ds.leaseHolderCache.Update( - context.TODO(), rangeID, descriptor.InternalReplicas[tc.leaseHolder-1].StoreID, + context.Background(), rangeID, descriptor.InternalReplicas[tc.leaseHolder-1].StoreID, ) } @@ -496,7 +496,7 @@ var threeReplicaMockRangeDescriptorDB = mockRangeDescriptorDBForDescs( func TestImmutableBatchArgs(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -554,7 +554,7 @@ func TestImmutableBatchArgs(t *testing.T) { func TestRetryOnNotLeaseHolderError(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -606,7 +606,7 @@ func TestRetryOnNotLeaseHolderError(t *testing.T) { t.Errorf("The command did not retry") } rangeID := roachpb.RangeID(2) - if cur, ok := ds.leaseHolderCache.Lookup(context.TODO(), rangeID); !ok { + if cur, ok := ds.leaseHolderCache.Lookup(context.Background(), rangeID); !ok { t.Errorf("lease holder cache was not updated: expected %+v", leaseHolder) } else if cur != leaseHolder.StoreID { t.Errorf("lease holder cache was not updated: expected %d, got %d", leaseHolder.StoreID, cur) @@ -619,7 +619,7 @@ func TestRetryOnNotLeaseHolderError(t *testing.T) { func TestBackoffOnNotLeaseHolderErrorDuringTransfer(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -794,7 +794,7 @@ func TestDistSenderDownNodeEvictLeaseholder(t *testing.T) { func TestRetryOnDescriptorLookupError(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := 
rpc.NewInsecureTestingContext(clock, stopper) @@ -844,7 +844,7 @@ func TestEvictOnFirstRangeGossip(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -960,7 +960,7 @@ func TestEvictCacheOnError(t *testing.T) { for i, tc := range testCases { stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -971,7 +971,7 @@ func TestEvictCacheOnError(t *testing.T) { } first := true - ctx, cancel := context.WithCancel(context.TODO()) + ctx, cancel := context.WithCancel(context.Background()) var testFn simpleSendFn = func( ctx context.Context, @@ -1013,14 +1013,14 @@ func TestEvictCacheOnError(t *testing.T) { Settings: cluster.MakeTestingClusterSettings(), } ds := NewDistSender(cfg, g) - ds.leaseHolderCache.Update(context.TODO(), 1, leaseHolder.StoreID) + ds.leaseHolderCache.Update(context.Background(), 1, leaseHolder.StoreID) key := roachpb.Key("a") put := roachpb.NewPut(key, roachpb.MakeValueFromString("value")) if _, pErr := kv.SendWrapped(ctx, ds, put); pErr != nil && !testutils.IsPError(pErr, errString) && !testutils.IsError(pErr.GoError(), ctx.Err().Error()) { t.Errorf("put encountered unexpected error: %s", pErr) } - if _, ok := ds.leaseHolderCache.Lookup(context.TODO(), 1); ok != !tc.shouldClearLeaseHolder { + if _, ok := ds.leaseHolderCache.Lookup(context.Background(), 1); ok != !tc.shouldClearLeaseHolder { t.Errorf("%d: lease holder cache eviction: shouldClearLeaseHolder=%t, but value is %t", i, tc.shouldClearLeaseHolder, ok) } if cachedDesc, err := ds.rangeCache.GetCachedRangeDescriptor(roachpb.RKey(key), false /* inverted */); err != nil { @@ -1034,7 +1034,7 @@ func TestEvictCacheOnError(t *testing.T) { func TestEvictCacheOnUnknownLeaseHolder(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -1099,7 +1099,7 @@ func TestEvictCacheOnUnknownLeaseHolder(t *testing.T) { func TestRetryOnWrongReplicaError(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -1197,7 +1197,7 @@ func TestRetryOnWrongReplicaError(t *testing.T) { func TestRetryOnWrongReplicaErrorWithSuggestion(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -1288,7 +1288,7 @@ func TestRetryOnWrongReplicaErrorWithSuggestion(t *testing.T) { func TestGetFirstRangeDescriptor(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) n := simulation.NewNetwork(stopper, 3, true, zonepb.DefaultZoneConfigRef()) for _, node := range n.Nodes { @@ -1339,7 +1339,7 @@ func TestGetFirstRangeDescriptor(t *testing.T) { func 
TestSendRPCRetry(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -1414,7 +1414,7 @@ func TestSendRPCRetry(t *testing.T) { func TestSendRPCRangeNotFoundError(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -1501,7 +1501,7 @@ func TestSendRPCRangeNotFoundError(t *testing.T) { func TestGetNodeDescriptor(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -1528,7 +1528,7 @@ func TestGetNodeDescriptor(t *testing.T) { func TestMultiRangeGapReverse(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -1620,7 +1620,7 @@ func TestMultiRangeGapReverse(t *testing.T) { func TestMultiRangeMergeStaleDescriptor(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -1724,7 +1724,7 @@ func TestMultiRangeMergeStaleDescriptor(t *testing.T) { func TestRangeLookupOptionOnReverseScan(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -1761,7 +1761,7 @@ func TestRangeLookupOptionOnReverseScan(t *testing.T) { func TestClockUpdateOnResponse(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -1818,7 +1818,7 @@ func TestClockUpdateOnResponse(t *testing.T) { func TestTruncateWithSpanAndDescriptor(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -1944,7 +1944,7 @@ func TestTruncateWithSpanAndDescriptor(t *testing.T) { func TestTruncateWithLocalSpanAndDescriptor(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -2072,7 +2072,7 @@ func TestTruncateWithLocalSpanAndDescriptor(t *testing.T) { func TestMultiRangeWithEndTxn(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) 
clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -2272,7 +2272,7 @@ func TestMultiRangeWithEndTxn(t *testing.T) { func TestParallelCommitSplitFromQueryIntents(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -2391,7 +2391,7 @@ func TestParallelCommitSplitFromQueryIntents(t *testing.T) { func TestParallelCommitsDetectIntentMissingCause(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -2515,7 +2515,7 @@ func TestParallelCommitsDetectIntentMissingCause(t *testing.T) { func TestCountRanges(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -2616,7 +2616,7 @@ func TestSenderTransport(t *testing.T) { func TestGatewayNodeID(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -2669,7 +2669,7 @@ func TestGatewayNodeID(t *testing.T) { func TestMultipleErrorsMerged(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -2881,7 +2881,7 @@ func TestMultipleErrorsMerged(t *testing.T) { func TestErrorIndexAlignment(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -3023,7 +3023,7 @@ func TestErrorIndexAlignment(t *testing.T) { func TestCanSendToFollower(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) old := CanSendToFollower defer func() { CanSendToFollower = old }() @@ -3112,7 +3112,7 @@ func TestCanSendToFollower(t *testing.T) { ds := NewDistSender(cfg, g) ds.clusterID = &base.ClusterIDContainer{} // set 2 to be the leaseholder - ds.LeaseHolderCache().Update(context.TODO(), 2, 2) + ds.LeaseHolderCache().Update(context.Background(), 2, 2) if _, pErr := kv.SendWrappedWith(context.Background(), ds, c.header, c.msg); !testutils.IsPError(pErr, "boom") { t.Fatalf("%d: unexpected error: %v", i, pErr) } @@ -3127,7 +3127,7 @@ func TestCanSendToFollower(t *testing.T) { func TestEvictMetaRange(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) testutils.RunTrueAndFalse(t, "hasSuggestedRange", func(t *testing.T, hasSuggestedRange bool) { splitKey := keys.RangeMetaKey(roachpb.RKey("b")) @@ -3318,7 +3318,7 @@ func TestEvictMetaRange(t *testing.T) { 
func TestConnectionClass(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) // Create a mock range descriptor DB that can resolve made up meta1, node // liveness and user ranges. rDB := MockRangeDescriptorDB(func(key roachpb.RKey, _ bool) ( @@ -3402,7 +3402,7 @@ func TestConnectionClass(t *testing.T) { Key: key, }, }) - _, err := ds.Send(context.TODO(), ba) + _, err := ds.Send(context.Background(), ba) require.Nil(t, err) }) } @@ -3414,7 +3414,7 @@ func TestConnectionClass(t *testing.T) { func TestEvictionTokenCoalesce(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) initGen := int64(1) testUserRangeDescriptor := roachpb.RangeDescriptor{ diff --git a/pkg/kv/kvclient/kvcoord/leaseholder_cache_test.go b/pkg/kv/kvclient/kvcoord/leaseholder_cache_test.go index 9b309b8b4ee8..5f5ed80713cb 100644 --- a/pkg/kv/kvclient/kvcoord/leaseholder_cache_test.go +++ b/pkg/kv/kvclient/kvcoord/leaseholder_cache_test.go @@ -26,7 +26,7 @@ func staticSize(size int64) func() int64 { func TestLeaseHolderCache(t *testing.T) { defer leaktest.AfterTest(t)() - ctx := context.TODO() + ctx := context.Background() cacheSize := (1 << 4) * defaultShards lc := NewLeaseHolderCache(staticSize(int64(cacheSize))) if repStoreID, ok := lc.Lookup(ctx, 12); ok { @@ -65,7 +65,7 @@ func TestLeaseHolderCache(t *testing.T) { func BenchmarkLeaseHolderCacheParallel(b *testing.B) { defer leaktest.AfterTest(b)() - ctx := context.TODO() + ctx := context.Background() cacheSize := (1 << 4) * defaultShards lc := NewLeaseHolderCache(staticSize(int64(cacheSize))) numRanges := 2 * len(lc.shards) diff --git a/pkg/kv/kvclient/kvcoord/range_cache_test.go b/pkg/kv/kvclient/kvcoord/range_cache_test.go index 3a96bb8ab3f2..68b7c89c4182 100644 --- a/pkg/kv/kvclient/kvcoord/range_cache_test.go +++ b/pkg/kv/kvclient/kvcoord/range_cache_test.go @@ -358,7 +358,7 @@ func TestRangeCacheAssumptions(t *testing.T) { func TestRangeCache(t *testing.T) { defer leaktest.AfterTest(t)() db := initTestDescriptorDB(t) - ctx := context.TODO() + ctx := context.Background() // Totally uncached range. // Retrieves [meta(min),meta(g)) and [a,b). @@ -452,7 +452,7 @@ func TestRangeCache(t *testing.T) { func TestRangeCacheCoalescedRequests(t *testing.T) { defer leaktest.AfterTest(t)() db := initTestDescriptorDB(t) - ctx := context.TODO() + ctx := context.Background() pauseLookupResumeAndAssert := func(key string, expected int64) { var wg sync.WaitGroup @@ -573,7 +573,7 @@ func TestRangeCacheContextCancellation(t *testing.T) { func TestRangeCacheDetectSplit(t *testing.T) { defer leaktest.AfterTest(t)() db := initTestDescriptorDB(t) - ctx := context.TODO() + ctx := context.Background() pauseLookupResumeAndAssert := func(key string, expected int64, evictToken *EvictionToken) { var wg sync.WaitGroup @@ -644,7 +644,7 @@ func TestRangeCacheDetectSplit(t *testing.T) { func TestRangeCacheDetectSplitReverseScan(t *testing.T) { defer leaktest.AfterTest(t)() db := initTestDescriptorDB(t) - ctx := context.TODO() + ctx := context.Background() // A request initially looks up the range descriptor ["a"-"b"). 
doLookup(ctx, db.cache, "aa") @@ -899,7 +899,7 @@ func TestRangeCacheHandleDoubleSplit(t *testing.T) { func TestRangeCacheUseIntents(t *testing.T) { defer leaktest.AfterTest(t)() db := initTestDescriptorDB(t) - ctx := context.TODO() + ctx := context.Background() // A request initially looks up the range descriptor ["a"-"b"). abDesc, evictToken := doLookup(ctx, db.cache, "aa") @@ -1037,7 +1037,7 @@ func TestRangeCacheClearOverlapping(t *testing.T) { // simply to increment the meta key for StartKey, not StartKey itself. func TestRangeCacheClearOverlappingMeta(t *testing.T) { defer leaktest.AfterTest(t)() - ctx := context.TODO() + ctx := context.Background() firstDesc := roachpb.RangeDescriptor{ StartKey: roachpb.RKeyMin, @@ -1148,7 +1148,7 @@ func TestGetCachedRangeDescriptorInverted(t *testing.T) { func TestRangeCacheGeneration(t *testing.T) { defer leaktest.AfterTest(t)() - ctx := context.TODO() + ctx := context.Background() descAM1 := &roachpb.RangeDescriptor{ StartKey: roachpb.RKey("a"), diff --git a/pkg/kv/kvclient/kvcoord/range_iter_test.go b/pkg/kv/kvclient/kvcoord/range_iter_test.go index c761ffed361d..dda122ff6bee 100644 --- a/pkg/kv/kvclient/kvcoord/range_iter_test.go +++ b/pkg/kv/kvclient/kvcoord/range_iter_test.go @@ -54,7 +54,7 @@ func init() { func TestRangeIterForward(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -89,7 +89,7 @@ func TestRangeIterForward(t *testing.T) { func TestRangeIterSeekForward(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -127,7 +127,7 @@ func TestRangeIterSeekForward(t *testing.T) { func TestRangeIterReverse(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -162,7 +162,7 @@ func TestRangeIterReverse(t *testing.T) { func TestRangeIterSeekReverse(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) diff --git a/pkg/kv/kvclient/kvcoord/send_test.go b/pkg/kv/kvclient/kvcoord/send_test.go index ceaadfaef3cf..6902cfd033fd 100644 --- a/pkg/kv/kvclient/kvcoord/send_test.go +++ b/pkg/kv/kvclient/kvcoord/send_test.go @@ -52,7 +52,7 @@ func TestSendToOneClient(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) @@ -122,7 +122,7 @@ func TestComplexScenarios(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) rpcContext := rpc.NewInsecureTestingContext(clock, stopper) diff --git a/pkg/kv/kvclient/kvcoord/split_test.go 
b/pkg/kv/kvclient/kvcoord/split_test.go index e4a32874761c..53100c21a167 100644 --- a/pkg/kv/kvclient/kvcoord/split_test.go +++ b/pkg/kv/kvclient/kvcoord/split_test.go @@ -62,7 +62,7 @@ func startTestWriter( return default: first := true - err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + err := db.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { if first && txnChannel != nil { select { case txnChannel <- struct{}{}: @@ -99,7 +99,7 @@ func TestRangeSplitMeta(t *testing.T) { s := createTestDB(t) defer s.Stop() - ctx := context.TODO() + ctx := context.Background() splitKeys := []roachpb.RKey{roachpb.RKey("G"), keys.RangeMetaKey(roachpb.RKey("F")), keys.RangeMetaKey(roachpb.RKey("K")), keys.RangeMetaKey(roachpb.RKey("H"))} @@ -146,7 +146,7 @@ func TestRangeSplitsWithConcurrentTxns(t *testing.T) { go startTestWriter(s.DB, int64(i), 1<<7, &wg, &retries, txnChannel, done, t) } - ctx := context.TODO() + ctx := context.Background() // Execute the consecutive splits. for _, splitKey := range splitKeys { // Allow txns to start before initiating split. @@ -154,7 +154,7 @@ func TestRangeSplitsWithConcurrentTxns(t *testing.T) { <-txnChannel } log.Infof(ctx, "starting split at key %q...", splitKey) - if pErr := s.DB.AdminSplit(context.TODO(), splitKey, splitKey, hlc.MaxTimestamp /* expirationTime */); pErr != nil { + if pErr := s.DB.AdminSplit(context.Background(), splitKey, splitKey, hlc.MaxTimestamp /* expirationTime */); pErr != nil { t.Error(pErr) } log.Infof(ctx, "split at key %q complete", splitKey) @@ -200,7 +200,7 @@ func TestRangeSplitsWithWritePressure(t *testing.T) { wg.Add(1) go startTestWriter(s.DB, int64(0), 1<<15, &wg, nil, nil, done, t) - ctx := context.TODO() + ctx := context.Background() // Check that we split 5 times in allotted time. testutils.SucceedsSoon(t, func() error { @@ -243,7 +243,7 @@ func TestRangeSplitsWithSameKeyTwice(t *testing.T) { }) defer s.Stop() - ctx := context.TODO() + ctx := context.Background() splitKey := roachpb.Key("aa") log.Infof(ctx, "starting split at key %q...", splitKey) @@ -270,7 +270,7 @@ func TestRangeSplitsStickyBit(t *testing.T) { }) defer s.Stop() - ctx := context.TODO() + ctx := context.Background() splitKey := roachpb.RKey("aa") descKey := keys.RangeDescriptorKey(splitKey) diff --git a/pkg/kv/kvclient/kvcoord/txn_coord_sender_test.go b/pkg/kv/kvclient/kvcoord/txn_coord_sender_test.go index 977261e7f351..86e02ed2e489 100644 --- a/pkg/kv/kvclient/kvcoord/txn_coord_sender_test.go +++ b/pkg/kv/kvclient/kvcoord/txn_coord_sender_test.go @@ -104,7 +104,7 @@ func TestTxnCoordSenderBeginTransaction(t *testing.T) { func TestTxnCoordSenderKeyRanges(t *testing.T) { defer leaktest.AfterTest(t)() - ctx := context.TODO() + ctx := context.Background() ranges := []struct { start, end roachpb.Key }{ @@ -1072,7 +1072,7 @@ func TestTxnCommit(t *testing.T) { value := []byte("value") // Test a write txn commit. - if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + if err := s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { key := []byte("key-commit") return txn.Put(ctx, key, value) }); err != nil { @@ -1081,7 +1081,7 @@ func TestTxnCommit(t *testing.T) { checkTxnMetrics(t, metrics, "commit txn", 1 /* commits */, 0 /* commits1PC */, 0, 0) // Test a read-only txn. 
- if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + if err := s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { key := []byte("key-commit") _, err := txn.Get(ctx, key) return err @@ -1100,7 +1100,7 @@ func TestTxnOnePhaseCommit(t *testing.T) { value := []byte("value") - ctx := context.TODO() + ctx := context.Background() if err := s.DB.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { key := []byte("key-commit") b := txn.NewBatch() @@ -1135,7 +1135,7 @@ func TestTxnAbortCount(t *testing.T) { intentionalErrText := "intentional error to cause abort" // Test aborted transaction. - if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + if err := s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { key := []byte("key-abort") if err := txn.Put(ctx, key, value); err != nil { @@ -1216,7 +1216,7 @@ func TestTxnDurations(t *testing.T) { const incr int64 = 1000 for i := 0; i < puts; i++ { key := roachpb.Key(fmt.Sprintf("key-txn-durations-%d", i)) - if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + if err := s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { if err := txn.Put(ctx, key, []byte("val")); err != nil { return err } @@ -1710,7 +1710,7 @@ func TestAbortReadOnlyTransaction(t *testing.T) { sender, ) db := kv.NewDB(testutils.MakeAmbientCtx(), factory, clock) - if err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + if err := db.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { return errors.New("foo") }); err == nil { t.Fatal("expected error on abort") @@ -1858,7 +1858,7 @@ func TestTransactionKeyNotChangedInRestart(t *testing.T) { ) db := kv.NewDB(testutils.MakeAmbientCtx(), factory, clock) - if err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + if err := db.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { defer func() { attempt++ }() b := txn.NewBatch() b.Put(keys[attempt], "b") diff --git a/pkg/kv/kvclient/kvcoord/txn_correctness_test.go b/pkg/kv/kvclient/kvcoord/txn_correctness_test.go index 51b621c6ae00..c62c4fdd6435 100644 --- a/pkg/kv/kvclient/kvcoord/txn_correctness_test.go +++ b/pkg/kv/kvclient/kvcoord/txn_correctness_test.go @@ -731,7 +731,7 @@ func (hv *historyVerifier) runCmds( ) (string, map[string]int64, error) { var strs []string env := map[string]int64{} - err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + err := db.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { txn.SetDebugName(txnName) for _, c := range cmds { c.historyIdx = hv.idx @@ -763,7 +763,7 @@ func (hv *historyVerifier) runTxn( prev.ch <- err } - err := db.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + err := db.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { // If this is 2nd attempt, and a retry wasn't expected, return a // retry error which results in further histories being enumerated. 
if retry++; retry > 1 { diff --git a/pkg/kv/kvclient/kvcoord/txn_test.go b/pkg/kv/kvclient/kvcoord/txn_test.go index e410994d3d28..1847d0e7d5e1 100644 --- a/pkg/kv/kvclient/kvcoord/txn_test.go +++ b/pkg/kv/kvclient/kvcoord/txn_test.go @@ -45,7 +45,7 @@ func TestTxnDBBasics(t *testing.T) { for _, commit := range []bool{true, false} { key := []byte(fmt.Sprintf("key-%t", commit)) - err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + err := s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { // Put transactional value. if err := txn.Put(ctx, key, value); err != nil { return err @@ -80,7 +80,7 @@ func TestTxnDBBasics(t *testing.T) { } // Verify the value is now visible on commit == true, and not visible otherwise. - gr, err := s.DB.Get(context.TODO(), key) + gr, err := s.DB.Get(context.Background(), key) if commit { if err != nil || !gr.Exists() || !bytes.Equal(gr.ValueBytes(), value) { t.Errorf("expected success reading value: %+v, %s", gr.ValueBytes(), err) @@ -107,7 +107,7 @@ func BenchmarkSingleRoundtripWithLatency(b *testing.B) { key := roachpb.Key("key") b.ResetTimer() for i := 0; i < b.N; i++ { - if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + if err := s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() b.Put(key, fmt.Sprintf("value-%d", i)) return txn.CommitInBatch(ctx, b) @@ -136,13 +136,13 @@ func TestLostUpdate(t *testing.T) { start := make(chan struct{}) go func() { <-start - done <- s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + done <- s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { return txn.Put(ctx, key, "hi") }) }() firstAttempt := true - if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + if err := s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { // Issue a read to get initial value. gr, err := txn.Get(ctx, key) if err != nil { @@ -179,7 +179,7 @@ func TestLostUpdate(t *testing.T) { } // Verify final value. - gr, err := s.DB.Get(context.TODO(), key) + gr, err := s.DB.Get(context.Background(), key) if err != nil { t.Fatal(err) } @@ -200,7 +200,7 @@ func TestPriorityRatchetOnAbortOrPush(t *testing.T) { defer s.Stop() pushByReading := func(key roachpb.Key) { - if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + if err := s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { if err := txn.SetUserPriority(roachpb.MaxUserPriority); err != nil { t.Fatal(err) } @@ -211,7 +211,7 @@ func TestPriorityRatchetOnAbortOrPush(t *testing.T) { } } abortByWriting := func(key roachpb.Key) { - if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + if err := s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { if err := txn.SetUserPriority(roachpb.MaxUserPriority); err != nil { t.Fatal(err) } @@ -224,7 +224,7 @@ func TestPriorityRatchetOnAbortOrPush(t *testing.T) { // Try both read and write. 
for _, read := range []bool{true, false} { var iteration int - if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + if err := s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { defer func() { iteration++ }() key := roachpb.Key(fmt.Sprintf("read=%t", read)) @@ -278,7 +278,7 @@ func TestTxnTimestampRegression(t *testing.T) { keyA := "a" keyB := "b" - err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + err := s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { // Put transactional value. if err := txn.Put(ctx, keyA, "value1"); err != nil { return err @@ -287,12 +287,12 @@ func TestTxnTimestampRegression(t *testing.T) { // Attempt to read in another txn (this will push timestamp of transaction). conflictTxn := kv.NewTxn(ctx, s.DB, 0 /* gatewayNodeID */) conflictTxn.TestingSetPriority(enginepb.MaxTxnPriority) - if _, err := conflictTxn.Get(context.TODO(), keyA); err != nil { + if _, err := conflictTxn.Get(context.Background(), keyA); err != nil { return err } // Now, read again outside of txn to warmup timestamp cache with higher timestamp. - if _, err := s.DB.Get(context.TODO(), keyB); err != nil { + if _, err := s.DB.Get(context.Background(), keyB); err != nil { return err } @@ -318,7 +318,7 @@ func TestTxnLongDelayBetweenWritesWithConcurrentRead(t *testing.T) { ch := make(chan struct{}) errChan := make(chan error) go func() { - errChan <- s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + errChan <- s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { // Put transactional value. if err := txn.Put(ctx, keyA, "value1"); err != nil { return err @@ -336,7 +336,7 @@ func TestTxnLongDelayBetweenWritesWithConcurrentRead(t *testing.T) { <-ch // Delay for longer than the cache window. s.Manual.Increment((tscache.MinRetentionWindow + time.Second).Nanoseconds()) - if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + if err := s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { // Attempt to get first keyB. gr1, err := txn.Get(ctx, keyB) if err != nil { @@ -383,7 +383,7 @@ func TestTxnRepeatGetWithRangeSplit(t *testing.T) { ch := make(chan struct{}) errChan := make(chan error) go func() { - errChan <- s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + errChan <- s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { // Put transactional value. if err := txn.Put(ctx, keyA, "value1"); err != nil { return err @@ -400,7 +400,7 @@ func TestTxnRepeatGetWithRangeSplit(t *testing.T) { // Wait till txnA finish put(a). <-ch - if err := s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + if err := s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { // First get keyC, value will be nil. gr1, err := txn.Get(ctx, keyC) if err != nil { @@ -408,14 +408,14 @@ func TestTxnRepeatGetWithRangeSplit(t *testing.T) { } s.Manual.Increment(time.Second.Nanoseconds()) // Split range by keyB. - if err := s.DB.AdminSplit(context.TODO(), splitKey, splitKey, hlc.MaxTimestamp /* expirationTime */); err != nil { + if err := s.DB.AdminSplit(context.Background(), splitKey, splitKey, hlc.MaxTimestamp /* expirationTime */); err != nil { t.Fatal(err) } // Wait till split complete. // Check that we split 1 times in allotted time. testutils.SucceedsSoon(t, func() error { // Scan the meta records. 
- rows, serr := s.DB.Scan(context.TODO(), keys.Meta2Prefix, keys.MetaMax, 0) + rows, serr := s.DB.Scan(context.Background(), keys.Meta2Prefix, keys.MetaMax, 0) if serr != nil { t.Fatalf("failed to scan meta2 keys: %s", serr) } @@ -459,7 +459,7 @@ func TestTxnRestartedSerializableTimestampRegression(t *testing.T) { errChan := make(chan error) var count int go func() { - errChan <- s.DB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + errChan <- s.DB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { count++ // Use a low priority for the transaction so that it can be pushed. if err := txn.SetUserPriority(roachpb.MinUserPriority); err != nil { @@ -484,11 +484,11 @@ func TestTxnRestartedSerializableTimestampRegression(t *testing.T) { // Wait until txnA finishes put(a). <-ch // Attempt to get keyA, which will push txnA. - if _, err := s.DB.Get(context.TODO(), keyA); err != nil { + if _, err := s.DB.Get(context.Background(), keyA); err != nil { t.Fatal(err) } // Do a read at keyB to cause txnA to forward timestamp. - if _, err := s.DB.Get(context.TODO(), keyB); err != nil { + if _, err := s.DB.Get(context.Background(), keyB); err != nil { t.Fatal(err) } // Notify txnA to commit. diff --git a/pkg/kv/kvserver/addressing_test.go b/pkg/kv/kvserver/addressing_test.go index 85a8f36b3c7d..9979a43784b3 100644 --- a/pkg/kv/kvserver/addressing_test.go +++ b/pkg/kv/kvserver/addressing_test.go @@ -56,7 +56,7 @@ func meta2Key(key roachpb.RKey) []byte { func TestUpdateRangeAddressing(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) store, _ := createTestStore(t, testStoreOpts{createSystemRanges: false}, stopper) // When split is false, merging treats the right range as the merged // range. With merging, expNewLeft indicates the addressing keys we diff --git a/pkg/kv/kvserver/allocator_test.go b/pkg/kv/kvserver/allocator_test.go index f529937cedbf..2d42cbf2b42f 100644 --- a/pkg/kv/kvserver/allocator_test.go +++ b/pkg/kv/kvserver/allocator_test.go @@ -5516,7 +5516,7 @@ func TestAllocatorRebalanceAway(t *testing.T) { } stopper, g, _, a, _ := createTestAllocator(10, false /* deterministic */) - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) gossiputil.NewStoreGossiper(g).GossipStores(stores, t) ctx := context.Background() @@ -5738,7 +5738,7 @@ func TestAllocatorFullDisks(t *testing.T) { func Example_rebalancing() { stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) st := cluster.MakeTestingClusterSettings() clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) @@ -5834,7 +5834,7 @@ func Example_rebalancing() { storeFilterThrottled, ) if ok { - log.Infof(context.TODO(), "rebalancing to %v; details: %s", target, details) + log.Infof(context.Background(), "rebalancing to %v; details: %s", target, details) testStores[j].rebalance(&testStores[int(target.StoreID)], alloc.randGen.Int63n(1<<20)) } } diff --git a/pkg/kv/kvserver/client_lease_test.go b/pkg/kv/kvserver/client_lease_test.go index ada548f82e9f..11e8c617b952 100644 --- a/pkg/kv/kvserver/client_lease_test.go +++ b/pkg/kv/kvserver/client_lease_test.go @@ -108,8 +108,8 @@ func TestStoreRangeLeaseSwitcheroo(t *testing.T) { // Allow leases to expire and send commands to ensure we // re-acquire, then check types again. 
- mtc.advanceClock(context.TODO()) - if _, err := mtc.dbs[0].Inc(context.TODO(), splitKey, 1); err != nil { + mtc.advanceClock(context.Background()) + if _, err := mtc.dbs[0].Inc(context.Background(), splitKey, 1); err != nil { t.Fatalf("failed to increment: %+v", err) } @@ -125,8 +125,8 @@ func TestStoreRangeLeaseSwitcheroo(t *testing.T) { sc.EnableEpochRangeLeases = false mtc.restartStore(0) - mtc.advanceClock(context.TODO()) - if _, err := mtc.dbs[0].Inc(context.TODO(), splitKey, 1); err != nil { + mtc.advanceClock(context.Background()) + if _, err := mtc.dbs[0].Inc(context.Background(), splitKey, 1); err != nil { t.Fatalf("failed to increment: %+v", err) } @@ -142,8 +142,8 @@ func TestStoreRangeLeaseSwitcheroo(t *testing.T) { sc.EnableEpochRangeLeases = true mtc.restartStore(0) - mtc.advanceClock(context.TODO()) - if _, err := mtc.dbs[0].Inc(context.TODO(), splitKey, 1); err != nil { + mtc.advanceClock(context.Background()) + if _, err := mtc.dbs[0].Inc(context.Background(), splitKey, 1); err != nil { t.Fatalf("failed to increment: %+v", err) } @@ -171,7 +171,7 @@ func TestStoreGossipSystemData(t *testing.T) { if _, pErr := kv.SendWrapped(context.Background(), mtc.distSenders[0], splitArgs); pErr != nil { t.Fatal(pErr) } - if _, err := mtc.dbs[0].Inc(context.TODO(), splitKey, 1); err != nil { + if _, err := mtc.dbs[0].Inc(context.Background(), splitKey, 1); err != nil { t.Fatalf("failed to increment: %+v", err) } @@ -251,7 +251,7 @@ func TestGossipSystemConfigOnLeaseChange(t *testing.T) { } newStoreIdx := (initialStoreIdx + 1) % numStores - mtc.transferLease(context.TODO(), rangeID, initialStoreIdx, newStoreIdx) + mtc.transferLease(context.Background(), rangeID, initialStoreIdx, newStoreIdx) testutils.SucceedsSoon(t, func() error { if mtc.stores[initialStoreIdx].Gossip().InfoOriginatedHere(gossip.KeySystemConfig) { @@ -354,7 +354,7 @@ func TestCannotTransferLeaseToVoterOutgoing(t *testing.T) { desc := tc.AddReplicasOrFatal(t, scratchStartKey, tc.Targets(1, 2)...) scratchRangeID.Store(desc.RangeID) // Make sure n1 has the lease to start with. 
- err := tc.Server(0).DB().AdminTransferLease(context.TODO(), + err := tc.Server(0).DB().AdminTransferLease(context.Background(), scratchStartKey, tc.Target(0).StoreID) require.NoError(t, err) @@ -384,7 +384,7 @@ func TestCannotTransferLeaseToVoterOutgoing(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - err := tc.Server(0).DB().AdminTransferLease(context.TODO(), + err := tc.Server(0).DB().AdminTransferLease(context.Background(), scratchStartKey, tc.Target(2).StoreID) require.Error(t, err) require.Regexp(t, diff --git a/pkg/kv/kvserver/client_merge_test.go b/pkg/kv/kvserver/client_merge_test.go index 07762b93fc67..4bfe302ec53b 100644 --- a/pkg/kv/kvserver/client_merge_test.go +++ b/pkg/kv/kvserver/client_merge_test.go @@ -796,7 +796,7 @@ func TestStoreRangeSplitMergeGeneration(t *testing.T) { }, }, }) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) leftKey := roachpb.Key("z") rightKey := leftKey.Next().Next() @@ -2971,7 +2971,7 @@ func TestStoreRangeMergeRaftSnapshot(t *testing.T) { } tombstoneKey := keys.RangeTombstoneKey(rangeID) tombstoneValue := &roachpb.RangeTombstone{NextReplicaID: math.MaxInt32} - if err := storage.MVCCBlindPutProto(context.TODO(), &sst, nil, tombstoneKey, hlc.Timestamp{}, tombstoneValue, nil); err != nil { + if err := storage.MVCCBlindPutProto(context.Background(), &sst, nil, tombstoneKey, hlc.Timestamp{}, tombstoneValue, nil); err != nil { return err } err := sst.Finish() diff --git a/pkg/kv/kvserver/client_metrics_test.go b/pkg/kv/kvserver/client_metrics_test.go index fe97055e7bcb..4bb46ffceb0b 100644 --- a/pkg/kv/kvserver/client_metrics_test.go +++ b/pkg/kv/kvserver/client_metrics_test.go @@ -129,7 +129,7 @@ func verifyStats(t *testing.T, mtc *multiTestContext, storeIdxSlice ...int) { } func verifyRocksDBStats(t *testing.T, s *kvserver.Store) { - if err := s.ComputeMetrics(context.TODO(), 0); err != nil { + if err := s.ComputeMetrics(context.Background(), 0); err != nil { t.Fatal(err) } @@ -289,7 +289,7 @@ func TestStoreMetrics(t *testing.T) { // Add some data to the "right" range. dataKey := []byte("z") - if _, err := mtc.dbs[0].Inc(context.TODO(), dataKey, 5); err != nil { + if _, err := mtc.dbs[0].Inc(context.Background(), dataKey, 5); err != nil { t.Fatal(err) } mtc.waitForValues(roachpb.Key("z"), []int64{5, 5, 5}) @@ -298,7 +298,7 @@ func TestStoreMetrics(t *testing.T) { verifyStats(t, mtc, 0, 1, 2) // Create a transaction statement that fails. Regression test for #4969. - if err := mtc.dbs[0].Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + if err := mtc.dbs[0].Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { b := txn.NewBatch() var expVal roachpb.Value expVal.SetInt(6) @@ -315,7 +315,7 @@ func TestStoreMetrics(t *testing.T) { // Unreplicate range from the first store. testutils.SucceedsSoon(t, func() error { // This statement can fail if store 0 is not the leaseholder. - if err := mtc.transferLeaseNonFatal(context.TODO(), replica.RangeID, 0, 1); err != nil { + if err := mtc.transferLeaseNonFatal(context.Background(), replica.RangeID, 0, 1); err != nil { t.Log(err) } // This statement will fail if store 0 IS the leaseholder. 
This can happen diff --git a/pkg/kv/kvserver/client_raft_test.go b/pkg/kv/kvserver/client_raft_test.go index f25f6e5361e8..0415d4fa9e83 100644 --- a/pkg/kv/kvserver/client_raft_test.go +++ b/pkg/kv/kvserver/client_raft_test.go @@ -87,7 +87,7 @@ func TestStoreRecoverFromEngine(t *testing.T) { key2 := roachpb.Key("z") engineStopper := stop.NewStopper() - defer engineStopper.Stop(context.TODO()) + defer engineStopper.Stop(context.Background()) eng := storage.NewDefaultInMem() engineStopper.AddCloser(eng) var rangeID2 roachpb.RangeID @@ -115,7 +115,7 @@ func TestStoreRecoverFromEngine(t *testing.T) { // that both predate and postdate the split. func() { stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) store := createTestStoreWithOpts(t, testStoreOpts{ eng: eng, @@ -201,7 +201,7 @@ func TestStoreRecoverWithErrors(t *testing.T) { func() { stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) keyA := roachpb.Key("a") storeCfg := storeCfg // copy storeCfg.TestingKnobs.EvalKnobs.TestingEvalFilter = @@ -236,7 +236,7 @@ func TestStoreRecoverWithErrors(t *testing.T) { } stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) // Recover from the engine. store := createTestStoreWithOpts(t, @@ -1192,7 +1192,7 @@ func TestReplicateAfterRemoveAndSplit(t *testing.T) { t.Fatal(err) } - mtc.advanceClock(context.TODO()) + mtc.advanceClock(context.Background()) // Restart store 2. mtc.restartStore(2) @@ -1204,7 +1204,7 @@ func TestReplicateAfterRemoveAndSplit(t *testing.T) { startKey := roachpb.RKey(splitKey) var desc roachpb.RangeDescriptor - if err := mtc.dbs[0].GetProto(context.TODO(), keys.RangeDescriptorKey(startKey), &desc); err != nil { + if err := mtc.dbs[0].GetProto(context.Background(), keys.RangeDescriptorKey(startKey), &desc); err != nil { t.Fatal(err) } @@ -1455,7 +1455,7 @@ func TestLogGrowthWhenRefreshingPendingCommands(t *testing.T) { propIdx, otherIdx = 1, 0 } propNode := mtc.stores[propIdx].TestSender() - mtc.transferLease(context.TODO(), rangeID, otherIdx, propIdx) + mtc.transferLease(context.Background(), rangeID, otherIdx, propIdx) testutils.SucceedsSoon(t, func() error { // Lease transfers may not be immediately observed by the new // leaseholder. Wait until the new leaseholder is aware. @@ -1624,7 +1624,7 @@ func TestUnreplicateFirstRange(t *testing.T) { // Replicate the range to store 1. mtc.replicateRange(rangeID, 1) // Move the lease away from store 0 before removing its replica. - mtc.transferLease(context.TODO(), rangeID, 0, 1) + mtc.transferLease(context.Background(), rangeID, 0, 1) // Unreplicate the from from store 0. mtc.unreplicateRange(rangeID, 0) // Replicate the range to store 2. The first range is no longer available on @@ -2007,7 +2007,7 @@ func testReplicaAddRemove(t *testing.T, addFirst bool) { })) // Wait out the range lease and the unleased duration to make the replica GC'able. 
- mtc.advanceClock(context.TODO()) + mtc.advanceClock(context.Background()) mtc.manualClock.Increment(int64(kvserver.ReplicaGCQueueInactivityThreshold + 1)) mtc.stores[1].SetReplicaGCQueueActive(true) mtc.stores[1].MustForceReplicaGCScanAndProcess() @@ -2428,7 +2428,7 @@ outer: t.Fatal(err) } if lease, _ := repl.GetLease(); lease.Replica.Equal(repDesc) { - mtc.transferLease(context.TODO(), rangeID, leaderIdx, replicaIdx) + mtc.transferLease(context.Background(), rangeID, leaderIdx, replicaIdx) } mtc.unreplicateRange(rangeID, leaderIdx) cb := mtc.transport.GetCircuitBreaker(toStore.Ident.NodeID, rpc.DefaultClass) @@ -2667,7 +2667,7 @@ func TestRaftAfterRemoveRange(t *testing.T) { // Expire leases to ensure any remaining intent resolutions can complete. // TODO(bdarnell): understand why some tests need this. - mtc.advanceClock(context.TODO()) + mtc.advanceClock(context.Background()) } // TestRaftRemoveRace adds and removes a replica repeatedly in an attempt to @@ -3147,7 +3147,7 @@ func TestReplicateRogueRemovedNode(t *testing.T) { // moved under the lock, then the GC scan can be moved out of this loop. mtc.stores[1].SetReplicaGCQueueActive(true) testutils.SucceedsSoon(t, func() error { - mtc.advanceClock(context.TODO()) + mtc.advanceClock(context.Background()) mtc.manualClock.Increment(int64( kvserver.ReplicaGCQueueInactivityThreshold) + 1) mtc.stores[1].MustForceReplicaGCScanAndProcess() @@ -3233,7 +3233,7 @@ func TestReplicateRogueRemovedNode(t *testing.T) { // will see that the range has been moved and delete the old // replica. mtc.stores[2].SetReplicaGCQueueActive(true) - mtc.advanceClock(context.TODO()) + mtc.advanceClock(context.Background()) mtc.manualClock.Increment(int64( kvserver.ReplicaGCQueueInactivityThreshold) + 1) mtc.stores[2].MustForceReplicaGCScanAndProcess() @@ -3288,7 +3288,7 @@ func TestReplicateRemovedNodeDisruptiveElection(t *testing.T) { // Move the first range from the first node to the other three. const rangeID = roachpb.RangeID(1) mtc.replicateRange(rangeID, 1, 2, 3) - mtc.transferLease(context.TODO(), rangeID, 0, 1) + mtc.transferLease(context.Background(), rangeID, 0, 1) mtc.unreplicateRange(rangeID, 0) // Ensure that we have a stable lease and raft leader so we can tell if the @@ -3636,7 +3636,7 @@ func TestRemovedReplicaError(t *testing.T) { raftID := roachpb.RangeID(1) mtc.replicateRange(raftID, 1) - mtc.transferLease(context.TODO(), raftID, 0, 1) + mtc.transferLease(context.Background(), raftID, 0, 1) mtc.unreplicateRange(raftID, 0) mtc.manualClock.Increment(mtc.storeConfig.LeaseExpiration()) @@ -3739,7 +3739,7 @@ func TestTransferRaftLeadership(t *testing.T) { // expire-request in a loop until we get our foot in the door. origCount0 := store0.Metrics().RangeRaftLeaderTransfers.Count() for { - mtc.advanceClock(context.TODO()) + mtc.advanceClock(context.Background()) if _, pErr := kv.SendWrappedWith( context.Background(), store1, roachpb.Header{RangeID: repl0.RangeID}, getArgs, ); pErr == nil { @@ -3964,7 +3964,7 @@ func TestInitRaftGroupOnRequest(t *testing.T) { // problem. // Verify the raft group isn't initialized yet. if repl.IsRaftGroupInitialized() { - log.Errorf(context.TODO(), "expected raft group to be uninitialized") + log.Errorf(context.Background(), "expected raft group to be uninitialized") } // Send an increment and verify that initializes the Raft group. 
diff --git a/pkg/kv/kvserver/client_replica_gc_test.go b/pkg/kv/kvserver/client_replica_gc_test.go index 63bc31013eac..6526ebde4eb3 100644 --- a/pkg/kv/kvserver/client_replica_gc_test.go +++ b/pkg/kv/kvserver/client_replica_gc_test.go @@ -177,7 +177,7 @@ func TestReplicaGCQueueDropReplicaGCOnScan(t *testing.T) { mtc.stores[1].SetReplicaGCQueueActive(true) // Increment the clock's timestamp to make the replica GC queue process the range. - mtc.advanceClock(context.TODO()) + mtc.advanceClock(context.Background()) mtc.manualClock.Increment(int64(kvserver.ReplicaGCQueueInactivityThreshold + 1)) // Make sure the range is removed from the store. diff --git a/pkg/kv/kvserver/client_replica_test.go b/pkg/kv/kvserver/client_replica_test.go index 7c21e6d58d15..15491977ad61 100644 --- a/pkg/kv/kvserver/client_replica_test.go +++ b/pkg/kv/kvserver/client_replica_test.go @@ -218,7 +218,7 @@ func TestTxnPutOutOfOrder(t *testing.T) { var numGets int32 stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) manual := hlc.NewManualClock(123) cfg := kvserver.TestStoreConfig(hlc.NewClock(manual.UnixNano, time.Nanosecond)) // Splits can cause our chosen key to end up on a range other than range 1, @@ -249,7 +249,7 @@ func TestTxnPutOutOfOrder(t *testing.T) { // Put an initial value. initVal := []byte("initVal") - err := store.DB().Put(context.TODO(), key, initVal) + err := store.DB().Put(context.Background(), key, initVal) if err != nil { t.Fatalf("failed to put: %+v", err) } @@ -266,7 +266,7 @@ func TestTxnPutOutOfOrder(t *testing.T) { // Start a txn that does read-after-write. // The txn will be restarted twice, and the out-of-order put // will happen in the second epoch. - errChan <- store.DB().Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + errChan <- store.DB().Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { epoch++ if epoch == 1 { @@ -282,7 +282,7 @@ func TestTxnPutOutOfOrder(t *testing.T) { updatedVal := []byte("updatedVal") if err := txn.CPut(ctx, key, updatedVal, strToValue("initVal")); err != nil { - log.Errorf(context.TODO(), "failed put value: %+v", err) + log.Errorf(context.Background(), "failed put value: %+v", err) return err } @@ -380,7 +380,7 @@ func TestRangeLookupUseReverse(t *testing.T) { storeCfg.TestingKnobs.DisableSplitQueue = true storeCfg.TestingKnobs.DisableMergeQueue = true stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) store := createTestStoreWithOpts( t, testStoreOpts{ @@ -610,7 +610,7 @@ func (l *leaseTransferTest) sendRead(storeIdx int) *roachpb.Error { getArgs(l.leftKey), ) if pErr != nil { - log.Warningf(context.TODO(), "%v", pErr) + log.Warningf(context.Background(), "%v", pErr) } return pErr } @@ -1036,7 +1036,7 @@ func TestLeaseMetricsOnSplitAndTransfer(t *testing.T) { // Now, a successful transfer from LHS replica 0 to replica 1. 
injectLeaseTransferError.Store(false) if err := mtc.dbs[0].AdminTransferLease( - context.TODO(), keyMinReplica0.Desc().StartKey.AsRawKey(), mtc.stores[1].StoreID(), + context.Background(), keyMinReplica0.Desc().StartKey.AsRawKey(), mtc.stores[1].StoreID(), ); err != nil { t.Fatalf("unable to transfer lease to replica 1: %+v", err) } @@ -1055,7 +1055,7 @@ func TestLeaseMetricsOnSplitAndTransfer(t *testing.T) { injectLeaseTransferError.Store(true) keyAReplica0 := mtc.stores[0].LookupReplica(splitKey) if err := mtc.dbs[0].AdminTransferLease( - context.TODO(), keyAReplica0.Desc().StartKey.AsRawKey(), mtc.stores[1].StoreID(), + context.Background(), keyAReplica0.Desc().StartKey.AsRawKey(), mtc.stores[1].StoreID(), ); err == nil { t.Fatal("expected an error transferring to an unknown store ID") } @@ -1071,8 +1071,8 @@ func TestLeaseMetricsOnSplitAndTransfer(t *testing.T) { // Expire current leases and put a key to RHS of split to request // an epoch-based lease. testutils.SucceedsSoon(t, func() error { - mtc.advanceClock(context.TODO()) - if err := mtc.stores[0].DB().Put(context.TODO(), "a", "foo"); err != nil { + mtc.advanceClock(context.Background()) + if err := mtc.stores[0].DB().Put(context.Background(), "a", "foo"); err != nil { return err } @@ -1201,7 +1201,7 @@ func TestLeaseExtensionNotBlockedByRead(t *testing.T) { }, }) s := srv.(*server.TestServer) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) store, err := s.GetStores().(*kvserver.Stores).GetStore(s.GetFirstStoreID()) if err != nil { @@ -1254,7 +1254,7 @@ func TestLeaseExtensionNotBlockedByRead(t *testing.T) { } for { - curLease, _, err := s.GetRangeLease(context.TODO(), key) + curLease, _, err := s.GetRangeLease(context.Background(), key) if err != nil { t.Fatal(err) } @@ -1305,7 +1305,7 @@ func LeaseInfo( func TestLeaseInfoRequest(t *testing.T) { defer leaktest.AfterTest(t)() tc := testcluster.StartTestCluster(t, 3, base.TestClusterArgs{}) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) kvDB0 := tc.Servers[0].DB() kvDB1 := tc.Servers[1].DB() @@ -1438,7 +1438,7 @@ func TestErrorHandlingForNonKVCommand(t *testing.T) { }, }) s := srv.(*server.TestServer) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) // Send the lease request. key := roachpb.Key("a") @@ -1613,7 +1613,7 @@ func TestRangeInfo(t *testing.T) { if err != nil { t.Fatal(err) } - if err = mtc.dbs[0].AdminTransferLease(context.TODO(), + if err = mtc.dbs[0].AdminTransferLease(context.Background(), r.Desc().StartKey.AsRawKey(), replDesc.StoreID); err != nil { t.Fatalf("unable to transfer lease to replica %s: %+v", r, err) } @@ -2576,7 +2576,7 @@ func TestReplicaTombstone(t *testing.T) { } tombstoneKey := keys.RangeTombstoneKey(rhsDesc.RangeID) ok, err := storage.MVCCGetProto( - context.TODO(), store.Engine(), tombstoneKey, hlc.Timestamp{}, &tombstone, storage.MVCCGetOptions{}, + context.Background(), store.Engine(), tombstoneKey, hlc.Timestamp{}, &tombstone, storage.MVCCGetOptions{}, ) require.NoError(t, err) if !ok { @@ -2912,7 +2912,7 @@ func TestTransferLeaseBlocksWrites(t *testing.T) { }, ReplicationMode: base.ReplicationManual, }) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) scratch := tc.ScratchRange(t) makeKey := func() roachpb.Key { @@ -2926,7 +2926,7 @@ func TestTransferLeaseBlocksWrites(t *testing.T) { // filter. 
incErr := make(chan error) go func() { - _, err := tc.Server(1).DB().Inc(context.TODO(), makeKey(), 1) + _, err := tc.Server(1).DB().Inc(context.Background(), makeKey(), 1) incErr <- err }() diff --git a/pkg/kv/kvserver/client_split_test.go b/pkg/kv/kvserver/client_split_test.go index 3e1c27628ead..92b76c30b7a8 100644 --- a/pkg/kv/kvserver/client_split_test.go +++ b/pkg/kv/kvserver/client_split_test.go @@ -77,7 +77,7 @@ func adminSplitArgs(splitKey roachpb.Key) *roachpb.AdminSplitRequest { func TestStoreRangeSplitAtIllegalKeys(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) cfg := kvserver.TestStoreConfig(nil) cfg.TestingKnobs.DisableSplitQueue = true @@ -112,7 +112,7 @@ func TestStoreSplitAbortSpan(t *testing.T) { storeCfg.TestingKnobs.DisableMergeQueue = true stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) store := createTestStoreWithConfig(t, stopper, storeCfg) ctx := context.Background() @@ -241,7 +241,7 @@ func TestStoreRangeSplitAtTablePrefix(t *testing.T) { storeCfg.TestingKnobs.DisableSplitQueue = true storeCfg.TestingKnobs.DisableMergeQueue = true stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) store := createTestStoreWithConfig(t, stopper, storeCfg) key := keys.UserTableDataMin @@ -257,7 +257,7 @@ func TestStoreRangeSplitAtTablePrefix(t *testing.T) { } // Update SystemConfig to trigger gossip. - if err := store.DB().Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + if err := store.DB().Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { if err := txn.SetSystemConfigTrigger(); err != nil { return err } @@ -297,7 +297,7 @@ func TestStoreRangeSplitInsideRow(t *testing.T) { storeCfg.TestingKnobs.DisableSplitQueue = true storeCfg.TestingKnobs.DisableMergeQueue = true stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) store := createTestStoreWithConfig(t, stopper, storeCfg) // Manually create some the column keys corresponding to the table: @@ -316,10 +316,10 @@ func TestStoreRangeSplitInsideRow(t *testing.T) { } // We don't care about the value, so just store any old thing. - if err := store.DB().Put(context.TODO(), col1Key, "column 1"); err != nil { + if err := store.DB().Put(context.Background(), col1Key, "column 1"); err != nil { t.Fatal(err) } - if err := store.DB().Put(context.TODO(), col2Key, "column 2"); err != nil { + if err := store.DB().Put(context.Background(), col2Key, "column 2"); err != nil { t.Fatal(err) } @@ -357,7 +357,7 @@ func TestStoreRangeSplitIntents(t *testing.T) { storeCfg.TestingKnobs.DisableSplitQueue = true storeCfg.TestingKnobs.DisableMergeQueue = true stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) store := createTestStoreWithConfig(t, stopper, storeCfg) // First, write some values left and right of the proposed split key. @@ -429,7 +429,7 @@ func TestStoreRangeSplitAtRangeBounds(t *testing.T) { storeCfg.TestingKnobs.DisableSplitQueue = true storeCfg.TestingKnobs.DisableMergeQueue = true stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) store := createTestStoreWithConfig(t, stopper, storeCfg) // Split range 1 at an arbitrary key. 
@@ -537,7 +537,7 @@ func TestStoreRangeSplitIdempotency(t *testing.T) { storeCfg.TestingKnobs.DisableSplitQueue = true storeCfg.TestingKnobs.DisableMergeQueue = true stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) store := createTestStoreWithOpts(t, testStoreOpts{ // This test was written before the test stores were able to start with @@ -697,7 +697,7 @@ func TestStoreRangeSplitStats(t *testing.T) { storeCfg.TestingKnobs.DisableSplitQueue = true storeCfg.TestingKnobs.DisableMergeQueue = true stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) store := createTestStoreWithConfig(t, stopper, storeCfg) ctx := context.Background() @@ -897,7 +897,7 @@ func TestStoreRangeSplitStatsWithMerges(t *testing.T) { storeCfg := kvserver.TestStoreConfig(hlc.NewClock(manual.UnixNano, time.Nanosecond)) storeCfg.TestingKnobs.DisableSplitQueue = true stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) store := createTestStoreWithConfig(t, stopper, storeCfg) ctx := context.Background() @@ -1006,7 +1006,7 @@ func fillRange( func TestStoreZoneUpdateAndRangeSplit(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) storeCfg := kvserver.TestStoreConfig(nil /* clock */) storeCfg.TestingKnobs.DisableMergeQueue = true store := createTestStoreWithConfig(t, stopper, storeCfg) @@ -1066,7 +1066,7 @@ func TestStoreZoneUpdateAndRangeSplit(t *testing.T) { func TestStoreRangeSplitWithMaxBytesUpdate(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) storeCfg := kvserver.TestStoreConfig(nil /* clock */) storeCfg.TestingKnobs.DisableMergeQueue = true store := createTestStoreWithConfig(t, stopper, storeCfg) @@ -1271,7 +1271,7 @@ func TestStoreRangeSplitBackpressureWrites(t *testing.T) { func TestStoreRangeSystemSplits(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) // Intentionally leave the merge queue enabled. This indirectly tests that the // merge queue respects these split points. store, _ := createTestStore(t, stopper) @@ -1286,7 +1286,7 @@ func TestStoreRangeSystemSplits(t *testing.T) { // - descriptor IDs are used to determine split keys // - the write triggers a SystemConfig update and gossip // We should end up with splits at each user table prefix. - if err := store.DB().Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + if err := store.DB().Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { if err := txn.SetSystemConfigTrigger(); err != nil { return err } @@ -1340,7 +1340,7 @@ func TestStoreRangeSystemSplits(t *testing.T) { expKeys = append(expKeys, testutils.MakeKey(keys.Meta2Prefix, roachpb.RKeyMax)) testutils.SucceedsSoon(t, func() error { - rows, err := store.DB().Scan(context.TODO(), keys.Meta2Prefix, keys.MetaMax, 0) + rows, err := store.DB().Scan(context.Background(), keys.Meta2Prefix, keys.MetaMax, 0) if err != nil { return err } @@ -1360,7 +1360,7 @@ func TestStoreRangeSystemSplits(t *testing.T) { // Write another, disjoint (+3) descriptor for a user table. 
userTableMax += 3 exceptions = map[int]struct{}{userTableMax - 1: {}, userTableMax - 2: {}} - if err := store.DB().Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + if err := store.DB().Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { if err := txn.SetSystemConfigTrigger(); err != nil { return err } @@ -1443,7 +1443,7 @@ func runSetupSplitSnapshotRace( // safe (or allowed) for a leaseholder to remove itself from a cluster // without first giving up its lease. mtc.replicateRange(leftRangeID, 1, 2, 3) - mtc.transferLease(context.TODO(), leftRangeID, 0, 1) + mtc.transferLease(context.Background(), leftRangeID, 0, 1) mtc.unreplicateRange(leftRangeID, 0) mtc.waitForValues(leftKey, []int64{0, 1, 1, 1, 0, 0}) @@ -1451,7 +1451,7 @@ func runSetupSplitSnapshotRace( // Stop node 3 so it doesn't hear about the split. mtc.stopStore(3) - mtc.advanceClock(context.TODO()) + mtc.advanceClock(context.Background()) // Split the data range. splitArgs = adminSplitArgs(roachpb.Key("m")) @@ -1473,7 +1473,7 @@ func runSetupSplitSnapshotRace( // Relocate the right range onto nodes 3-5. mtc.replicateRange(rightRangeID, 4, 5) mtc.unreplicateRange(rightRangeID, 2) - mtc.transferLease(context.TODO(), rightRangeID, 1, 4) + mtc.transferLease(context.Background(), rightRangeID, 1, 4) mtc.unreplicateRange(rightRangeID, 1) // Perform another increment after all the replication changes. This @@ -1642,7 +1642,7 @@ func TestStoreSplitTimestampCacheDifferentLeaseHolder(t *testing.T) { } tc := testcluster.StartTestCluster(t, 2, args) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) // Split the data range, mainly to avoid other splits getting in our way. for _, k := range []roachpb.Key{leftKey, rightKey} { @@ -1823,7 +1823,7 @@ func TestStoreSplitOnRemovedReplica(t *testing.T) { } tc := testcluster.StartTestCluster(t, 3, args) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) // Split the data range, mainly to avoid other splits getting in our way. 
for _, k := range []roachpb.Key{leftKey, rightKey} { @@ -1877,7 +1877,7 @@ func TestStoreSplitGCThreshold(t *testing.T) { storeCfg.TestingKnobs.DisableSplitQueue = true storeCfg.TestingKnobs.DisableMergeQueue = true stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) store := createTestStoreWithConfig(t, stopper, storeCfg) leftKey := roachpb.Key("a") @@ -1920,7 +1920,7 @@ func TestStoreSplitGCThreshold(t *testing.T) { t.Fatalf("expected RHS's GCThreshold is equal to %v, but got %v", specifiedGCThreshold, gcThreshold) } - repl.AssertState(context.TODO(), store.Engine()) + repl.AssertState(context.Background(), store.Engine()) } // TestStoreRangeSplitRaceUninitializedRHS reproduces #7600 (before it was @@ -2196,7 +2196,7 @@ func TestStoreRangeGossipOnSplits(t *testing.T) { storeCfg.TestingKnobs.DisableMergeQueue = true storeCfg.TestingKnobs.DisableScanner = true stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) store := createTestStoreWithConfig(t, stopper, storeCfg) storeKey := gossip.MakeStoreKey(store.StoreID()) @@ -2229,7 +2229,7 @@ func TestStoreRangeGossipOnSplits(t *testing.T) { splitFunc := func(i int) *roachpb.Error { splitKey := roachpb.Key(fmt.Sprintf("%02d", i)) _, pErr := store.LookupReplica(roachpb.RKey(splitKey)).AdminSplit( - context.TODO(), + context.Background(), roachpb.AdminSplitRequest{ RequestHeader: roachpb.RequestHeader{ Key: splitKey, @@ -2247,7 +2247,7 @@ func TestStoreRangeGossipOnSplits(t *testing.T) { if pErr := splitFunc(i); pErr != nil { // Avoid flakes caused by bad clocks. if testutils.IsPError(pErr, "rejecting command with timestamp in the future") { - log.Warningf(context.TODO(), "ignoring split error: %s", pErr) + log.Warningf(context.Background(), "ignoring split error: %s", pErr) continue } t.Fatal(pErr) @@ -2273,7 +2273,7 @@ func TestStoreTxnWaitQueueEnabledOnSplit(t *testing.T) { storeCfg.TestingKnobs.DisableSplitQueue = true storeCfg.TestingKnobs.DisableMergeQueue = true stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) store := createTestStoreWithConfig(t, stopper, storeCfg) key := keys.UserTableDataMin @@ -2296,7 +2296,7 @@ func TestDistributedTxnCleanup(t *testing.T) { storeCfg.TestingKnobs.DisableSplitQueue = true storeCfg.TestingKnobs.DisableMergeQueue = true stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) store := createTestStoreWithConfig(t, stopper, storeCfg) // Split at "a". 
@@ -2497,7 +2497,7 @@ func TestTxnWaitQueueDependencyCycleWithRangeSplit(t *testing.T) { return nil } stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) store := createTestStoreWithConfig(t, stopper, storeCfg) lhsKey := roachpb.Key("a") @@ -2590,7 +2590,7 @@ func TestTxnWaitQueueDependencyCycleWithRangeSplit(t *testing.T) { func TestStoreCapacityAfterSplit(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) manualClock := hlc.NewManualClock(123) cfg := kvserver.TestStoreConfig(hlc.NewClock(manualClock.UnixNano, time.Nanosecond)) cfg.TestingKnobs.DisableSplitQueue = true @@ -2998,7 +2998,7 @@ func TestRangeLookupAsyncResolveIntent(t *testing.T) { func TestStoreSplitDisappearingReplicas(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) store, _ := createTestStore(t, stopper) go kvserver.WatchForDisappearingReplicas(t, store) for i := 0; i < 100; i++ { diff --git a/pkg/kv/kvserver/client_status_test.go b/pkg/kv/kvserver/client_status_test.go index d2a57f89eedd..ea71a0042caf 100644 --- a/pkg/kv/kvserver/client_status_test.go +++ b/pkg/kv/kvserver/client_status_test.go @@ -53,7 +53,7 @@ func TestComputeStatsForKeySpan(t *testing.T) { // Create some keys across the ranges. incKeys := []string{"b", "bb", "bbb", "d", "dd", "h"} for _, k := range incKeys { - if _, err := mtc.dbs[0].Inc(context.TODO(), []byte(k), 5); err != nil { + if _, err := mtc.dbs[0].Inc(context.Background(), []byte(k), 5); err != nil { t.Fatal(err) } } diff --git a/pkg/kv/kvserver/client_test.go b/pkg/kv/kvserver/client_test.go index b46cde7a916a..13373cc9dd0f 100644 --- a/pkg/kv/kvserver/client_test.go +++ b/pkg/kv/kvserver/client_test.go @@ -131,7 +131,7 @@ func createTestStoreWithOpts( // Ensure that tests using this test context and restart/shut down // their servers do not inadvertently start talking to servers from // unrelated concurrent tests. - rpcContext.ClusterID.Set(context.TODO(), uuid.MakeV4()) + rpcContext.ClusterID.Set(context.Background(), uuid.MakeV4()) nodeDesc := &roachpb.NodeDescriptor{ NodeID: 1, Address: util.MakeUnresolvedAddr("tcp", "invalid.invalid:26257"), @@ -368,7 +368,7 @@ func (m *multiTestContext) Start(t testing.TB, numStores int) { // Ensure that tests using this test context and restart/shut down // their servers do not inadvertently start talking to servers from // unrelated concurrent tests. - m.rpcContext.ClusterID.Set(context.TODO(), uuid.MakeV4()) + m.rpcContext.ClusterID.Set(context.Background(), uuid.MakeV4()) // We are sharing the same RPC context for all simulated nodes, so we can't enforce // some of the RPC check validation. m.rpcContext.TestingAllowNamedRPCToAnonymousServer = true @@ -431,7 +431,7 @@ func (m *multiTestContext) Stop() { // any test (TestRaftAfterRemove is a good example) results // in deadlocks where a task can't finish because of // getting stuck in addWriteCommand. 
- s.Quiesce(context.TODO()) + s.Quiesce(context.Background()) } }(s) } @@ -442,13 +442,13 @@ func (m *multiTestContext) Stop() { defer m.mu.RUnlock() for _, stopper := range m.stoppers { if stopper != nil { - stopper.Stop(context.TODO()) + stopper.Stop(context.Background()) } } - m.transportStopper.Stop(context.TODO()) + m.transportStopper.Stop(context.Background()) for _, s := range m.engineStoppers { - s.Stop(context.TODO()) + s.Stop(context.Background()) } close(done) }() @@ -1023,7 +1023,7 @@ func (m *multiTestContext) stopStore(i int) { stopper := m.stoppers[i] m.mu.RUnlock() - stopper.Stop(context.TODO()) + stopper.Stop(context.Background()) m.mu.Lock() m.stoppers[i] = nil @@ -1298,13 +1298,13 @@ func (m *multiTestContext) readIntFromEngines(key roachpb.Key) []int64 { val, _, err := storage.MVCCGet(context.Background(), eng, key, m.clocks[i].Now(), storage.MVCCGetOptions{}) if err != nil { - log.VEventf(context.TODO(), 1, "engine %d: error reading from key %s: %s", i, key, err) + log.VEventf(context.Background(), 1, "engine %d: error reading from key %s: %s", i, key, err) } else if val == nil { - log.VEventf(context.TODO(), 1, "engine %d: missing key %s", i, key) + log.VEventf(context.Background(), 1, "engine %d: missing key %s", i, key) } else { results[i], err = val.GetInt() if err != nil { - log.Errorf(context.TODO(), "engine %d: error decoding %s from key %s: %+v", i, val, key, err) + log.Errorf(context.Background(), "engine %d: error decoding %s from key %s: %+v", i, val, key, err) } } } @@ -1614,7 +1614,7 @@ func waitForTombstone( testutils.SucceedsSoon(t, func() error { tombstoneKey := keys.RangeTombstoneKey(rangeID) ok, err := storage.MVCCGetProto( - context.TODO(), reader, tombstoneKey, hlc.Timestamp{}, &tombstone, storage.MVCCGetOptions{}, + context.Background(), reader, tombstoneKey, hlc.Timestamp{}, &tombstone, storage.MVCCGetOptions{}, ) if err != nil { t.Fatalf("failed to read tombstone: %v", err) diff --git a/pkg/kv/kvserver/closedts/minprop/doc_test.go b/pkg/kv/kvserver/closedts/minprop/doc_test.go index 8ad08f3cd1a1..d51d77eb563c 100644 --- a/pkg/kv/kvserver/closedts/minprop/doc_test.go +++ b/pkg/kv/kvserver/closedts/minprop/doc_test.go @@ -22,7 +22,7 @@ import ( ) func Example() { - ctx := context.TODO() + ctx := context.Background() tracker := NewTracker() const ep1 ctpb.Epoch = 1 diff --git a/pkg/kv/kvserver/concurrency/lock_table_test.go b/pkg/kv/kvserver/concurrency/lock_table_test.go index e4379d350a49..ccc4b60a94d8 100644 --- a/pkg/kv/kvserver/concurrency/lock_table_test.go +++ b/pkg/kv/kvserver/concurrency/lock_table_test.go @@ -516,7 +516,7 @@ func doWork(ctx context.Context, item *workItem, e *workloadExecutor) error { // cancellation, the code makes sure to release latches when returning // early due to error. Otherwise other requests will get stuck and // group.Wait() will not return until the test times out. 
- lg, err = e.lm.Acquire(context.TODO(), item.request.LatchSpans) + lg, err = e.lm.Acquire(context.Background(), item.request.LatchSpans) if err != nil { return err } @@ -769,7 +769,7 @@ func (e *workloadExecutor) tryFinishTxn( func (e *workloadExecutor) execute(strict bool, maxNonStrictConcurrency int) error { numOutstanding := 0 i := 0 - group, ctx := errgroup.WithContext(context.TODO()) + group, ctx := errgroup.WithContext(context.Background()) timer := time.NewTimer(time.Second) timer.Stop() var err error @@ -1058,7 +1058,7 @@ func doBenchWork(item *benchWorkItem, env benchEnv, doneCh chan<- error) { var err error firstIter := true for { - if lg, err = env.lm.Acquire(context.TODO(), item.LatchSpans); err != nil { + if lg, err = env.lm.Acquire(context.Background(), item.LatchSpans); err != nil { doneCh <- err return } @@ -1093,7 +1093,7 @@ func doBenchWork(item *benchWorkItem, env benchEnv, doneCh chan<- error) { return } // Release locks. - if lg, err = env.lm.Acquire(context.TODO(), item.LatchSpans); err != nil { + if lg, err = env.lm.Acquire(context.Background(), item.LatchSpans); err != nil { doneCh <- err return } @@ -1240,7 +1240,7 @@ func BenchmarkLockTable(b *testing.B) { runRequests(b, iters, requestsPerGroup[0], env) } if log.V(1) { - log.Infof(context.TODO(), "num requests that waited: %d, num scan calls: %d\n", + log.Infof(context.Background(), "num requests that waited: %d, num scan calls: %d\n", atomic.LoadUint64(&numRequestsWaited), atomic.LoadUint64(&numScanCalls)) } }) diff --git a/pkg/kv/kvserver/consistency_queue_test.go b/pkg/kv/kvserver/consistency_queue_test.go index 1edb45abd1f9..1250bd6e3721 100644 --- a/pkg/kv/kvserver/consistency_queue_test.go +++ b/pkg/kv/kvserver/consistency_queue_test.go @@ -56,16 +56,16 @@ func TestConsistencyQueueRequiresLive(t *testing.T) { // Verify that queueing is immediately possible. if shouldQ, priority := mtc.stores[0].ConsistencyQueueShouldQueue( - context.TODO(), mtc.clock().Now(), repl, config.NewSystemConfig(sc.DefaultZoneConfig)); !shouldQ { + context.Background(), mtc.clock().Now(), repl, config.NewSystemConfig(sc.DefaultZoneConfig)); !shouldQ { t.Fatalf("expected shouldQ true; got %t, %f", shouldQ, priority) } // Stop a node and expire leases. 
mtc.stopStore(2) - mtc.advanceClock(context.TODO()) + mtc.advanceClock(context.Background()) if shouldQ, priority := mtc.stores[0].ConsistencyQueueShouldQueue( - context.TODO(), mtc.clock().Now(), repl, config.NewSystemConfig(sc.DefaultZoneConfig)); shouldQ { + context.Background(), mtc.clock().Now(), repl, config.NewSystemConfig(sc.DefaultZoneConfig)); shouldQ { t.Fatalf("expected shouldQ false; got %t, %f", shouldQ, priority) } } @@ -444,7 +444,7 @@ func testConsistencyQueueRecomputeStatsImpl(t *testing.T, hadEstimates bool) { rangeID := func() roachpb.RangeID { tc := testcluster.StartTestCluster(t, 1, clusterArgs) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) db0 := tc.Servers[0].DB() diff --git a/pkg/kv/kvserver/gc/data_distribution_test.go b/pkg/kv/kvserver/gc/data_distribution_test.go index 0ab6d3d810a7..e6fb45420f0c 100644 --- a/pkg/kv/kvserver/gc/data_distribution_test.go +++ b/pkg/kv/kvserver/gc/data_distribution_test.go @@ -40,7 +40,7 @@ type dataDistribution func() (storage.MVCCKeyValue, *roachpb.Transaction, bool) func (ds dataDistribution) setupTest( t testing.TB, eng storage.Engine, desc roachpb.RangeDescriptor, ) enginepb.MVCCStats { - ctx := context.TODO() + ctx := context.Background() var maxTs hlc.Timestamp var ms enginepb.MVCCStats for { diff --git a/pkg/kv/kvserver/gossip_test.go b/pkg/kv/kvserver/gossip_test.go index b7a52451a32c..ba95f448c40e 100644 --- a/pkg/kv/kvserver/gossip_test.go +++ b/pkg/kv/kvserver/gossip_test.go @@ -37,7 +37,7 @@ func TestGossipFirstRange(t *testing.T) { base.TestClusterArgs{ ReplicationMode: base.ReplicationManual, }) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) errors := make(chan error, 1) descs := make(chan *roachpb.RangeDescriptor) @@ -78,7 +78,7 @@ func TestGossipFirstRange(t *testing.T) { if reflect.DeepEqual(&desc, gossiped) { return } - log.Infof(context.TODO(), "expected\n%+v\nbut found\n%+v", desc, gossiped) + log.Infof(context.Background(), "expected\n%+v\nbut found\n%+v", desc, gossiped) } } } @@ -165,7 +165,7 @@ func TestGossipHandlesReplacedNode(t *testing.T) { base.TestClusterArgs{ ServerArgs: serverArgs, }) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) // Take down the first node and replace it with a new one. oldNodeIdx := 0 diff --git a/pkg/kv/kvserver/helpers_test.go b/pkg/kv/kvserver/helpers_test.go index 821f1893cc0a..28dbbca161f6 100644 --- a/pkg/kv/kvserver/helpers_test.go +++ b/pkg/kv/kvserver/helpers_test.go @@ -165,7 +165,7 @@ func manualQueue(s *Store, q queueImpl, repl *Replica) error { if cfg == nil { return fmt.Errorf("%s: system config not yet available", s) } - ctx := repl.AnnotateCtx(context.TODO()) + ctx := repl.AnnotateCtx(context.Background()) return q.process(ctx, repl, cfg) } @@ -182,7 +182,7 @@ func (s *Store) ManualReplicaGC(repl *Replica) error { // ManualRaftSnapshot will manually send a raft snapshot to the target replica. 
func (s *Store) ManualRaftSnapshot(repl *Replica, target roachpb.ReplicaID) error { - return s.raftSnapshotQueue.processRaftSnapshot(context.TODO(), repl, target) + return s.raftSnapshotQueue.processRaftSnapshot(context.Background(), repl, target) } func (s *Store) ReservationCount() int { diff --git a/pkg/kv/kvserver/node_liveness_test.go b/pkg/kv/kvserver/node_liveness_test.go index 68da2a66c4e2..2886cf3d4361 100644 --- a/pkg/kv/kvserver/node_liveness_test.go +++ b/pkg/kv/kvserver/node_liveness_test.go @@ -803,7 +803,7 @@ func TestNodeLivenessStatusMap(t *testing.T) { // node will wait forever. ReplicationMode: base.ReplicationManual, }) - ctx := context.TODO() + ctx := context.Background() defer tc.Stopper().Stop(ctx) ctx = logtags.AddTag(ctx, "in test", nil) diff --git a/pkg/kv/kvserver/queue_test.go b/pkg/kv/kvserver/queue_test.go index 8fc4d6ba2b7b..8a02c939ff2b 100644 --- a/pkg/kv/kvserver/queue_test.go +++ b/pkg/kv/kvserver/queue_test.go @@ -380,7 +380,7 @@ func TestBaseQueueProcess(t *testing.T) { tsc := TestStoreConfig(nil) tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.StartWithStoreConfig(t, stopper, tsc) repls := createReplicas(t, &tc, 2) @@ -567,7 +567,7 @@ func TestNeedsSystemConfig(t *testing.T) { func TestAcceptsUnsplitRanges(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) s, _ := createTestStore(t, testStoreOpts{ // This test was written before test stores could start with more than one @@ -708,7 +708,7 @@ func TestBaseQueuePurgatory(t *testing.T) { tsc := TestStoreConfig(nil) tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.StartWithStoreConfig(t, stopper, tsc) testQueue := &testQueueImpl{ @@ -847,7 +847,7 @@ func TestBaseQueueProcessTimeout(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) r, err := tc.store.GetReplica(1) @@ -963,7 +963,7 @@ func TestBaseQueueTimeMetric(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) r, err := tc.store.GetReplica(1) @@ -1104,7 +1104,7 @@ func TestBaseQueueProcessConcurrently(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) repls := createReplicas(t, &tc, 3) @@ -1211,7 +1211,7 @@ func TestBaseQueueRequeue(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) repls := createReplicas(t, &tc, 1) diff --git a/pkg/kv/kvserver/raft_log_queue_test.go b/pkg/kv/kvserver/raft_log_queue_test.go index 770909f903c5..cc094dd40a5a 100644 --- a/pkg/kv/kvserver/raft_log_queue_test.go +++ b/pkg/kv/kvserver/raft_log_queue_test.go @@ -410,7 +410,7 @@ func TestUpdateRaftStatusActivity(t *testing.T) { func TestNewTruncateDecisionMaxSize(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) cfg := 
TestStoreConfig(hlc.NewClock(hlc.NewManualClock(123).UnixNano, time.Nanosecond)) const exp = 1881 @@ -443,7 +443,7 @@ func TestNewTruncateDecision(t *testing.T) { t.Skip("https://github.com/cockroachdb/cockroach/issues/38584") stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) store, _ := createTestStore(t, testStoreOpts{ // This test was written before test stores could start with more than one @@ -694,7 +694,7 @@ func TestTruncateLog(t *testing.T) { cfg := TestStoreConfig(nil) cfg.TestingKnobs.DisableRaftLogQueue = true stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.StartWithStoreConfig(t, stopper, cfg) // Populate the log with 10 entries. Save the LastIndex after each write. @@ -860,7 +860,7 @@ func TestTruncateLogRecompute(t *testing.T) { cfg := TestStoreConfig(nil) cfg.TestingKnobs.DisableRaftLogQueue = true stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.StartWithStoreConfig(t, stopper, cfg) key := roachpb.Key("a") diff --git a/pkg/kv/kvserver/raft_transport_test.go b/pkg/kv/kvserver/raft_transport_test.go index 207cf6d436a1..4c0ba0aa743c 100644 --- a/pkg/kv/kvserver/raft_transport_test.go +++ b/pkg/kv/kvserver/raft_transport_test.go @@ -120,7 +120,7 @@ func newRaftTransportTestContext(t testing.TB) *raftTransportTestContext { // Ensure that tests using this test context and restart/shut down // their servers do not inadvertently start talking to servers from // unrelated concurrent tests. - rttc.nodeRPCContext.ClusterID.Set(context.TODO(), uuid.MakeV4()) + rttc.nodeRPCContext.ClusterID.Set(context.Background(), uuid.MakeV4()) // We are sharing the same RPC context for all simulated nodes, so // we can't enforce some of the RPC check validation. @@ -135,7 +135,7 @@ func newRaftTransportTestContext(t testing.TB) *raftTransportTestContext { } func (rttc *raftTransportTestContext) Stop() { - rttc.stopper.Stop(context.TODO()) + rttc.stopper.Stop(context.Background()) } // AddNode registers a node with the cluster. Nodes must be added @@ -539,7 +539,7 @@ func TestReopenConnection(t *testing.T) { // Take down the old server and start a new one at the same address. serverTransport.Stop(serverReplica.StoreID) - serverStopper.Stop(context.TODO()) + serverStopper.Stop(context.Background()) // With the old server down, nothing is listening on the address right now // so the circuit breaker should trip.
diff --git a/pkg/kv/kvserver/raft_transport_unit_test.go b/pkg/kv/kvserver/raft_transport_unit_test.go index b0bc28275e6f..22f951f76dea 100644 --- a/pkg/kv/kvserver/raft_transport_unit_test.go +++ b/pkg/kv/kvserver/raft_transport_unit_test.go @@ -44,7 +44,7 @@ func TestRaftTransportStartNewQueue(t *testing.T) { st := cluster.MakeTestingClusterSettings() rpcC := rpc.NewContext(log.AmbientContext{}, &base.Config{Insecure: true}, hlc.NewClock(hlc.UnixNano, 500*time.Millisecond), stopper, st) - rpcC.ClusterID.Set(context.TODO(), uuid.MakeV4()) + rpcC.ClusterID.Set(context.Background(), uuid.MakeV4()) // mrs := &dummyMultiRaftServer{} diff --git a/pkg/kv/kvserver/replica_consistency_test.go b/pkg/kv/kvserver/replica_consistency_test.go index ace17653dedf..1bf02167fc77 100644 --- a/pkg/kv/kvserver/replica_consistency_test.go +++ b/pkg/kv/kvserver/replica_consistency_test.go @@ -27,7 +27,7 @@ import ( func TestReplicaChecksumVersion(t *testing.T) { defer leaktest.AfterTest(t)() - ctx := context.TODO() + ctx := context.Background() tc := testContext{} stopper := stop.NewStopper() defer stopper.Stop(ctx) diff --git a/pkg/kv/kvserver/replica_sideload_test.go b/pkg/kv/kvserver/replica_sideload_test.go index 80a95ecaf1fa..214ff6942083 100644 --- a/pkg/kv/kvserver/replica_sideload_test.go +++ b/pkg/kv/kvserver/replica_sideload_test.go @@ -627,7 +627,7 @@ func testRaftSSTableSideloadingProposal(t *testing.T, engineInMem, mockSideloade } stopper.AddCloser(tc.engine) } - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) ctx, collect, cancel := tracing.ContextWithRecordingSpan(context.Background(), "test-recording") @@ -963,7 +963,7 @@ func TestRaftSSTableSideloadingTruncation(t *testing.T) { tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) makeInMemSideloaded(tc.repl) ctx := context.Background() diff --git a/pkg/kv/kvserver/replica_sst_snapshot_storage_test.go b/pkg/kv/kvserver/replica_sst_snapshot_storage_test.go index d309ebb5f0fd..cbe3de01a06c 100644 --- a/pkg/kv/kvserver/replica_sst_snapshot_storage_test.go +++ b/pkg/kv/kvserver/replica_sst_snapshot_storage_test.go @@ -27,7 +27,7 @@ import ( func TestSSTSnapshotStorage(t *testing.T) { defer leaktest.AfterTest(t)() - ctx := context.TODO() + ctx := context.Background() testRangeID := roachpb.RangeID(1) testSnapUUID := uuid.Must(uuid.FromBytes([]byte("foobar1234567890"))) testLimiter := rate.NewLimiter(rate.Inf, 0) diff --git a/pkg/kv/kvserver/replica_test.go b/pkg/kv/kvserver/replica_test.go index 1a69dfa8cb92..7a7bf37d4b6e 100644 --- a/pkg/kv/kvserver/replica_test.go +++ b/pkg/kv/kvserver/replica_test.go @@ -606,7 +606,7 @@ func sendLeaseRequest(r *Replica, l *roachpb.Lease) error { ba.Timestamp = r.store.Clock().Now() ba.Add(&roachpb.RequestLeaseRequest{Lease: *l}) exLease, _ := r.GetLease() - ch, _, _, pErr := r.evalAndPropose(context.TODO(), &ba, allSpansGuard(), &exLease) + ch, _, _, pErr := r.evalAndPropose(context.Background(), &ba, allSpansGuard(), &exLease) if pErr == nil { // Next if the command was committed, wait for the range to apply it. // TODO(bdarnell): refactor this to a more conventional error-handling pattern. 
@@ -622,14 +622,14 @@ func TestReplicaReadConsistency(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc := testContext{manualClock: hlc.NewManualClock(123)} cfg := TestStoreConfig(hlc.NewClock(tc.manualClock.UnixNano, time.Nanosecond)) cfg.TestingKnobs.DisableAutomaticLeaseRenewal = true tc.StartWithStoreConfig(t, stopper, cfg) - secondReplica, err := tc.addBogusReplicaToRangeDesc(context.TODO()) + secondReplica, err := tc.addBogusReplicaToRangeDesc(context.Background()) if err != nil { t.Fatal(err) } @@ -735,9 +735,9 @@ func TestBehaviorDuringLeaseTransfer(t *testing.T) { return nil } stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.StartWithStoreConfig(t, stopper, tsc) - secondReplica, err := tc.addBogusReplicaToRangeDesc(context.TODO()) + secondReplica, err := tc.addBogusReplicaToRangeDesc(context.Background()) if err != nil { t.Fatal(err) } @@ -846,14 +846,14 @@ func TestApplyCmdLeaseError(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc := testContext{manualClock: hlc.NewManualClock(123)} cfg := TestStoreConfig(hlc.NewClock(tc.manualClock.UnixNano, time.Nanosecond)) cfg.TestingKnobs.DisableAutomaticLeaseRenewal = true tc.StartWithStoreConfig(t, stopper, cfg) - secondReplica, err := tc.addBogusReplicaToRangeDesc(context.TODO()) + secondReplica, err := tc.addBogusReplicaToRangeDesc(context.Background()) if err != nil { t.Fatal(err) } @@ -883,7 +883,7 @@ func TestLeaseReplicaNotInDesc(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) lease, _ := tc.repl.GetLease() @@ -917,7 +917,7 @@ func TestReplicaRangeBoundsChecking(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) key := roachpb.RKey("a") @@ -957,7 +957,7 @@ func TestReplicaLease(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) var filterErr atomic.Value applyFilter := func(args kvserverbase.ApplyFilterArgs) (int, *roachpb.Error) { @@ -972,7 +972,7 @@ func TestReplicaLease(t *testing.T) { tsc.TestingKnobs.DisableAutomaticLeaseRenewal = true tsc.TestingKnobs.TestingApplyFilter = applyFilter tc.StartWithStoreConfig(t, stopper, tsc) - secondReplica, err := tc.addBogusReplicaToRangeDesc(context.TODO()) + secondReplica, err := tc.addBogusReplicaToRangeDesc(context.Background()) if err != nil { t.Fatal(err) } @@ -1039,14 +1039,14 @@ func TestReplicaNotLeaseHolderError(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc := testContext{manualClock: hlc.NewManualClock(123)} cfg := TestStoreConfig(hlc.NewClock(tc.manualClock.UnixNano, time.Nanosecond)) cfg.TestingKnobs.DisableAutomaticLeaseRenewal = true tc.StartWithStoreConfig(t, stopper, cfg) - secondReplica, err := tc.addBogusReplicaToRangeDesc(context.TODO()) + secondReplica, err := tc.addBogusReplicaToRangeDesc(context.Background()) if err != nil { t.Fatal(err) } @@ -1096,7 +1096,7 @@ func 
TestReplicaLeaseCounters(t *testing.T) { defer leaktest.AfterTest(t)() defer EnableLeaseHistory(100)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) var tc testContext cfg := TestStoreConfig(nil) @@ -1195,14 +1195,14 @@ func TestReplicaGossipConfigsOnLease(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc := testContext{manualClock: hlc.NewManualClock(123)} cfg := TestStoreConfig(hlc.NewClock(tc.manualClock.UnixNano, time.Nanosecond)) cfg.TestingKnobs.DisableAutomaticLeaseRenewal = true tc.StartWithStoreConfig(t, stopper, cfg) - secondReplica, err := tc.addBogusReplicaToRangeDesc(context.TODO()) + secondReplica, err := tc.addBogusReplicaToRangeDesc(context.Background()) if err != nil { t.Fatal(err) } @@ -1281,7 +1281,7 @@ func TestReplicaTSCacheLowWaterOnLease(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc := testContext{manualClock: hlc.NewManualClock(123)} cfg := TestStoreConfig(hlc.NewClock(tc.manualClock.UnixNano, time.Nanosecond)) @@ -1290,7 +1290,7 @@ func TestReplicaTSCacheLowWaterOnLease(t *testing.T) { cfg.TestingKnobs.DisableRaftLogQueue = true tc.StartWithStoreConfig(t, stopper, cfg) - secondReplica, err := tc.addBogusReplicaToRangeDesc(context.TODO()) + secondReplica, err := tc.addBogusReplicaToRangeDesc(context.Background()) if err != nil { t.Fatal(err) } @@ -1365,7 +1365,7 @@ func TestReplicaLeaseRejectUnknownRaftNodeID(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc := testContext{manualClock: hlc.NewManualClock(123)} cfg := TestStoreConfig(hlc.NewClock(tc.manualClock.UnixNano, time.Nanosecond)) @@ -1405,7 +1405,7 @@ func TestReplicaDrainLease(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) // Acquire initial lease. 
@@ -1436,7 +1436,7 @@ func TestReplicaGossipFirstRange(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) for _, key := range []string{gossip.KeyClusterID, gossip.KeyFirstRangeDescriptor, gossip.KeySentinel} { bytes, err := tc.gossip.GetInfo(key) @@ -1463,7 +1463,7 @@ func TestReplicaGossipAllConfigs(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) if cfg := tc.gossip.GetSystemConfig(); cfg == nil { t.Fatal("config not set") @@ -1476,7 +1476,7 @@ func TestReplicaNoGossipConfig(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) // Write some arbitrary data in the system span (up to, but not including MaxReservedID+1) @@ -1521,7 +1521,7 @@ func TestReplicaNoGossipFromNonLeader(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) // Write some arbitrary data in the system span (up to, but not including MaxReservedID+1) @@ -1806,7 +1806,7 @@ func TestOptimizePuts(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) pArgs := make([]roachpb.PutRequest, optimizePutThreshold) @@ -2050,7 +2050,7 @@ func TestAcquireLease(t *testing.T) { testutils.RunTrueAndFalse(t, "withMinLeaseProposedTS", func(t *testing.T, withMinLeaseProposedTS bool) { tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) lease, _ := tc.repl.GetLease() @@ -2123,7 +2123,7 @@ func TestLeaseConcurrent(t *testing.T) { const origMsg = "boom" testutils.RunTrueAndFalse(t, "withError", func(t *testing.T, withError bool) { stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) var seen int32 var active int32 @@ -2210,7 +2210,7 @@ func TestReplicaUpdateTSCache(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) startNanos := tc.Clock().Now().WallTime @@ -2324,7 +2324,7 @@ func TestReplicaLatching(t *testing.T) { return nil } stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.StartWithStoreConfig(t, stopper, tsc) defer close(blockingDone) // make sure teardown can happen @@ -2496,7 +2496,7 @@ func TestReplicaLatchingInconsistent(t *testing.T) { return nil } stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.StartWithStoreConfig(t, stopper, tsc) cmd1Done := make(chan *roachpb.Error) go func() { @@ -2545,7 +2545,7 @@ func TestReplicaLatchingSelfOverlap(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) testutils.RunTrueAndFalse(t, "cmd1Read", func(t *testing.T, cmd1Read bool) { @@ 
-2556,7 +2556,7 @@ func TestReplicaLatchingSelfOverlap(t *testing.T) { ba.Add(readOrWriteArgs(roachpb.Key(key), cmd2Read)) // Set a deadline for nicer error behavior on deadlock. - ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() _, pErr := tc.Sender().Send(ctx, ba) if pErr != nil { @@ -2601,7 +2601,7 @@ func TestReplicaLatchingTimestampNonInterference(t *testing.T) { return nil } stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.StartWithStoreConfig(t, stopper, tsc) testCases := []struct { @@ -2739,7 +2739,7 @@ func TestReplicaUseTSCache(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) // Set clock to time 1s and do the read. t0 := 1 * time.Second @@ -2832,7 +2832,7 @@ func TestConditionalPutUpdatesTSCacheOnError(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{manualClock: hlc.NewManualClock(123)} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) cfg := TestStoreConfig(hlc.NewClock(tc.manualClock.UnixNano, time.Nanosecond)) cfg.TestingKnobs.DontPushOnWriteIntentError = true tc.StartWithStoreConfig(t, stopper, cfg) @@ -2914,7 +2914,7 @@ func TestInitPutUpdatesTSCacheOnError(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{manualClock: hlc.NewManualClock(123)} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) cfg := TestStoreConfig(hlc.NewClock(tc.manualClock.UnixNano, time.Nanosecond)) cfg.TestingKnobs.DontPushOnWriteIntentError = true tc.StartWithStoreConfig(t, stopper, cfg) @@ -3013,7 +3013,7 @@ func TestReplicaNoTSCacheInconsistent(t *testing.T) { t.Run(rc.String(), func(t *testing.T) { tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) // Set clock to time 1s and do the read. t0 := 1 * time.Second @@ -3052,7 +3052,7 @@ func TestReplicaNoTSCacheUpdateOnFailure(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) cfg := TestStoreConfig(nil) cfg.TestingKnobs.DontPushOnWriteIntentError = true tc.StartWithStoreConfig(t, stopper, cfg) @@ -3104,7 +3104,7 @@ func TestReplicaNoTimestampIncrementWithinTxn(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) // Test for both read & write attempts. 
@@ -3177,7 +3177,7 @@ func TestReplicaAbortSpanReadError(t *testing.T) { tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) k := []byte("a") @@ -3217,7 +3217,7 @@ func TestReplicaAbortSpanOnlyWithIntent(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) txn := newTransaction("test", []byte("test"), 10, tc.Clock()) @@ -3245,7 +3245,7 @@ func TestReplicaTxnIdempotency(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) runWithTxn := func(txn *roachpb.Transaction, reqs ...roachpb.Request) error { @@ -3674,7 +3674,7 @@ func TestEndTxnDeadline(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) // 4 cases: no deadline, past deadline, equal deadline, future deadline. @@ -3741,7 +3741,7 @@ func TestSerializableDeadline(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) // Create our txn. It will be pushed next. @@ -3791,7 +3791,7 @@ func TestCreateTxnRecordAfterPushAndGC(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) key := roachpb.Key("a") @@ -3888,7 +3888,7 @@ func TestEndTxnDeadline_1PC(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) key := roachpb.Key("a") @@ -3919,7 +3919,7 @@ func Test1PCTransactionWriteTimestamp(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) key := roachpb.Key("key") @@ -3957,7 +3957,7 @@ func TestEndTxnWithMalformedSplitTrigger(t *testing.T) { tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) key := roachpb.Key("foo") @@ -4008,7 +4008,7 @@ func TestEndTxnBeforeHeartbeat(t *testing.T) { defer setTxnAutoGC(false)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) key := []byte("a") @@ -4058,7 +4058,7 @@ func TestEndTxnAfterHeartbeat(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) key := roachpb.Key("a") @@ -4112,7 +4112,7 @@ func TestEndTxnWithPushedTimestamp(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) testCases := []struct { @@ -4175,7 +4175,7 @@ func TestEndTxnWithIncrementedEpoch(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - 
defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) key := []byte("a") @@ -4315,7 +4315,7 @@ func TestEndTxnRollbackAbortedTransaction(t *testing.T) { testutils.RunTrueAndFalse(t, "populateAbortSpan", func(t *testing.T, populateAbortSpan bool) { tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) cfg := TestStoreConfig(nil) cfg.TestingKnobs.DontPushOnWriteIntentError = true tc.StartWithStoreConfig(t, stopper, cfg) @@ -4325,7 +4325,7 @@ func TestEndTxnRollbackAbortedTransaction(t *testing.T) { put := putArgs(key, key) assignSeqNumsForReqs(txn, &put) if _, pErr := kv.SendWrappedWith( - context.TODO(), tc.Sender(), roachpb.Header{Txn: txn}, &put, + context.Background(), tc.Sender(), roachpb.Header{Txn: txn}, &put, ); pErr != nil { t.Fatal(pErr) } @@ -4356,7 +4356,7 @@ func TestEndTxnRollbackAbortedTransaction(t *testing.T) { var txnRecord roachpb.Transaction txnKey := keys.TransactionKey(txn.Key, txn.ID) if ok, err := storage.MVCCGetProto( - context.TODO(), tc.repl.store.Engine(), + context.Background(), tc.repl.store.Engine(), txnKey, hlc.Timestamp{}, &txnRecord, storage.MVCCGetOptions{}, ); err != nil { t.Fatal(err) @@ -4364,7 +4364,7 @@ func TestEndTxnRollbackAbortedTransaction(t *testing.T) { t.Fatalf("unexpected txn record %v", txnRecord) } - if pErr := tc.store.intentResolver.ResolveIntents(context.TODO(), + if pErr := tc.store.intentResolver.ResolveIntents(context.Background(), []roachpb.LockUpdate{ roachpb.MakeLockUpdate(&txnRecord, roachpb.Span{Key: key}), }, intentresolver.ResolveOptions{Poison: true}); pErr != nil { @@ -4485,7 +4485,7 @@ func TestReplicaLaziness(t *testing.T) { testWithAction := func(action func() roachpb.Request) { tc := testContext{bootstrapMode: bootstrapRangeOnly} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) if status := tc.repl.RaftStatus(); status != nil { @@ -4534,7 +4534,7 @@ func TestBatchRetryCantCommitIntents(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) cfg := TestStoreConfig(nil) cfg.TestingKnobs.DontPushOnWriteIntentError = true tc.StartWithStoreConfig(t, stopper, cfg) @@ -4644,7 +4644,7 @@ func TestEndTxnLocalGC(t *testing.T) { return nil } stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.StartWithStoreConfig(t, stopper, tsc) splitKey := roachpb.RKey("c") @@ -4755,7 +4755,7 @@ func TestEndTxnResolveOnlyLocalIntents(t *testing.T) { } stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.StartWithStoreConfig(t, stopper, tsc) newRepl, txn := setupResolutionTest(t, tc, key, splitKey, true /* commit */) @@ -4804,7 +4804,7 @@ func TestEndTxnDirectGC(t *testing.T) { func() { tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) ctx := logtags.AddTag(context.Background(), "testcase", i) @@ -4867,7 +4867,7 @@ func TestEndTxnDirectGCFailure(t *testing.T) { return nil } stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.StartWithStoreConfig(t, stopper, tsc) setupResolutionTest(t, tc, key, splitKey, true /* commit */) @@ -4880,7 +4880,7 @@ func 
TestEndTxnDirectGCFailure(t *testing.T) { testutils.SucceedsSoon(t, func() error { if atomic.LoadInt64(&count) == 0 { return errors.Errorf("intent resolution not attempted yet") - } else if err := tc.store.DB().Put(context.TODO(), "panama", "banana"); err != nil { + } else if err := tc.store.DB().Put(context.Background(), "panama", "banana"); err != nil { return err } return nil @@ -4895,7 +4895,7 @@ func TestEndTxnDirectGC_1PC(t *testing.T) { func() { tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) key := roachpb.Key("a") @@ -4947,7 +4947,7 @@ func TestReplicaTransactionRequires1PC(t *testing.T) { } tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.StartWithStoreConfig(t, stopper, tsc) testCases := []struct { @@ -5014,7 +5014,7 @@ func TestReplicaEndTxnWithRequire1PC(t *testing.T) { tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) key := roachpb.Key("a") @@ -5055,7 +5055,7 @@ func TestAbortSpanPoisonOnResolve(t *testing.T) { run := func(abort bool) { tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) pusher := newTransaction("test", key, 1, tc.Clock()) @@ -5165,7 +5165,7 @@ func TestAbortSpanError(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) txn := roachpb.Transaction{} @@ -5206,7 +5206,7 @@ func TestPushTxnBadKey(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) pusher := newTransaction("test", roachpb.Key("a"), 1, tc.Clock()) @@ -5243,7 +5243,7 @@ func TestPushTxnAlreadyCommittedOrAborted(t *testing.T) { testutils.RunTrueAndFalse(t, "commit", func(t *testing.T, commit bool) { tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) key := roachpb.Key(fmt.Sprintf("key-%t-%t", autoGC, commit)) @@ -5296,7 +5296,7 @@ func TestPushTxnUpgradeExistingTxn(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) now := tc.Clock().Now() @@ -5360,7 +5360,7 @@ func TestPushTxnQueryPusheeHasNewerVersion(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) cfg := TestStoreConfig(nil) cfg.TestingKnobs.DontRetryPushTxnFailures = true tc.StartWithStoreConfig(t, stopper, cfg) @@ -5403,7 +5403,7 @@ func TestPushTxnHeartbeatTimeout(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{manualClock: hlc.NewManualClock(123)} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) cfg := TestStoreConfig(hlc.NewClock(tc.manualClock.UnixNano, time.Nanosecond)) cfg.TestingKnobs.DontRetryPushTxnFailures = true cfg.TestingKnobs.DontRecoverIndeterminateCommits = true @@ -5552,7 +5552,7 @@ func TestResolveIntentPushTxnReplyTxn(t *testing.T) 
{ defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) b := tc.engine.NewBatch() @@ -5605,7 +5605,7 @@ func TestPushTxnPriorities(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) cfg := TestStoreConfig(nil) cfg.TestingKnobs.DontRetryPushTxnFailures = true tc.StartWithStoreConfig(t, stopper, cfg) @@ -5680,7 +5680,7 @@ func TestPushTxnPushTimestamp(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) pusher := newTransaction("test", roachpb.Key("a"), 1, tc.Clock()) @@ -5724,7 +5724,7 @@ func TestPushTxnPushTimestampAlreadyPushed(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) pusher := newTransaction("test", roachpb.Key("a"), 1, tc.Clock()) @@ -5836,7 +5836,7 @@ func TestQueryIntentRequest(t *testing.T) { testutils.RunTrueAndFalse(t, "errIfMissing", func(t *testing.T, errIfMissing bool) { tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) key1 := roachpb.Key("a") @@ -5958,7 +5958,7 @@ func TestReplicaResolveIntentRange(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) keys := []roachpb.Key{roachpb.Key("a"), roachpb.Key("b")} @@ -6023,7 +6023,7 @@ func TestRangeStatsComputation(t *testing.T) { bootstrapMode: bootstrapRangeOnly, } stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) baseStats := initialStats() @@ -6142,7 +6142,7 @@ func TestMerge(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) key := []byte("mergedkey") @@ -6204,7 +6204,7 @@ func TestConditionFailedError(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) key := []byte("k") @@ -6258,7 +6258,7 @@ func TestAppliedIndex(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) var appliedIndex uint64 @@ -6309,7 +6309,7 @@ func TestReplicaCorruption(t *testing.T) { tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.StartWithStoreConfig(t, stopper, tsc) // First send a regular command. 
@@ -6342,7 +6342,7 @@ func TestChangeReplicasDuplicateError(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) chgs := roachpb.MakeReplicationChanges(roachpb.ADD_REPLICA, roachpb.ReplicationTarget{ @@ -6595,7 +6595,7 @@ func TestRangeLookup(t *testing.T) { func TestRequestLeaderEncounterGroupDeleteError(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) // Mock propose to return a roachpb.RaftGroupDeletedError. var active int32 @@ -6697,7 +6697,7 @@ func TestBatchErrorWithIndex(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) ba := roachpb.BatchRequest{} @@ -6731,7 +6731,7 @@ func TestReplicaLoadSystemConfigSpanIntent(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) scStartSddr, err := keys.Addr(keys.SystemConfigSpan.Key) if err != nil { @@ -6842,7 +6842,7 @@ func TestQuotaPoolReleasedOnFailedProposal(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) // Flush a write all the way through the Raft proposal pipeline to ensure @@ -6886,7 +6886,7 @@ func TestQuotaPoolAccessOnDestroyedReplica(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) repl, err := tc.store.GetReplica(1) @@ -6923,7 +6923,7 @@ func TestEntries(t *testing.T) { cfg.RaftTickInterval = math.MaxInt32 cfg.TestingKnobs.DisableRaftLogQueue = true stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.StartWithStoreConfig(t, stopper, cfg) repl := tc.repl @@ -7074,7 +7074,7 @@ func TestTerm(t *testing.T) { tsc := TestStoreConfig(nil) tsc.TestingKnobs.DisableRaftLogQueue = true stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.StartWithStoreConfig(t, stopper, tsc) repl := tc.repl @@ -7157,7 +7157,7 @@ func TestGCIncorrectRange(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) // Split range into two ranges. 
@@ -7248,7 +7248,7 @@ func TestReplicaCancelRaft(t *testing.T) { } tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.StartWithStoreConfig(t, stopper, cfg) if cancelEarly { cancel() @@ -7288,7 +7288,7 @@ func TestReplicaCancelRaft(t *testing.T) { func TestReplicaAbandonProposal(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc := testContext{} tc.Start(t, stopper) @@ -7488,7 +7488,7 @@ func TestSyncSnapshot(t *testing.T) { tsc := TestStoreConfig(nil) tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.StartWithStoreConfig(t, stopper, tsc) // With enough time in BlockingSnapshotDuration, we succeed on the @@ -7508,10 +7508,10 @@ func TestSyncSnapshot(t *testing.T) { func TestReplicaRetryRaftProposal(t *testing.T) { defer leaktest.AfterTest(t)() - ctx := context.TODO() + ctx := context.Background() var tc testContext stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) type magicKey struct{} @@ -7677,7 +7677,7 @@ func TestReplicaBurstPendingCommandsAndRepropose(t *testing.T) { cfg.TestingKnobs.DisableRefreshReasonNewLeader = true cfg.TestingKnobs.DisableRefreshReasonNewLeaderOrConfigChange = true stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.StartWithStoreConfig(t, stopper, cfg) type magicKey struct{} @@ -7773,7 +7773,7 @@ func TestReplicaRefreshPendingCommandsTicks(t *testing.T) { // Disable ticks which would interfere with the manual ticking in this test. cfg.RaftTickInterval = math.MaxInt32 stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.StartWithStoreConfig(t, stopper, cfg) // Flush a write all the way through the Raft proposal pipeline. 
This @@ -8232,7 +8232,7 @@ func TestCommandTimeThreshold(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) now := tc.Clock().Now() @@ -8310,10 +8310,10 @@ func TestReplicaTimestampCacheBumpNotLost(t *testing.T) { tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) - ctx := tc.store.AnnotateCtx(context.TODO()) + ctx := tc.store.AnnotateCtx(context.Background()) key := keys.LocalMax txn := newTransaction("test", key, 1, tc.Clock()) @@ -8367,10 +8367,10 @@ func TestReplicaEvaluationNotTxnMutation(t *testing.T) { tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) - ctx := tc.repl.AnnotateCtx(context.TODO()) + ctx := tc.repl.AnnotateCtx(context.Background()) key := keys.LocalMax txn := newTransaction("test", key, 1, tc.Clock()) @@ -8446,7 +8446,7 @@ func TestReplicaMetrics(t *testing.T) { var tc testContext stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) cfg := TestStoreConfig(nil) tc.StartWithStoreConfig(t, stopper, cfg) @@ -8951,7 +8951,7 @@ func TestCommandTooLarge(t *testing.T) { tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) st := tc.store.cfg.Settings @@ -8986,10 +8986,10 @@ func TestErrorInRaftApplicationClearsIntents(t *testing.T) { } s, _, kvDB := serverutils.StartServer(t, base.TestServerArgs{ Knobs: base.TestingKnobs{Store: &storeKnobs}}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) splitKey := roachpb.Key("b") - if err := kvDB.AdminSplit(context.TODO(), splitKey, splitKey, hlc.MaxTimestamp /* expirationTime */); err != nil { + if err := kvDB.AdminSplit(context.Background(), splitKey, splitKey, hlc.MaxTimestamp /* expirationTime */); err != nil { t.Fatal(err) } @@ -9059,7 +9059,7 @@ func TestProposeWithAsyncConsensus(t *testing.T) { } stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.StartWithStoreConfig(t, stopper, tsc) repl := tc.repl @@ -9470,7 +9470,7 @@ func TestReplicaRecomputeStats(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) key := roachpb.RKey("a") @@ -9591,7 +9591,7 @@ func TestReplicaServersideRefreshes(t *testing.T) { // have to use distinct keys. 
tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) // Increment the clock so that all the transactions in the tests run at a @@ -10126,7 +10126,7 @@ func TestReplicaPushed1PC(t *testing.T) { tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) ctx := context.Background() diff --git a/pkg/kv/kvserver/replicate_queue_test.go b/pkg/kv/kvserver/replicate_queue_test.go index 5f13f3c627d2..d98cd975cc5f 100644 --- a/pkg/kv/kvserver/replicate_queue_test.go +++ b/pkg/kv/kvserver/replicate_queue_test.go @@ -68,7 +68,7 @@ func testReplicateQueueRebalanceInner(t *testing.T, atomic bool) { }, }, ) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) for _, server := range tc.Servers { st := server.ClusterSettings() diff --git a/pkg/kv/kvserver/scanner_test.go b/pkg/kv/kvserver/scanner_test.go index 87d65bcf3bb4..c2a8e1e8b873 100644 --- a/pkg/kv/kvserver/scanner_test.go +++ b/pkg/kv/kvserver/scanner_test.go @@ -120,7 +120,7 @@ func (tq *testQueue) setDisabled(d bool) { } func (tq *testQueue) Start(stopper *stop.Stopper) { - stopper.RunWorker(context.TODO(), func(context.Context) { + stopper.RunWorker(context.Background(), func(context.Context) { for { select { case <-time.After(1 * time.Millisecond): @@ -229,7 +229,7 @@ func TestScannerAddToQueues(t *testing.T) { }) // Stop scanner and verify both queues are stopped. - stopper.Stop(context.TODO()) + stopper.Stop(context.Background()) if !q1.isDone() || !q2.isDone() { t.Errorf("expected all queues to stop; got %t, %t", q1.isDone(), q2.isDone()) } @@ -257,7 +257,7 @@ func TestScannerTiming(t *testing.T) { stopper := stop.NewStopper() s.Start(stopper) time.Sleep(runTime) - stopper.Stop(context.TODO()) + stopper.Stop(context.Background()) avg := s.avgScan() log.Infof(context.Background(), "%d: average scan: %s", i, avg) @@ -336,7 +336,7 @@ func TestScannerDisabled(t *testing.T) { s := newReplicaScanner(makeAmbCtx(), clock, 1*time.Millisecond, 0, 0, ranges) s.AddQueues(q) stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) s.Start(stopper) // Verify queue gets all ranges. 
@@ -399,7 +399,7 @@ func TestScannerEmptyRangeSet(t *testing.T) { s := newReplicaScanner(makeAmbCtx(), clock, time.Hour, 0, 0, ranges) s.AddQueues(q) stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) s.Start(stopper) time.Sleep(time.Millisecond) // give it some time to (not) busy loop if count := s.scanCount(); count > 1 { diff --git a/pkg/kv/kvserver/scheduler_test.go b/pkg/kv/kvserver/scheduler_test.go index e450c880c439..0bf3168eee45 100644 --- a/pkg/kv/kvserver/scheduler_test.go +++ b/pkg/kv/kvserver/scheduler_test.go @@ -192,7 +192,7 @@ func TestSchedulerLoop(t *testing.T) { p := newTestProcessor() s := newRaftScheduler(nil, p, 1) stopper := stop.NewStopper() - ctx := context.TODO() + ctx := context.Background() defer stopper.Stop(ctx) s.Start(ctx, stopper) s.EnqueueRaftTick(1, 2, 3) @@ -214,7 +214,7 @@ func TestSchedulerBuffering(t *testing.T) { p := newTestProcessor() s := newRaftScheduler(nil, p, 1) stopper := stop.NewStopper() - ctx := context.TODO() + ctx := context.Background() defer stopper.Stop(ctx) s.Start(ctx, stopper) diff --git a/pkg/kv/kvserver/single_key_test.go b/pkg/kv/kvserver/single_key_test.go index 0063df4b526b..26337980edc2 100644 --- a/pkg/kv/kvserver/single_key_test.go +++ b/pkg/kv/kvserver/single_key_test.go @@ -40,7 +40,7 @@ func TestSingleKey(t *testing.T) { base.TestClusterArgs{ ReplicationMode: base.ReplicationAuto, }) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) ctx := context.Background() // Initialize the value for our test key to zero. diff --git a/pkg/kv/kvserver/split/finder_test.go b/pkg/kv/kvserver/split/finder_test.go index 98fb9a260167..6f6783868e78 100644 --- a/pkg/kv/kvserver/split/finder_test.go +++ b/pkg/kv/kvserver/split/finder_test.go @@ -28,7 +28,7 @@ import ( func TestSplitFinderKey(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) const ReservoirKeyOffset = 1000 @@ -165,7 +165,7 @@ func TestSplitFinderKey(t *testing.T) { func TestSplitFinderRecorder(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) const ReservoirKeyOffset = 1000 diff --git a/pkg/kv/kvserver/split_queue_test.go b/pkg/kv/kvserver/split_queue_test.go index 2ac8f305bb1b..26e22036f09d 100644 --- a/pkg/kv/kvserver/split_queue_test.go +++ b/pkg/kv/kvserver/split_queue_test.go @@ -31,7 +31,7 @@ func TestSplitQueueShouldQueue(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) // Set zone configs. 
diff --git a/pkg/kv/kvserver/stateloader/initial_test.go b/pkg/kv/kvserver/stateloader/initial_test.go index 3424b80f578b..edf1a209069b 100644 --- a/pkg/kv/kvserver/stateloader/initial_test.go +++ b/pkg/kv/kvserver/stateloader/initial_test.go @@ -26,7 +26,7 @@ import ( func TestSynthesizeHardState(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) eng := storage.NewDefaultInMem() stopper.AddCloser(eng) diff --git a/pkg/kv/kvserver/stats_test.go b/pkg/kv/kvserver/stats_test.go index e49b2b8abef2..58faf264f3e9 100644 --- a/pkg/kv/kvserver/stats_test.go +++ b/pkg/kv/kvserver/stats_test.go @@ -37,7 +37,7 @@ func TestRangeStatsEmpty(t *testing.T) { bootstrapMode: bootstrapRangeOnly, } stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) ms := tc.repl.GetMVCCStats() @@ -50,7 +50,7 @@ func TestRangeStatsInit(t *testing.T) { defer leaktest.AfterTest(t)() tc := testContext{} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tc.Start(t, stopper) ms := enginepb.MVCCStats{ LiveBytes: 1, diff --git a/pkg/kv/kvserver/store_pool_test.go b/pkg/kv/kvserver/store_pool_test.go index b86d509dd6a8..45227b8599e9 100644 --- a/pkg/kv/kvserver/store_pool_test.go +++ b/pkg/kv/kvserver/store_pool_test.go @@ -127,7 +127,7 @@ func TestStorePoolGossipUpdate(t *testing.T) { TestTimeUntilStoreDead, false, /* deterministic */ func() int { return 0 }, /* NodeCount */ kvserverpb.NodeLivenessStatus_DEAD) - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) sg := gossiputil.NewStoreGossiper(g) sp.detailsMu.RLock() @@ -195,7 +195,7 @@ func TestStorePoolGetStoreList(t *testing.T) { TestTimeUntilStoreDead, false, /* deterministic */ func() int { return 10 }, /* nodeCount */ kvserverpb.NodeLivenessStatus_DEAD) - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) sg := gossiputil.NewStoreGossiper(g) constraints := []zonepb.ConstraintsConjunction{ { @@ -452,7 +452,7 @@ func TestStorePoolUpdateLocalStore(t *testing.T) { TestTimeUntilStoreDead, false, /* deterministic */ func() int { return 10 }, /* nodeCount */ kvserverpb.NodeLivenessStatus_DEAD) - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) sg := gossiputil.NewStoreGossiper(g) stores := []*roachpb.StoreDescriptor{ { @@ -623,7 +623,7 @@ func TestStorePoolGetStoreDetails(t *testing.T) { TestTimeUntilStoreDead, false, /* deterministic */ func() int { return 10 }, /* nodeCount */ kvserverpb.NodeLivenessStatus_DEAD) - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) sg := gossiputil.NewStoreGossiper(g) sg.GossipStores(uniqueStore, t) @@ -643,7 +643,7 @@ func TestStorePoolFindDeadReplicas(t *testing.T) { TestTimeUntilStoreDead, false, /* deterministic */ func() int { return 10 }, /* nodeCount */ kvserverpb.NodeLivenessStatus_DEAD) - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) sg := gossiputil.NewStoreGossiper(g) stores := []*roachpb.StoreDescriptor{ @@ -746,7 +746,7 @@ func TestStorePoolDefaultState(t *testing.T) { TestTimeUntilStoreDead, false, /* deterministic */ func() int { return 10 }, /* nodeCount */ kvserverpb.NodeLivenessStatus_DEAD) - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) liveReplicas, deadReplicas := sp.liveAndDeadReplicas(0, 
[]roachpb.ReplicaDescriptor{{StoreID: 1}}) if len(liveReplicas) != 0 || len(deadReplicas) != 0 { @@ -771,7 +771,7 @@ func TestStorePoolThrottle(t *testing.T) { TestTimeUntilStoreDead, false, /* deterministic */ func() int { return 10 }, /* nodeCount */ kvserverpb.NodeLivenessStatus_DEAD) - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) sg := gossiputil.NewStoreGossiper(g) sg.GossipStores(uniqueStore, t) @@ -809,7 +809,7 @@ func TestGetLocalities(t *testing.T) { TestTimeUntilStoreDead, false, /* deterministic */ func() int { return 10 }, /* nodeCount */ kvserverpb.NodeLivenessStatus_DEAD) - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) sg := gossiputil.NewStoreGossiper(g) // Creates a node with a locality with the number of tiers passed in. The @@ -880,7 +880,7 @@ func TestStorePoolDecommissioningReplicas(t *testing.T) { TestTimeUntilStoreDead, false, /* deterministic */ func() int { return 10 }, /* nodeCount */ kvserverpb.NodeLivenessStatus_DEAD) - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) sg := gossiputil.NewStoreGossiper(g) stores := []*roachpb.StoreDescriptor{ diff --git a/pkg/kv/kvserver/store_test.go b/pkg/kv/kvserver/store_test.go index e4d30ab9e3f0..b16ca22351a7 100644 --- a/pkg/kv/kvserver/store_test.go +++ b/pkg/kv/kvserver/store_test.go @@ -81,7 +81,7 @@ func (s *Store) TestSender() kv.Sender { // that. key, err := keys.Addr(ba.Requests[0].GetInner().Header().Key) if err != nil { - log.Fatalf(context.TODO(), "%v", err) + log.Fatalf(context.Background(), "%v", err) } ba.RangeID = roachpb.RangeID(1) @@ -229,12 +229,12 @@ func createTestStoreWithoutStart( cfg.Transport = NewDummyRaftTransport(cfg.Settings) factory := &testSenderFactory{} cfg.DB = kv.NewDB(cfg.AmbientCtx, factory, cfg.Clock) - store := NewStore(context.TODO(), *cfg, eng, &roachpb.NodeDescriptor{NodeID: 1}) + store := NewStore(context.Background(), *cfg, eng, &roachpb.NodeDescriptor{NodeID: 1}) factory.setStore(store) require.NoError(t, WriteClusterVersion(context.Background(), eng, clusterversion.TestingClusterVersion)) if err := InitEngine( - context.TODO(), eng, roachpb.StoreIdent{NodeID: 1, StoreID: 1}, + context.Background(), eng, roachpb.StoreIdent{NodeID: 1, StoreID: 1}, ); err != nil { t.Fatal(err) } @@ -250,7 +250,7 @@ func createTestStoreWithoutStart( }) } if err := WriteInitialClusterData( - context.TODO(), eng, kvs, /* initialValues */ + context.Background(), eng, kvs, /* initialValues */ clusterversion.TestingBinaryVersion, 1 /* numStores */, splits, cfg.Clock.PhysicalNow(), ); err != nil { @@ -425,7 +425,7 @@ func TestStoreInitAndBootstrap(t *testing.T) { // We need a fixed clock to avoid LastUpdateNanos drifting on us. 
cfg := TestStoreConfig(hlc.NewClock(func() int64 { return 123 }, time.Nanosecond)) stopper := stop.NewStopper() - ctx := context.TODO() + ctx := context.Background() defer stopper.Stop(ctx) eng := storage.NewDefaultInMem() stopper.AddCloser(eng) @@ -500,7 +500,7 @@ func TestStoreInitAndBootstrap(t *testing.T) { func TestInitializeEngineErrors(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - ctx := context.TODO() + ctx := context.Background() defer stopper.Stop(ctx) eng := storage.NewDefaultInMem() stopper.AddCloser(eng) @@ -557,7 +557,7 @@ func createReplica(s *Store, rangeID roachpb.RangeID, start, end roachpb.RKey) * func TestStoreAddRemoveRanges(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) store, _ := createTestStore(t, testStoreOpts{ // This test was written before test stores could start with more than one @@ -633,7 +633,7 @@ func TestStoreAddRemoveRanges(t *testing.T) { func TestReplicasByKey(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) store, _ := createTestStore(t, testStoreOpts{ // This test was written before test stores could start with more than one @@ -681,7 +681,7 @@ func TestReplicasByKey(t *testing.T) { func TestStoreRemoveReplicaDestroy(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) store, _ := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper) repl1, err := store.GetReplica(1) @@ -718,7 +718,7 @@ func TestStoreRemoveReplicaDestroy(t *testing.T) { func TestStoreReplicaVisitor(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) store, _ := createTestStore(t, testStoreOpts{ // This test was written before test stores could start with more than one @@ -800,7 +800,7 @@ func TestStoreReplicaVisitor(t *testing.T) { func TestHasOverlappingReplica(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) store, _ := createTestStore(t, testStoreOpts{ // This test was written before test stores could start with more than one @@ -934,7 +934,7 @@ func TestLookupPrecedingReplica(t *testing.T) { func TestMaybeMarkReplicaInitialized(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) store, _ := createTestStore(t, testStoreOpts{ // This test was written before test stores could start with more than one @@ -973,7 +973,7 @@ func TestMaybeMarkReplicaInitialized(t *testing.T) { defer store.mu.Unlock() expectedResult := "attempted to process uninitialized range.*" - ctx := r.AnnotateCtx(context.TODO()) + ctx := r.AnnotateCtx(context.Background()) if err := store.maybeMarkReplicaInitializedLocked(ctx, r); !testutils.IsError(err, expectedResult) { t.Errorf("expected maybeMarkReplicaInitializedLocked with uninitialized replica to fail, got %v", err) } @@ -1006,7 +1006,7 @@ func TestMaybeMarkReplicaInitialized(t *testing.T) { func TestStoreSend(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) store, _ := createTestStore(t, 
 		testStoreOpts{createSystemRanges: true}, stopper)
 	gArgs := getArgs([]byte("a"))
@@ -1085,7 +1085,7 @@ func TestStoreObservedTimestamp(t *testing.T) {
 			return nil
 		}
 		stopper := stop.NewStopper()
-		defer stopper.Stop(context.TODO())
+		defer stopper.Stop(context.Background())
 		store := createTestStoreWithConfig(t, stopper,
 			testStoreOpts{createSystemRanges: true}, &cfg)
 		txn := newTransaction("test", test.key, 1, store.cfg.Clock)
 		txn.MaxTimestamp = hlc.MaxTimestamp
@@ -1178,7 +1178,7 @@ func TestStoreAnnotateNow(t *testing.T) {
 func TestStoreVerifyKeys(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	store, _ := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper)
 	// Try a start key == KeyMax.
 	gArgs := getArgs(roachpb.KeyMax)
@@ -1236,7 +1236,7 @@ func TestStoreVerifyKeys(t *testing.T) {
 func TestStoreSendUpdateTime(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	store, _ := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper)
 	args := getArgs([]byte("a"))
 	reqTS := store.cfg.Clock.Now().Add(store.cfg.Clock.MaxOffset().Nanoseconds(), 0)
@@ -1255,7 +1255,7 @@ func TestStoreSendUpdateTime(t *testing.T) {
 func TestStoreSendWithZeroTime(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	store, _ := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper)
 	args := getArgs([]byte("a"))
@@ -1279,7 +1279,7 @@ func TestStoreSendWithZeroTime(t *testing.T) {
 func TestStoreSendWithClockOffset(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	store, _ := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper)
 	args := getArgs([]byte("a"))
 	// Set args timestamp to exceed max offset.
@@ -1294,7 +1294,7 @@ func TestStoreSendWithClockOffset(t *testing.T) {
 func TestStoreSendBadRange(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	store, _ := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper)
 	args := getArgs([]byte("0"))
 	if _, pErr := kv.SendWrappedWith(context.Background(), store.TestSender(), roachpb.Header{
@@ -1329,7 +1329,7 @@ func splitTestRange(store *Store, key, splitKey roachpb.RKey, t *testing.T) *Rep
 	require.NoError(t, err)
 	newLeftDesc := *repl.Desc()
 	newLeftDesc.EndKey = splitKey
-	err = store.SplitRange(repl.AnnotateCtx(context.TODO()), repl, newRng, &roachpb.SplitTrigger{
+	err = store.SplitRange(repl.AnnotateCtx(context.Background()), repl, newRng, &roachpb.SplitTrigger{
 		RightDesc: *rhsDesc,
 		LeftDesc:  newLeftDesc,
 	})
@@ -1342,7 +1342,7 @@ func splitTestRange(store *Store, key, splitKey roachpb.RKey, t *testing.T) *Rep
 func TestStoreSendOutOfRange(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	store, _ := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper)
 	repl2 := splitTestRange(store, roachpb.RKeyMin, roachpb.RKey(roachpb.Key("b")), t)
@@ -1396,7 +1396,7 @@ func TestStoreRangeIDAllocation(t *testing.T) {
 func TestStoreReplicasByKey(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	store, _ := createTestStore(t, testStoreOpts{
 		// This test was written before test stores could start with more than one
@@ -1446,7 +1446,7 @@ func TestStoreReplicasByKey(t *testing.T) {
 func TestStoreSetRangesMaxBytes(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	cfg := TestStoreConfig(nil)
 	cfg.TestingKnobs.DisableMergeQueue = true
 	store := createTestStoreWithConfig(t, stopper,
@@ -1520,7 +1520,7 @@ func TestStoreResolveWriteIntent(t *testing.T) {
 		return nil
 	}
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	store := createTestStoreWithConfig(t, stopper,
 		testStoreOpts{createSystemRanges: true}, &cfg)
 	for i, resolvable := range []bool{true, false} {
@@ -1589,7 +1589,7 @@ func TestStoreResolveWriteIntent(t *testing.T) {
 func TestStoreResolveWriteIntentRollback(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	store, _ := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper)
 	key := roachpb.Key("a")
@@ -1815,7 +1815,7 @@ func TestStoreResolveWriteIntentPushOnRead(t *testing.T) {
 func TestStoreResolveWriteIntentNoTxn(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	store, _ := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper)
 	key := roachpb.Key("a")
@@ -1918,7 +1918,7 @@ func TestStoreReadInconsistent(t *testing.T) {
 	// automatic cleanup for this to work.
 	defer setTxnAutoGC(false)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	store, _ := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper)
 	for _, canPush := range []bool{true, false} {
@@ -2112,7 +2112,7 @@ func TestStoreScanResumeTSCache(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	store, manualClock := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper)
 	// Write three keys at time t0.
@@ -2207,7 +2207,7 @@ func TestStoreScanIntents(t *testing.T) {
 		return nil
 	}
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	store := createTestStoreWithConfig(t, stopper,
 		testStoreOpts{createSystemRanges: true}, &cfg)
 	testCases := []struct {
@@ -2327,7 +2327,7 @@ func TestStoreScanInconsistentResolvesIntents(t *testing.T) {
 		return nil
 	}
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	store := createTestStoreWithConfig(t, stopper,
 		testStoreOpts{createSystemRanges: true}, &cfg)
 	// Lay down 10 intents to scan over.
@@ -2376,7 +2376,7 @@ func TestStoreScanInconsistentResolvesIntents(t *testing.T) {
 func TestStoreScanIntentsFromTwoTxns(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	store, manualClock := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper)
 	// Lay down two intents from two txns to scan over.
@@ -2429,7 +2429,7 @@ func TestStoreScanMultipleIntents(t *testing.T) {
 		return nil
 	}
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	store := createTestStoreWithConfig(t, stopper,
 		testStoreOpts{createSystemRanges: true}, &cfg)
 	// Lay down ten intents from a single txn.
@@ -2473,7 +2473,7 @@ func TestStoreScanMultipleIntents(t *testing.T) {
 func TestStoreBadRequests(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	store, _ := createTestStore(t, testStoreOpts{createSystemRanges: true}, stopper)
 	txn := newTransaction("test", roachpb.Key("a"), 1 /* priority */, store.cfg.Clock)
@@ -2568,7 +2568,7 @@ func TestMaybeRemove(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	cfg := TestStoreConfig(nil)
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	store := createTestStoreWithoutStart(t, stopper,
 		testStoreOpts{createSystemRanges: true}, &cfg)
 	// Add a queue to the scanner before starting the store and running the scanner.
@@ -2603,7 +2603,7 @@ func TestStoreGCThreshold(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	tc := testContext{}
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	tc.Start(t, stopper)
 	store := tc.store
@@ -2682,7 +2682,7 @@ func TestStoreRangePlaceholders(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	tc := testContext{}
 	stopper := stop.NewStopper()
-	ctx := context.TODO()
+	ctx := context.Background()
 	defer stopper.Stop(ctx)
 	tc.Start(t, stopper)
 	s := tc.store
@@ -2786,7 +2786,7 @@ func TestStoreRemovePlaceholderOnRaftIgnored(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	tc := testContext{}
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	tc.Start(t, stopper)
 	s := tc.store
 	ctx := context.Background()
@@ -2996,7 +2996,7 @@ func TestReserveSnapshotThrottling(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	tc := testContext{}
 	tc.Start(t, stopper)
 	s := tc.store
@@ -3084,7 +3084,7 @@ func TestReserveSnapshotFullnessLimit(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	tc := testContext{}
 	tc.Start(t, stopper)
 	s := tc.store
@@ -3289,7 +3289,7 @@ func TestPreemptiveSnapshotsAreRemoved(t *testing.T) {
 func BenchmarkStoreGetReplica(b *testing.B) {
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	store, _ := createTestStore(b, testStoreOpts{createSystemRanges: true}, stopper)
 	b.RunParallel(func(pb *testing.PB) {
diff --git a/pkg/kv/kvserver/stores_test.go b/pkg/kv/kvserver/stores_test.go
index 04de1056b19d..e8b6b289ad7c 100644
--- a/pkg/kv/kvserver/stores_test.go
+++ b/pkg/kv/kvserver/stores_test.go
@@ -220,7 +220,7 @@ func createStores(count int, t *testing.T) (*hlc.ManualClock, []*Store, *Stores,
 		cfg.Transport = NewDummyRaftTransport(cfg.Settings)
 		eng := storage.NewDefaultInMem()
 		stopper.AddCloser(eng)
-		s := NewStore(context.TODO(), cfg, eng, &roachpb.NodeDescriptor{NodeID: 1})
+		s := NewStore(context.Background(), cfg, eng, &roachpb.NodeDescriptor{NodeID: 1})
 		storeIDAlloc++
 		s.Ident = &roachpb.StoreIdent{StoreID: storeIDAlloc}
 		stores = append(stores, s)
@@ -233,7 +233,7 @@ func createStores(count int, t *testing.T) (*hlc.ManualClock, []*Store, *Stores,
 func TestStoresGossipStorage(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	manual, stores, ls, stopper := createStores(2, t)
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	ls.AddStore(stores[0])
 	// Verify initial read is empty.
@@ -282,7 +282,7 @@ func TestStoresGossipStorage(t *testing.T) {
 func TestStoresGossipStorageReadLatest(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	manual, stores, ls, stopper := createStores(2, t)
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	ls.AddStore(stores[0])
 	// Add a fake address and write.
diff --git a/pkg/kv/kvserver/ts_maintenance_queue_test.go b/pkg/kv/kvserver/ts_maintenance_queue_test.go
index b0211ec17d6e..1468e25a3608 100644
--- a/pkg/kv/kvserver/ts_maintenance_queue_test.go
+++ b/pkg/kv/kvserver/ts_maintenance_queue_test.go
@@ -223,7 +223,7 @@ func TestTimeSeriesMaintenanceQueueServer(t *testing.T) {
 			},
 		},
 	})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	tsrv := s.(*server.TestServer)
 	tsdb := tsrv.TsDB()
@@ -251,7 +251,7 @@ func TestTimeSeriesMaintenanceQueueServer(t *testing.T) {
 			Value: 300.0,
 		},
 	}
-	if err := tsdb.StoreData(context.TODO(), ts.Resolution10s, []tspb.TimeSeriesData{
+	if err := tsdb.StoreData(context.Background(), ts.Resolution10s, []tspb.TimeSeriesData{
 		{
 			Name:   seriesName,
 			Source: sourceName,
@@ -268,7 +268,7 @@ func TestTimeSeriesMaintenanceQueueServer(t *testing.T) {
 	// Force a range split in between near past and far past. This guarantees
 	// that the pruning operation will issue a DeleteRange which spans ranges.
-	if err := db.AdminSplit(context.TODO(), splitKey, splitKey, hlc.MaxTimestamp /* expirationTime */); err != nil {
+	if err := db.AdminSplit(context.Background(), splitKey, splitKey, hlc.MaxTimestamp /* expirationTime */); err != nil {
 		t.Fatal(err)
 	}
@@ -281,8 +281,8 @@ func TestTimeSeriesMaintenanceQueueServer(t *testing.T) {
 		math.MaxInt64, /* noteworthy */
 		cluster.MakeTestingClusterSettings(),
 	)
-	memMon.Start(context.TODO(), nil /* pool */, mon.MakeStandaloneBudget(math.MaxInt64))
-	defer memMon.Stop(context.TODO())
+	memMon.Start(context.Background(), nil /* pool */, mon.MakeStandaloneBudget(math.MaxInt64))
+	defer memMon.Stop(context.Background())
 	memContext := ts.MakeQueryMemoryContext(
 		&memMon,
 		&memMon,
@@ -292,13 +292,13 @@ func TestTimeSeriesMaintenanceQueueServer(t *testing.T) {
 			InterpolationLimitNanos: 0,
 		},
 	)
-	defer memContext.Close(context.TODO())
+	defer memContext.Close(context.Background())
 	// getDatapoints queries all datapoints in the series from the beginning
 	// of time to a point in the near future.
 	getDatapoints := func() ([]tspb.TimeSeriesDatapoint, error) {
 		dps, _, err := tsdb.Query(
-			context.TODO(),
+			context.Background(),
 			tspb.Query{Name: seriesName},
 			ts.Resolution10s,
 			ts.QueryTimespan{
diff --git a/pkg/kv/kvserver/txn_wait_queue_test.go b/pkg/kv/kvserver/txn_wait_queue_test.go
index 80f5c40895d9..10ff470e3dee 100644
--- a/pkg/kv/kvserver/txn_wait_queue_test.go
+++ b/pkg/kv/kvserver/txn_wait_queue_test.go
@@ -74,7 +74,7 @@ func TestTxnWaitQueueEnableDisable(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	tc := testContext{}
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	tc.Start(t, stopper)
 	txn, err := createTxnForPushQueue(context.Background(), &tc)
@@ -160,7 +160,7 @@ func TestTxnWaitQueueEnableDisable(t *testing.T) {
 		t.Fatalf("expected update to silently fail since queue is disabled")
 	}
-	if resp, pErr := q.MaybeWaitForPush(context.TODO(), &req); resp != nil || pErr != nil {
+	if resp, pErr := q.MaybeWaitForPush(context.Background(), &req); resp != nil || pErr != nil {
 		t.Errorf("expected nil resp and err as queue is disabled; got %+v, %s", resp, pErr)
 	}
 	if err := checkAllGaugesZero(tc); err != nil {
@@ -172,7 +172,7 @@ func TestTxnWaitQueueCancel(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	tc := testContext{}
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	tc.Start(t, stopper)
 	txn, err := createTxnForPushQueue(context.Background(), &tc)
@@ -235,7 +235,7 @@ func TestTxnWaitQueueUpdateTxn(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	tc := testContext{}
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	tc.Start(t, stopper)
 	txn, err := createTxnForPushQueue(context.Background(), &tc)
@@ -431,7 +431,7 @@ func TestTxnWaitQueueUpdateNotPushedTxn(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	tc := testContext{}
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	tc.Start(t, stopper)
 	txn, err := createTxnForPushQueue(context.Background(), &tc)
@@ -502,7 +502,7 @@ func TestTxnWaitQueuePusheeExpires(t *testing.T) {
 		return nil
 	}
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	tc.StartWithStoreConfig(t, stopper, tsc)
 	pusher1 := newTransaction("pusher1", roachpb.Key("a"), 1, tc.Clock())
@@ -599,7 +599,7 @@ func TestTxnWaitQueuePusherUpdate(t *testing.T) {
 		t.Run(fmt.Sprintf("recordEpoch=%s", c.name), func(t *testing.T) {
 			tc := testContext{}
 			stopper := stop.NewStopper()
-			defer stopper.Stop(context.TODO())
+			defer stopper.Stop(context.Background())
 			tc.Start(t, stopper)
 			txn, err := createTxnForPushQueue(context.Background(), &tc)
@@ -696,7 +696,7 @@ func TestTxnWaitQueueDependencyCycle(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	tc := testContext{}
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	tc.Start(t, stopper)
 	txnA, err := createTxnForPushQueue(context.Background(), &tc)
@@ -789,7 +789,7 @@ func TestTxnWaitQueueDependencyCycleWithPriorityInversion(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	tc := testContext{}
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	tc.Start(t, stopper)
 	// Create txnA with a lower priority so it won't think it could push
diff --git a/pkg/kv/txn_test.go b/pkg/kv/txn_test.go
index 284b66317c9c..189b609f6c1d 100644
--- a/pkg/kv/txn_test.go
+++ b/pkg/kv/txn_test.go
@@ -157,7 +157,7 @@ func TestTransactionConfig(t *testing.T) {
 	db := NewDBWithContext(
 		testutils.MakeAmbientCtx(), newTestTxnFactory(nil), clock, dbCtx)
-	if err := db.Txn(context.TODO(), func(ctx context.Context, txn *Txn) error {
+	if err := db.Txn(context.Background(), func(ctx context.Context, txn *Txn) error {
 		if txn.db.ctx.UserPriority != db.ctx.UserPriority {
 			t.Errorf("expected txn user priority %f; got %f",
 				db.ctx.UserPriority, txn.db.ctx.UserPriority)
@@ -181,7 +181,7 @@ func TestCommitTransactionOnce(t *testing.T) {
 		count++
 		return ba.CreateReply(), nil
 	}), clock)
-	if err := db.Txn(context.TODO(), func(ctx context.Context, txn *Txn) error {
+	if err := db.Txn(context.Background(), func(ctx context.Context, txn *Txn) error {
 		b := txn.NewBatch()
 		b.Put("z", "adding a write exposed a bug in #1882")
 		return txn.CommitInBatch(ctx, b)
@@ -209,7 +209,7 @@ func TestAbortMutatingTransaction(t *testing.T) {
 		return ba.CreateReply(), nil
 	}), clock)
-	if err := db.Txn(context.TODO(), func(ctx context.Context, txn *Txn) error {
+	if err := db.Txn(context.Background(), func(ctx context.Context, txn *Txn) error {
 		if err := txn.Put(ctx, "a", "b"); err != nil {
 			return err
 		}
@@ -279,7 +279,7 @@ func TestRunTransactionRetryOnErrors(t *testing.T) {
 			}
 			return ba.CreateReply(), nil
 		}), clock)
-		err := db.Txn(context.TODO(), func(ctx context.Context, txn *Txn) error {
+		err := db.Txn(context.Background(), func(ctx context.Context, txn *Txn) error {
 			return txn.Put(ctx, "a", "b")
 		})
 		if test.retry {
@@ -420,7 +420,7 @@ func TestWrongTxnRetry(t *testing.T) {
 		return roachpb.NewTransactionRetryWithProtoRefreshError("test error", uuid.MakeV4(), roachpb.Transaction{})
 	}
-	if err := db.Txn(context.TODO(), txnClosure); !testutils.IsError(err, "test error") {
+	if err := db.Txn(context.Background(), txnClosure); !testutils.IsError(err, "test error") {
 		t.Fatal(err)
 	}
 	if retries != 1 {
@@ -436,7 +436,7 @@ func TestBatchMixRawRequest(t *testing.T) {
 	b := &Batch{}
 	b.AddRawRequest(&roachpb.EndTxnRequest{})
 	b.Put("x", "y")
-	if err := db.Run(context.TODO(), b); !testutils.IsError(err, "non-raw operations") {
+	if err := db.Run(context.Background(), b); !testutils.IsError(err, "non-raw operations") {
 		t.Fatal(err)
 	}
 }
diff --git a/pkg/rpc/clock_offset_test.go b/pkg/rpc/clock_offset_test.go
index 5e0f2872de8d..6079a0ecb328 100644
--- a/pkg/rpc/clock_offset_test.go
+++ b/pkg/rpc/clock_offset_test.go
@@ -43,7 +43,7 @@ func TestUpdateOffset(t *testing.T) {
 		Uncertainty: 20,
 		MeasuredAt:  monitor.clock.PhysicalTime().Add(-(monitor.offsetTTL + 1)).UnixNano(),
 	}
-	monitor.UpdateOffset(context.TODO(), key, offset1, latency)
+	monitor.UpdateOffset(context.Background(), key, offset1, latency)
 	monitor.mu.Lock()
 	if o, ok := monitor.mu.offsets[key]; !ok {
 		t.Errorf("expected key %s to be set in %v, but it was not", key, monitor.mu.offsets)
@@ -58,7 +58,7 @@ func TestUpdateOffset(t *testing.T) {
 		Uncertainty: 20,
 		MeasuredAt:  monitor.clock.PhysicalTime().Add(-(monitor.offsetTTL + 1)).UnixNano(),
 	}
-	monitor.UpdateOffset(context.TODO(), key, offset2, latency)
+	monitor.UpdateOffset(context.Background(), key, offset2, latency)
 	monitor.mu.Lock()
 	if o, ok := monitor.mu.offsets[key]; !ok {
 		t.Errorf("expected key %s to be set in %v, but it was not", key, monitor.mu.offsets)
@@ -73,7 +73,7 @@ func TestUpdateOffset(t *testing.T) {
 		Uncertainty: 10,
 		MeasuredAt:  offset2.MeasuredAt + 1,
 	}
-	monitor.UpdateOffset(context.TODO(), key, offset3, latency)
+	monitor.UpdateOffset(context.Background(), key, offset3, latency)
 	monitor.mu.Lock()
 	if o, ok := monitor.mu.offsets[key]; !ok {
 		t.Errorf("expected key %s to be set in %v, but it was not", key, monitor.mu.offsets)
@@ -83,7 +83,7 @@ func TestUpdateOffset(t *testing.T) {
 	monitor.mu.Unlock()
 	// Larger error and offset3 is not stale, so no update.
-	monitor.UpdateOffset(context.TODO(), key, offset2, latency)
+	monitor.UpdateOffset(context.Background(), key, offset2, latency)
 	monitor.mu.Lock()
 	if o, ok := monitor.mu.offsets[key]; !ok {
 		t.Errorf("expected key %s to be set in %v, but it was not", key, monitor.mu.offsets)
@@ -116,11 +116,11 @@ func TestVerifyClockOffset(t *testing.T) {
 		}
 		if tc.expectedError {
-			if err := monitor.VerifyClockOffset(context.TODO()); !testutils.IsError(err, errOffsetGreaterThanMaxOffset) {
+			if err := monitor.VerifyClockOffset(context.Background()); !testutils.IsError(err, errOffsetGreaterThanMaxOffset) {
 				t.Errorf("%d: unexpected error %v", idx, err)
 			}
 		} else {
-			if err := monitor.VerifyClockOffset(context.TODO()); err != nil {
+			if err := monitor.VerifyClockOffset(context.Background()); err != nil {
 				t.Errorf("%d: unexpected error %s", idx, err)
 			}
 		}
@@ -144,7 +144,7 @@ func TestIsHealthyOffsetInterval(t *testing.T) {
 		{RemoteOffset{Offset: 15, Uncertainty: 4}, false},
 		{RemoteOffset{Offset: math.MaxInt64, Uncertainty: 0}, false},
 	} {
-		if isHealthy := tc.offset.isHealthy(context.TODO(), maxOffset); tc.expectedHealthy {
+		if isHealthy := tc.offset.isHealthy(context.Background(), maxOffset); tc.expectedHealthy {
 			if !isHealthy {
 				t.Errorf("%d: expected remote offset %s for maximum offset %s to be healthy", i, tc.offset, maxOffset)
 			}
@@ -159,7 +159,7 @@ func TestIsHealthyOffsetInterval(t *testing.T) {
 func TestClockOffsetMetrics(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	clock := hlc.NewClock(hlc.NewManualClock(123).UnixNano, 20*time.Nanosecond)
 	monitor := newRemoteClockMonitor(clock, time.Hour, 0)
@@ -171,7 +171,7 @@ func TestClockOffsetMetrics(t *testing.T) {
 		},
 	}
-	if err := monitor.VerifyClockOffset(context.TODO()); err != nil {
+	if err := monitor.VerifyClockOffset(context.Background()); err != nil {
 		t.Fatal(err)
 	}
diff --git a/pkg/rpc/context_test.go b/pkg/rpc/context_test.go
index abbc59e1d52c..27069cdb517f 100644
--- a/pkg/rpc/context_test.go
+++ b/pkg/rpc/context_test.go
@@ -113,7 +113,7 @@ func TestHeartbeatCB(t *testing.T) {
 	testutils.RunTrueAndFalse(t, "compression", func(t *testing.T, compression bool) {
 		stopper := stop.NewStopper()
-		defer stopper.Stop(context.TODO())
+		defer stopper.Stop(context.Background())
 		// Shared cluster ID by all RPC peers (this ensures that the peers
 		// don't talk to servers from unrelated tests by accident).
@@ -123,7 +123,7 @@ func TestHeartbeatCB(t *testing.T) {
 		serverCtx := newTestContext(clusterID, clock, stopper)
 		serverCtx.rpcCompression = compression
 		const serverNodeID = 1
-		serverCtx.NodeID.Set(context.TODO(), serverNodeID)
+		serverCtx.NodeID.Set(context.Background(), serverNodeID)
 		s := newTestServer(t, serverCtx)
 		RegisterHeartbeatServer(s, &HeartbeatService{
 			clock: clock,
@@ -182,7 +182,7 @@ func TestInternalServerAddress(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	// Can't be zero because that'd be an empty offset.
 	clock := hlc.NewClock(timeutil.Unix(0, 1).UnixNano, time.Nanosecond)
@@ -190,7 +190,7 @@ func TestInternalServerAddress(t *testing.T) {
 	serverCtx := newTestContext(uuid.MakeV4(), clock, stopper)
 	serverCtx.Config.Addr = "127.0.0.1:9999"
 	serverCtx.Config.AdvertiseAddr = "127.0.0.1:8888"
-	serverCtx.NodeID.Set(context.TODO(), 1)
+	serverCtx.NodeID.Set(context.Background(), 1)
 	internal := &internalServer{}
 	serverCtx.SetLocalInternalServer(internal)
@@ -207,7 +207,7 @@ func TestHeartbeatHealth(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	// Can't be zero because that'd be an empty offset.
 	clock := hlc.NewClock(timeutil.Unix(0, 1).UnixNano, time.Nanosecond)
@@ -220,7 +220,7 @@ func TestHeartbeatHealth(t *testing.T) {
 	const clientNodeID = 2
 	serverCtx := newTestContext(clusterID, clock, stop.NewStopper())
-	serverCtx.NodeID.Set(context.TODO(), serverNodeID)
+	serverCtx.NodeID.Set(context.Background(), serverNodeID)
 	s := newTestServer(t, serverCtx)
 	heartbeat := &ManualHeartbeatService{
@@ -269,7 +269,7 @@ func TestHeartbeatHealth(t *testing.T) {
 	}
 	clientCtx := newTestContext(clusterID, clock, stopper)
-	clientCtx.NodeID.Set(context.TODO(), clientNodeID)
+	clientCtx.NodeID.Set(context.Background(), clientNodeID)
 	clientCtx.Addr = lisNotLocalServer.Addr().String()
 	clientCtx.AdvertiseAddr = lisLocalServer.Addr().String()
 	// Make the interval shorter to speed up the test.
@@ -466,7 +466,7 @@ func TestHeartbeatHealthTransport(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	ctx := context.Background()
@@ -479,7 +479,7 @@ func TestHeartbeatHealthTransport(t *testing.T) {
 	serverCtx := newTestContext(clusterID, clock, stopper)
 	const serverNodeID = 1
-	serverCtx.NodeID.Set(context.TODO(), serverNodeID)
+	serverCtx.NodeID.Set(context.Background(), serverNodeID)
 	// newTestServer with a custom listener.
 	tlsConfig, err := serverCtx.GetServerTLSConfig()
 	if err != nil {
@@ -655,7 +655,7 @@ func TestOffsetMeasurement(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	// Shared cluster ID by all RPC peers (this ensures that the peers
 	// don't talk to servers from unrelated tests by accident).
@@ -665,7 +665,7 @@ func TestOffsetMeasurement(t *testing.T) {
 	serverClock := hlc.NewClock(serverTime.UnixNano, time.Nanosecond)
 	serverCtx := newTestContext(clusterID, serverClock, stopper)
 	const serverNodeID = 1
-	serverCtx.NodeID.Set(context.TODO(), serverNodeID)
+	serverCtx.NodeID.Set(context.Background(), serverNodeID)
 	s := newTestServer(t, serverCtx)
 	RegisterHeartbeatServer(s, &HeartbeatService{
 		clock: serverClock,
@@ -725,7 +725,7 @@ func TestFailedOffsetMeasurement(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	// Shared cluster ID by all RPC peers (this ensures that the peers
 	// don't talk to servers from unrelated tests by accident).
@@ -736,7 +736,7 @@ func TestFailedOffsetMeasurement(t *testing.T) {
 	serverCtx := newTestContext(clusterID, clock, stopper)
 	const serverNodeID = 1
-	serverCtx.NodeID.Set(context.TODO(), serverNodeID)
+	serverCtx.NodeID.Set(context.Background(), serverNodeID)
 	s := newTestServer(t, serverCtx)
 	heartbeat := &ManualHeartbeatService{
 		clock: clock,
@@ -815,7 +815,7 @@ func TestRemoteOffsetUnhealthy(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	const maxOffset = 100 * time.Millisecond
@@ -844,7 +844,7 @@ func TestRemoteOffsetUnhealthy(t *testing.T) {
 		nodeCtxs[i].errChan = make(chan error, 1)
 		nodeCtxs[i].ctx = newTestContext(clusterID, clock, stopper)
 		nodeCtxs[i].ctx.heartbeatInterval = maxOffset
-		nodeCtxs[i].ctx.NodeID.Set(context.TODO(), roachpb.NodeID(i+1))
+		nodeCtxs[i].ctx.NodeID.Set(context.Background(), roachpb.NodeID(i+1))
 		s := newTestServer(t, nodeCtxs[i].ctx)
 		RegisterHeartbeatServer(s, &HeartbeatService{
@@ -1012,7 +1012,7 @@ func grpcRunKeepaliveTestCase(testCtx context.Context, c grpcKeepaliveTestCase)
 	}
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	ctx, cancel := stopper.WithCancelOnQuiesce(testCtx)
 	defer cancel()
@@ -1025,7 +1025,7 @@ func grpcRunKeepaliveTestCase(testCtx context.Context, c grpcKeepaliveTestCase)
 	clock := hlc.NewClock(timeutil.Unix(0, 20).UnixNano, time.Nanosecond)
 	serverCtx := newTestContext(clusterID, clock, stopper)
 	const serverNodeID = 1
-	serverCtx.NodeID.Set(context.TODO(), serverNodeID)
+	serverCtx.NodeID.Set(context.Background(), serverNodeID)
 	tlsConfig, err := serverCtx.GetServerTLSConfig()
 	if err != nil {
 		return err
@@ -1231,12 +1231,12 @@ func TestClusterIDMismatch(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	clock := hlc.NewClock(timeutil.Unix(0, 20).UnixNano, time.Nanosecond)
 	serverCtx := newTestContext(uuid.MakeV4(), clock, stopper)
 	const serverNodeID = 1
-	serverCtx.NodeID.Set(context.TODO(), serverNodeID)
+	serverCtx.NodeID.Set(context.Background(), serverNodeID)
 	s := newTestServer(t, serverCtx)
 	RegisterHeartbeatServer(s, &HeartbeatService{
 		clock: clock,
@@ -1304,7 +1304,7 @@ func TestClusterNameMismatch(t *testing.T) {
 	for i, c := range testData {
 		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
 			stopper := stop.NewStopper()
-			defer stopper.Stop(context.TODO())
+			defer stopper.Stop(context.Background())
 			serverCtx := newTestContext(uuid.MakeV4(), clock, stopper)
 			serverCtx.clusterName = c.serverName
@@ -1351,7 +1351,7 @@ func TestNodeIDMismatch(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	// Shared cluster ID by all RPC peers (this ensures that the peers
 	// don't talk to servers from unrelated tests by accident).
@@ -1359,7 +1359,7 @@ func TestNodeIDMismatch(t *testing.T) {
 	clock := hlc.NewClock(timeutil.Unix(0, 20).UnixNano, time.Nanosecond)
 	serverCtx := newTestContext(clusterID, clock, stopper)
-	serverCtx.NodeID.Set(context.TODO(), 1)
+	serverCtx.NodeID.Set(context.Background(), 1)
 	s := newTestServer(t, serverCtx)
 	RegisterHeartbeatServer(s, &HeartbeatService{
 		clock: clock,
@@ -1424,12 +1424,12 @@ func TestVersionCheckBidirectional(t *testing.T) {
 	for _, td := range testData {
 		t.Run(td.name, func(t *testing.T) {
 			stopper := stop.NewStopper()
-			defer stopper.Stop(context.TODO())
+			defer stopper.Stop(context.Background())
 			clock := hlc.NewClock(timeutil.Unix(0, 20).UnixNano, time.Nanosecond)
 			serverCtx := newTestContext(clusterID, clock, stopper)
 			const serverNodeID = 1
-			serverCtx.NodeID.Set(context.TODO(), serverNodeID)
+			serverCtx.NodeID.Set(context.Background(), serverNodeID)
 			if err := setVersion(serverCtx, td.serverVersion); err != nil {
 				t.Fatal(err)
 			}
@@ -1473,12 +1473,12 @@ func TestGRPCDialClass(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	clock := hlc.NewClock(timeutil.Unix(0, 20).UnixNano, time.Nanosecond)
 	serverCtx := newTestContext(uuid.MakeV4(), clock, stopper)
 	const serverNodeID = 1
-	serverCtx.NodeID.Set(context.TODO(), serverNodeID)
+	serverCtx.NodeID.Set(context.Background(), serverNodeID)
 	s := newTestServer(t, serverCtx)
 	RegisterHeartbeatServer(s, &HeartbeatService{
 		clock: clock,
@@ -1497,9 +1497,9 @@ func TestGRPCDialClass(t *testing.T) {
 	sys1 := clientCtx.GRPCDialNode(remoteAddr, 1, SystemClass)
 	require.False(t, sys1 == def1, "expected connections dialed with different classes to the same target to differ")
-	defConn1, err := def1.Connect(context.TODO())
+	defConn1, err := def1.Connect(context.Background())
 	require.Nil(t, err, "expected successful connection")
-	sysConn1, err := sys1.Connect(context.TODO())
+	sysConn1, err := sys1.Connect(context.Background())
 	require.Nil(t, err, "expected successful connection")
 	require.False(t, sysConn1 == defConn1, "expected connections dialed with "+
 		"different classes to the sametarget to have separate underlying gRPC connections")
@@ -1520,13 +1520,13 @@ func TestTestingKnobs(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	clusterID := uuid.MakeV4()
 	clock := hlc.NewClock(timeutil.Unix(0, 20).UnixNano, time.Nanosecond)
 	serverCtx := newTestContext(clusterID, clock, stopper)
 	const serverNodeID = 1
-	serverCtx.NodeID.Set(context.TODO(), serverNodeID)
+	serverCtx.NodeID.Set(context.Background(), serverNodeID)
 	// Register an UnknownServiceHandler that expects a BatchRequest and sends
 	// a BatchResponse. It will be used both as a unary and stream handler below.
 	s := newTestServer(t, serverCtx, grpc.UnknownServiceHandler(
@@ -1610,9 +1610,9 @@ func TestTestingKnobs(t *testing.T) {
 	ln, err := netutil.ListenAndServeGRPC(serverCtx.Stopper, s, util.TestAddr)
 	require.Nil(t, err)
 	remoteAddr := ln.Addr().String()
-	sysConn, err := clientCtx.GRPCDialNode(remoteAddr, 1, SystemClass).Connect(context.TODO())
+	sysConn, err := clientCtx.GRPCDialNode(remoteAddr, 1, SystemClass).Connect(context.Background())
 	require.Nil(t, err)
-	defConn, err := clientCtx.GRPCDialNode(remoteAddr, 1, DefaultClass).Connect(context.TODO())
+	defConn, err := clientCtx.GRPCDialNode(remoteAddr, 1, DefaultClass).Connect(context.Background())
 	require.Nil(t, err)
 	const unaryMethod = "/cockroach.rpc.Testing/Foo"
 	const streamMethod = "/cockroach.rpc.Testing/Bar"
@@ -1620,7 +1620,7 @@ func TestTestingKnobs(t *testing.T) {
 	for i := 0; i < numSysUnary; i++ {
 		ba := roachpb.BatchRequest{}
 		br := roachpb.BatchResponse{}
-		err := sysConn.Invoke(context.TODO(), unaryMethod, &ba, &br)
+		err := sysConn.Invoke(context.Background(), unaryMethod, &ba, &br)
 		require.Nil(t, err)
 	}
 	const numDefStream = 4
@@ -1629,7 +1629,7 @@ func TestTestingKnobs(t *testing.T) {
 		StreamName:    "bar",
 		ClientStreams: true,
 	}
-	cs, err := defConn.NewStream(context.TODO(), &desc, streamMethod)
+	cs, err := defConn.NewStream(context.Background(), &desc, streamMethod)
 	require.Nil(t, err)
 	require.Nil(t, cs.SendMsg(&roachpb.BatchRequest{}))
 	var br roachpb.BatchResponse
@@ -1718,12 +1718,12 @@ func BenchmarkGRPCDial(b *testing.B) {
 		b.Skip("TODO: fix benchmark")
 	}
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	clock := hlc.NewClock(hlc.UnixNano, 250*time.Millisecond)
 	ctx := newTestContext(uuid.MakeV4(), clock, stopper)
 	const serverNodeID = 1
-	ctx.NodeID.Set(context.TODO(), serverNodeID)
+	ctx.NodeID.Set(context.Background(), serverNodeID)
 	s := newTestServer(b, ctx)
 	ln, err := netutil.ListenAndServeGRPC(ctx.Stopper, s, util.TestAddr)
diff --git a/pkg/rpc/nodedialer/nodedialer_test.go b/pkg/rpc/nodedialer/nodedialer_test.go
index 58e0bb517e44..4247ccc2ad08 100644
--- a/pkg/rpc/nodedialer/nodedialer_test.go
+++ b/pkg/rpc/nodedialer/nodedialer_test.go
@@ -42,7 +42,7 @@ const staticNodeID = 1
 func TestNodedialerPositive(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper, _, _, _, nd := setUpNodedialerTest(t, staticNodeID)
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	// Ensure that dialing works.
 	breaker := nd.GetCircuitBreaker(1, rpc.DefaultClass)
 	assert.True(t, breaker.Ready())
@@ -108,7 +108,7 @@ func TestDialNoBreaker(t *testing.T) {
 func TestConcurrentCancellationAndTimeout(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper, _, _, _, nd := setUpNodedialerTest(t, staticNodeID)
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	ctx := context.Background()
 	breaker := nd.GetCircuitBreaker(staticNodeID, rpc.DefaultClass)
 	// Test that when a context is canceled during dialing we always return that
@@ -142,7 +142,7 @@ func TestConcurrentCancellationAndTimeout(t *testing.T) {
 func TestResolverErrorsTrip(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper, rpcCtx, _, _, _ := setUpNodedialerTest(t, staticNodeID)
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	boom := fmt.Errorf("boom")
 	nd := New(rpcCtx, func(id roachpb.NodeID) (net.Addr, error) {
 		return nil, boom
@@ -156,7 +156,7 @@ func TestResolverErrorsTrip(t *testing.T) {
 func TestDisconnectsTrip(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	stopper, _, ln, hb, nd := setUpNodedialerTest(t, staticNodeID)
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	ctx := context.Background()
 	breaker := nd.GetCircuitBreaker(staticNodeID, rpc.DefaultClass)
@@ -237,7 +237,7 @@ func setUpNodedialerTest(
 	clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond)
 	// Create an rpc Context and then
 	rpcCtx = newTestContext(clock, stopper)
-	rpcCtx.NodeID.Set(context.TODO(), nodeID)
+	rpcCtx.NodeID.Set(context.Background(), nodeID)
 	_, ln, hb = newTestServer(t, clock, stopper, true /* useHeartbeat */)
 	nd = New(rpcCtx, newSingleNodeResolver(nodeID, ln.Addr()))
 	testutils.SucceedsSoon(t, func() error {
@@ -295,7 +295,7 @@ func newTestContext(clock *hlc.Clock, stopper *stop.Stopper) *rpc.Context {
 	// Ensure that tests using this test context and restart/shut down
 	// their servers do not inadvertently start talking to servers from
 	// unrelated concurrent tests.
-	rctx.ClusterID.Set(context.TODO(), uuid.MakeV4())
+	rctx.ClusterID.Set(context.Background(), uuid.MakeV4())
 	return rctx
 }
diff --git a/pkg/rpc/stats_handler_test.go b/pkg/rpc/stats_handler_test.go
index 06551dfe6b99..aebb7e678b53 100644
--- a/pkg/rpc/stats_handler_test.go
+++ b/pkg/rpc/stats_handler_test.go
@@ -96,7 +96,7 @@ func TestStatsHandlerWithHeartbeats(t *testing.T) {
 	// Can't be zero because that'd be an empty offset.
 	clock := hlc.NewClock(timeutil.Unix(0, 1).UnixNano, time.Nanosecond)
 	stopper := stop.NewStopper()
-	defer stopper.Stop(context.TODO())
+	defer stopper.Stop(context.Background())
 	// Shared cluster ID by all RPC peers (this ensures that the peers
 	// don't talk to servers from unrelated tests by accident).
@@ -104,7 +104,7 @@ func TestStatsHandlerWithHeartbeats(t *testing.T) {
 	serverCtx := newTestContext(clusterID, clock, stopper)
 	const serverNodeID = 1
-	serverCtx.NodeID.Set(context.TODO(), serverNodeID)
+	serverCtx.NodeID.Set(context.Background(), serverNodeID)
 	s := newTestServer(t, serverCtx)
 	heartbeat := &ManualHeartbeatService{
@@ -171,7 +171,7 @@ func TestStatsHandlerWithHeartbeats(t *testing.T) {
 		if s, c := serverVal.(*Stats).Outgoing(), clientVal.(*Stats).Incoming(); s == 0 || c == 0 || s > c {
 			return fmt.Errorf("expected server.outgoing < client.incoming; got %d, %d", s, c)
 		}
-		log.Infof(context.TODO(), "server incoming = %v, server outgoing = %v, client incoming = %v, client outgoing = %v",
+		log.Infof(context.Background(), "server incoming = %v, server outgoing = %v, client incoming = %v, client outgoing = %v",
 			serverVal.(*Stats).Incoming(), serverVal.(*Stats).Outgoing(), clientVal.(*Stats).Incoming(), clientVal.(*Stats).Outgoing())
 		return nil
 	})
diff --git a/pkg/security/certs_rotation_test.go b/pkg/security/certs_rotation_test.go
index 631f35481eb8..1aa1a24b62d1 100644
--- a/pkg/security/certs_rotation_test.go
+++ b/pkg/security/certs_rotation_test.go
@@ -61,7 +61,7 @@ func TestRotateCerts(t *testing.T) {
 		DisableWebSessionAuthentication: true,
 	}
 	s, _, _ := serverutils.StartServer(t, params)
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	// Client test function.
 	clientTest := func(httpClient http.Client) error {
diff --git a/pkg/security/certs_test.go b/pkg/security/certs_test.go
index 7af1755d9057..5c50c609d04a 100644
--- a/pkg/security/certs_test.go
+++ b/pkg/security/certs_test.go
@@ -283,7 +283,7 @@ func TestUseCerts(t *testing.T) {
 		DisableWebSessionAuthentication: true,
 	}
 	s, _, db := serverutils.StartServer(t, params)
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	// Insecure mode.
 	clientContext := testutils.NewNodeTestBaseContext()
@@ -369,7 +369,7 @@ func TestUseSplitCACerts(t *testing.T) {
 		DisableWebSessionAuthentication: true,
 	}
 	s, _, db := serverutils.StartServer(t, params)
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	// Insecure mode.
 	clientContext := testutils.NewNodeTestBaseContext()
@@ -489,7 +489,7 @@ func TestUseWrongSplitCACerts(t *testing.T) {
 		DisableWebSessionAuthentication: true,
 	}
 	s, _, db := serverutils.StartServer(t, params)
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	// Insecure mode.
 	clientContext := testutils.NewNodeTestBaseContext()
diff --git a/pkg/server/admin_cluster_test.go b/pkg/server/admin_cluster_test.go
index f8360146095f..037bf7653154 100644
--- a/pkg/server/admin_cluster_test.go
+++ b/pkg/server/admin_cluster_test.go
@@ -39,7 +39,7 @@ func TestAdminAPITableStats(t *testing.T) {
 			ScanMaxIdleTime: time.Millisecond,
 		},
 	})
-	defer tc.Stopper().Stop(context.TODO())
+	defer tc.Stopper().Stop(context.Background())
 	server0 := tc.Server(0)
 	// Create clients (SQL, HTTP) connected to server 0.
@@ -142,7 +142,7 @@ func TestAdminAPITableStats(t *testing.T) {
 func TestLivenessAPI(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	tc := testcluster.StartTestCluster(t, 3, base.TestClusterArgs{})
-	defer tc.Stopper().Stop(context.TODO())
+	defer tc.Stopper().Stop(context.Background())
 	startTime := tc.Server(0).Clock().PhysicalNow()
diff --git a/pkg/server/admin_test.go b/pkg/server/admin_test.go
index 8aa46a0ac346..e1e6720a45e1 100644
--- a/pkg/server/admin_test.go
+++ b/pkg/server/admin_test.go
@@ -124,7 +124,7 @@ func debugURL(s serverutils.TestServerInterface) string {
 func TestAdminDebugExpVar(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	jI, err := getJSON(s, debugURL(s)+"vars")
 	if err != nil {
@@ -144,7 +144,7 @@ func TestAdminDebugExpVar(t *testing.T) {
 func TestAdminDebugMetrics(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	jI, err := getJSON(s, debugURL(s)+"metrics")
 	if err != nil {
@@ -164,7 +164,7 @@ func TestAdminDebugMetrics(t *testing.T) {
 func TestAdminDebugPprof(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	body, err := getText(s, debugURL(s)+"pprof/block?debug=1")
 	if err != nil {
@@ -180,7 +180,7 @@ func TestAdminDebugPprof(t *testing.T) {
 func TestAdminDebugTrace(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	tc := []struct {
 		segment, search string
@@ -205,7 +205,7 @@ func TestAdminDebugTrace(t *testing.T) {
 func TestAdminDebugRedirect(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	expURL := debugURL(s)
 	origURL := expURL + "incorrect"
@@ -246,7 +246,7 @@ func TestAdminDebugRedirect(t *testing.T) {
 func TestAdminAPIDatabases(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	s, db, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	ts := s.(*TestServer)
 	ac := log.AmbientContext{Tracer: s.ClusterSettings().Tracer}
@@ -367,7 +367,7 @@ func TestAdminAPIDatabases(t *testing.T) {
 func TestAdminAPIDatabaseDoesNotExist(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	const errPattern = "database.+does not exist"
 	if err := getAdminJSONProto(s, "databases/i_do_not_exist", nil); !testutils.IsError(err, errPattern) {
@@ -378,7 +378,7 @@ func TestAdminAPIDatabaseDoesNotExist(t *testing.T) {
 func TestAdminAPIDatabaseSQLInjection(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	const fakedb = "system;DROP DATABASE system;"
 	const path = "databases/" + fakedb
@@ -524,7 +524,7 @@ select range_id, database_name, table_name, start_pretty, end_pretty from crdb_i
 func TestAdminAPITableDoesNotExist(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	const fakename = "i_do_not_exist"
 	const badDBPath = "databases/" + fakename + "/tables/foo"
@@ -543,7 +543,7 @@ func TestAdminAPITableDoesNotExist(t *testing.T) {
 func TestAdminAPITableSQLInjection(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	const fakeTable = "users;DROP DATABASE system;"
 	const path = "databases/system/tables/" + fakeTable
@@ -565,7 +565,7 @@ func TestAdminAPITableDetails(t *testing.T) {
 	} {
 		t.Run(tc.name, func(t *testing.T) {
 			s, db, _ := serverutils.StartServer(t, base.TestServerArgs{})
-			defer s.Stopper().Stop(context.TODO())
+			defer s.Stopper().Stop(context.Background())
 			ts := s.(*TestServer)
 			escDBName := tree.NameStringP(&tc.dbName)
@@ -700,7 +700,7 @@ func TestAdminAPITableDetails(t *testing.T) {
 func TestAdminAPIZoneDetails(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	s, db, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	ts := s.(*TestServer)
 	// Create database and table.
@@ -800,7 +800,7 @@ func TestAdminAPIZoneDetails(t *testing.T) {
 func TestAdminAPIUsers(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	s, db, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	// Create sample users.
 	query := `
@@ -836,7 +836,7 @@ VALUES ('adminUser', 'abc'), ('bob', 'xyz')`
 func TestAdminAPIEvents(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	s, db, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	setupQueries := []string{
 		"CREATE DATABASE api_test",
@@ -967,7 +967,7 @@ func TestAdminAPISettings(t *testing.T) {
 	defer sc.Close(t)
 	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	// Any bool that defaults to true will work here.
 	const settingKey = "sql.metrics.statement_details.enabled"
@@ -1069,7 +1069,7 @@ func TestAdminAPISettings(t *testing.T) {
 func TestAdminAPIUIData(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	testutils.RunTrueAndFalse(t, "isAdmin", func(t *testing.T, isAdmin bool) {
 		start := timeutil.Now()
@@ -1175,7 +1175,7 @@ func TestAdminAPIUIData(t *testing.T) {
 func TestAdminAPIUISeparateData(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	// Make a setting for an admin user.
 	if err := postAdminJSONProtoWithAdminOption(s, "uidata",
@@ -1213,7 +1213,7 @@ func TestAdminAPIUISeparateData(t *testing.T) {
 func TestClusterAPI(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	s, db, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	testutils.RunTrueAndFalse(t, "reportingOn", func(t *testing.T, reportingOn bool) {
 		testutils.RunTrueAndFalse(t, "enterpriseOn", func(t *testing.T, enterpriseOn bool) {
@@ -1322,7 +1322,7 @@ func TestAdminAPIJobs(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	s, conn, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	sqlDB := sqlutils.MakeSQLRunner(conn)
 	// Get list of existing jobs (migrations). Assumed to all have succeeded.
@@ -1417,7 +1417,7 @@ func TestAdminAPILocations(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	s, conn, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	sqlDB := sqlutils.MakeSQLRunner(conn)
 	testLocations := []struct {
@@ -1457,7 +1457,7 @@ func TestAdminAPIQueryPlan(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	s, conn, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	sqlDB := sqlutils.MakeSQLRunner(conn)
 	sqlDB.Exec(t, `CREATE DATABASE api_test`)
diff --git a/pkg/server/authentication_test.go b/pkg/server/authentication_test.go
index 246906daf947..ac540ccfef43 100644
--- a/pkg/server/authentication_test.go
+++ b/pkg/server/authentication_test.go
@@ -83,7 +83,7 @@ func TestSSLEnforcement(t *testing.T) {
 		// clients being instantiated.
 		DisableWebSessionAuthentication: true,
 	})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	// HTTPS with client certs for security.RootUser.
 	rootCertsContext := testutils.NewTestBaseContext(security.RootUser)
@@ -254,7 +254,7 @@ func TestVerifyPassword(t *testing.T) {
 		{"cthon98", "12345", true, ""},
 	} {
 		t.Run("", func(t *testing.T) {
-			valid, expired, err := ts.authentication.verifyPassword(context.TODO(), tc.username, tc.password)
+			valid, expired, err := ts.authentication.verifyPassword(context.Background(), tc.username, tc.password)
 			if err != nil {
 				t.Errorf(
 					"credentials %s/%s failed with error %s, wanted no error",
@@ -279,7 +279,7 @@ func TestVerifyPassword(t *testing.T) {
 func TestCreateSession(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	s, db, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	ts := s.(*TestServer)
 	username := "testUser"
 	// Create an authentication, noting the time before and after creation. This
 	// lets us ensure that the timestamps created are accurate.
 	timeBoundBefore := ts.clock.PhysicalTime()
-	id, origSecret, err := ts.authentication.newAuthSession(context.TODO(), username)
+	id, origSecret, err := ts.authentication.newAuthSession(context.Background(), username)
 	if err != nil {
 		t.Fatalf("error creating auth session: %s", err)
 	}
@@ -370,11 +370,11 @@ WHERE id = $1`
 func TestVerifySession(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	ts := s.(*TestServer)
 	sessionUsername := "testUser"
-	id, origSecret, err := ts.authentication.newAuthSession(context.TODO(), sessionUsername)
+	id, origSecret, err := ts.authentication.newAuthSession(context.Background(), sessionUsername)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -429,7 +429,7 @@ func TestVerifySession(t *testing.T) {
 		},
 	} {
 		t.Run(tc.testname, func(t *testing.T) {
-			valid, username, err := ts.authentication.verifySession(context.TODO(), &tc.cookie)
+			valid, username, err := ts.authentication.verifySession(context.Background(), &tc.cookie)
 			if err != nil {
 				t.Fatalf("test got error %s, wanted no error", err)
 			}
@@ -446,7 +446,7 @@ func TestVerifySession(t *testing.T) {
 func TestAuthenticationAPIUserLogin(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	s, db, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	ts := s.(*TestServer)
 	const (
@@ -536,7 +536,7 @@ func TestAuthenticationAPIUserLogin(t *testing.T) {
 func TestLogout(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	s, db, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	ts := s.(*TestServer)
 	// Log in.
@@ -617,7 +617,7 @@ func TestLogout(t *testing.T) {
 func TestAuthenticationMux(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	tsrv := s.(*TestServer)
 	// Both the normal and authenticated client will be used for each test.
diff --git a/pkg/server/config_test.go b/pkg/server/config_test.go
index 27bec680863d..3d22244900a2 100644
--- a/pkg/server/config_test.go
+++ b/pkg/server/config_test.go
@@ -31,15 +31,15 @@ import (
 func TestParseInitNodeAttributes(t *testing.T) {
 	defer leaktest.AfterTest(t)()
-	cfg := MakeConfig(context.TODO(), cluster.MakeTestingClusterSettings())
+	cfg := MakeConfig(context.Background(), cluster.MakeTestingClusterSettings())
 	cfg.Attrs = "attr1=val1::attr2=val2"
 	cfg.Stores = base.StoreSpecList{Specs: []base.StoreSpec{{InMemory: true, Size: base.SizeSpec{InBytes: base.MinimumStoreSize * 100}}}}
-	engines, err := cfg.CreateEngines(context.TODO())
+	engines, err := cfg.CreateEngines(context.Background())
 	if err != nil {
 		t.Fatalf("Failed to initialize stores: %s", err)
 	}
 	defer engines.Close()
-	if err := cfg.InitNode(context.TODO()); err != nil {
+	if err := cfg.InitNode(context.Background()); err != nil {
 		t.Fatalf("Failed to initialize node: %s", err)
 	}
@@ -52,15 +52,15 @@ func TestParseInitNodeAttributes(t *testing.T) {
 // correctly.
 func TestParseJoinUsingAddrs(t *testing.T) {
 	defer leaktest.AfterTest(t)()
-	cfg := MakeConfig(context.TODO(), cluster.MakeTestingClusterSettings())
+	cfg := MakeConfig(context.Background(), cluster.MakeTestingClusterSettings())
 	cfg.JoinList = []string{"localhost:12345", "localhost:23456", "localhost:34567", "localhost"}
 	cfg.Stores = base.StoreSpecList{Specs: []base.StoreSpec{{InMemory: true, Size: base.SizeSpec{InBytes: base.MinimumStoreSize * 100}}}}
-	engines, err := cfg.CreateEngines(context.TODO())
+	engines, err := cfg.CreateEngines(context.Background())
 	if err != nil {
 		t.Fatalf("Failed to initialize stores: %s", err)
 	}
 	defer engines.Close()
-	if err := cfg.InitNode(context.TODO()); err != nil {
+	if err := cfg.InitNode(context.Background()); err != nil {
 		t.Fatalf("Failed to initialize node: %s", err)
 	}
 	r1, err := resolver.NewResolver("localhost:12345")
@@ -113,8 +113,8 @@ func TestReadEnvironmentVariables(t *testing.T) {
 	st := cluster.MakeTestingClusterSettings()
 	// Makes sure no values are set when no environment variables are set.
-	cfg := MakeConfig(context.TODO(), st)
-	cfgExpected := MakeConfig(context.TODO(), st)
+	cfg := MakeConfig(context.Background(), st)
+	cfgExpected := MakeConfig(context.Background(), st)
 	resetEnvVar()
 	cfg.readEnvironmentVariables()
@@ -184,7 +184,7 @@ func TestFilterGossipBootstrapResolvers(t *testing.T) {
 			resolvers = append(resolvers, resolver)
 		}
 	}
-	cfg := MakeConfig(context.TODO(), cluster.MakeTestingClusterSettings())
+	cfg := MakeConfig(context.Background(), cluster.MakeTestingClusterSettings())
 	cfg.GossipBootstrapResolvers = resolvers
 	listenAddr := util.MakeUnresolvedAddr("tcp", resolverSpecs[0])
@@ -200,7 +200,7 @@ func TestFilterGossipBootstrapResolvers(t *testing.T) {
 func TestParseBootstrapResolvers(t *testing.T) {
 	defer leaktest.AfterTest(t)()
-	cfg := MakeConfig(context.TODO(), cluster.MakeTestingClusterSettings())
+	cfg := MakeConfig(context.Background(), cluster.MakeTestingClusterSettings())
 	const expectedName = "hello"
 	t.Run("nosrv", func(t *testing.T) {
@@ -209,7 +209,7 @@ func TestParseBootstrapResolvers(t *testing.T) {
 		cfg.JoinPreferSRVRecords = false
 		cfg.JoinList = append(base.JoinListType(nil), expectedName)
-		resolvers, err := cfg.parseGossipBootstrapResolvers(context.TODO())
+		resolvers, err := cfg.parseGossipBootstrapResolvers(context.Background())
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -236,7 +236,7 @@ func TestParseBootstrapResolvers(t *testing.T) {
 			return "cluster", []*net.SRV{{Target: expectedName, Port: 111}}, nil
 		})()
-		resolvers, err := cfg.parseGossipBootstrapResolvers(context.TODO())
+		resolvers, err := cfg.parseGossipBootstrapResolvers(context.Background())
 		if err != nil {
 			t.Fatal(err)
 		}
diff --git a/pkg/server/drain_test.go b/pkg/server/drain_test.go
index c512f2702506..4aad4aecb49b 100644
--- a/pkg/server/drain_test.go
+++ b/pkg/server/drain_test.go
@@ -87,7 +87,7 @@ func doTestDrain(tt *testing.T, newInterface bool) {
 	// Now expect the server to be shut down.
 	testutils.SucceedsSoon(t, func() error {
-		_, err := t.c.Drain(context.TODO(), &serverpb.DrainRequest{Shutdown: false})
+		_, err := t.c.Drain(context.Background(), &serverpb.DrainRequest{Shutdown: false})
 		if grpcutil.IsClosedConnection(err) {
 			return nil
 		}
@@ -118,7 +118,7 @@ func newTestDrainContext(t *testing.T, newInterface bool) *testDrainContext {
 	// We'll have the RPC talk to the first node.
 	var err error
-	tc.c, tc.connCloser, err = getAdminClientForServer(context.TODO(),
+	tc.c, tc.connCloser, err = getAdminClientForServer(context.Background(),
 		tc.tc, 0 /* serverIdx */)
 	if err != nil {
 		tc.Close()
@@ -132,7 +132,7 @@ func (t *testDrainContext) Close() {
 	if t.connCloser != nil {
 		t.connCloser()
 	}
-	t.tc.Stopper().Stop(context.TODO())
+	t.tc.Stopper().Stop(context.Background())
 }
 func (t *testDrainContext) sendProbe() *serverpb.DrainResponse {
@@ -155,7 +155,7 @@ func (t *testDrainContext) drainRequest(drain, shutdown bool) *serverpb.DrainRes
 		}
 	}
-	drainStream, err := t.c.Drain(context.TODO(), req)
+	drainStream, err := t.c.Drain(context.Background(), req)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -168,7 +168,7 @@ func (t *testDrainContext) drainRequest(drain, shutdown bool) *serverpb.DrainRes
 func (t *testDrainContext) sendShutdown() *serverpb.DrainResponse {
 	req := &serverpb.DrainRequest{Shutdown: true}
-	drainStream, err := t.c.Drain(context.TODO(), req)
+	drainStream, err := t.c.Drain(context.Background(), req)
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/pkg/server/goroutinedumper/goroutinedumper_test.go b/pkg/server/goroutinedumper/goroutinedumper_test.go
index 72946ca2dc1f..2ef37ecca4e9 100644
--- a/pkg/server/goroutinedumper/goroutinedumper_test.go
+++ b/pkg/server/goroutinedumper/goroutinedumper_test.go
@@ -157,7 +157,7 @@ func TestHeuristic(t *testing.T) {
 				dir: dumpDir,
 			}
-			ctx := context.TODO()
+			ctx := context.Background()
 			for _, v := range c.vals {
 				currentTime = baseTime.Add(v.secs * time.Second)
 				numGoroutinesThreshold.Override(&st.SV, v.threshold)
@@ -283,7 +283,7 @@ func TestGC(t *testing.T) {
 				err = os.Truncate(path, f.size)
 				assert.NoError(t, err, "unexpected error when truncating file %s", path)
 			}
-			ctx := context.TODO()
+			ctx := context.Background()
 			gc(ctx, tempDir, c.sizeLimit)
 			files, err := ioutil.ReadDir(tempDir)
 			assert.NoError(t, err, "unexpected error when listing files in %s", tempDir)
diff --git a/pkg/server/graphite_test.go b/pkg/server/graphite_test.go
index ed179e8d3852..0cc4701416a4 100644
--- a/pkg/server/graphite_test.go
+++ b/pkg/server/graphite_test.go
@@ -30,7 +30,7 @@ import (
 func TestGraphite(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	s, rawDB, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	ctx := context.Background()
 	const setQ = `SET CLUSTER SETTING "%s" = "%s"`
diff --git a/pkg/server/intent_test.go b/pkg/server/intent_test.go
index dbf14a869569..8583b35662f3 100644
--- a/pkg/server/intent_test.go
+++ b/pkg/server/intent_test.go
@@ -114,13 +114,13 @@ func TestIntentResolution(t *testing.T) {
 			// inefficient.
 			s, _, kvDB := serverutils.StartServer(t, base.TestServerArgs{
 				Knobs: base.TestingKnobs{Store: &storeKnobs}})
-			defer s.Stopper().Stop(context.TODO())
+			defer s.Stopper().Stop(context.Background())
 			// Split the Range. This should not have any asynchronous intents.
-			if err := kvDB.AdminSplit(context.TODO(), splitKey, splitKey, hlc.MaxTimestamp /* expirationTime */); err != nil {
+			if err := kvDB.AdminSplit(context.Background(), splitKey, splitKey, hlc.MaxTimestamp /* expirationTime */); err != nil {
 				t.Fatal(err)
 			}
-			if err := kvDB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error {
+			if err := kvDB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error {
 				b := txn.NewBatch()
 				if tc.keys[0] >= string(splitKey) {
 					t.Fatalf("first key %s must be < split key %s", tc.keys[0], splitKey)
@@ -155,7 +155,7 @@ func TestIntentResolution(t *testing.T) {
 			// Use Raft to make it likely that any straddling intent
 			// resolutions have come in. Don't touch existing data; that could
 			// generate unexpected intent resolutions.
-			if _, err := kvDB.Scan(context.TODO(), "z\x00", "z\x00\x00", 0); err != nil {
+			if _, err := kvDB.Scan(context.Background(), "z\x00", "z\x00\x00", 0); err != nil {
 				t.Fatal(err)
 			}
 		}()
diff --git a/pkg/server/node_test.go b/pkg/server/node_test.go
index 9e614f5c16f4..b8dd98c8e250 100644
--- a/pkg/server/node_test.go
+++ b/pkg/server/node_test.go
@@ -274,7 +274,7 @@ func compareNodeStatus(
 	// ========================================
 	nodeStatusKey := keys.NodeStatusKey(ts.node.Descriptor.NodeID)
 	nodeStatus := &statuspb.NodeStatus{}
-	if err := ts.db.GetProto(context.TODO(), nodeStatusKey, nodeStatus); err != nil {
+	if err := ts.db.GetProto(context.Background(), nodeStatusKey, nodeStatus); err != nil {
 		t.Fatalf("%d: failure getting node status: %s", testNumber, err)
 	}
@@ -388,9 +388,9 @@ func TestNodeStatusWritten(t *testing.T) {
 	srv, _, kvDB := serverutils.StartServer(t, base.TestServerArgs{
 		DisableEventLog: true,
 	})
-	defer srv.Stopper().Stop(context.TODO())
+	defer srv.Stopper().Stop(context.Background())
 	ts := srv.(*TestServer)
-	ctx := context.TODO()
+	ctx := context.Background()
 	// Retrieve the first store from the Node.
 	s, err := ts.node.stores.GetStore(roachpb.StoreID(1))
@@ -404,7 +404,7 @@ func TestNodeStatusWritten(t *testing.T) {
 	leftKey := "a"
 	// Scan over all keys to "wake up" all replicas (force a lease holder election).
-	if _, err := kvDB.Scan(context.TODO(), keys.MetaMax, keys.MaxKey, 0); err != nil {
+	if _, err := kvDB.Scan(context.Background(), keys.MetaMax, keys.MaxKey, 0); err != nil {
 		t.Fatal(err)
 	}
@@ -526,7 +526,7 @@ func TestNodeStatusWritten(t *testing.T) {
 	// ========================================
 	// Split the range.
-	if err := ts.db.AdminSplit(context.TODO(), splitKey, splitKey, hlc.MaxTimestamp /* expirationTime */); err != nil {
+	if err := ts.db.AdminSplit(context.Background(), splitKey, splitKey, hlc.MaxTimestamp /* expirationTime */); err != nil {
 		t.Fatal(err)
 	}
diff --git a/pkg/server/server_test.go b/pkg/server/server_test.go
index e0327a6c34be..6a7c40318c3e 100644
--- a/pkg/server/server_test.go
+++ b/pkg/server/server_test.go
@@ -62,7 +62,7 @@ func TestSelfBootstrap(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	if s.RPCContext().ClusterID.Get() == uuid.Nil {
 		t.Error("cluster ID failed to be set on the RPC context")
@@ -86,7 +86,7 @@ func TestHealthCheck(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	ctx := context.Background()
@@ -124,7 +124,7 @@ func TestHealthCheck(t *testing.T) {
 func TestEngineTelemetry(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	s, db, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	rows, err := db.Query("SELECT * FROM crdb_internal.feature_usage WHERE feature_name LIKE 'storage.engine.%' AND usage_count > 0;")
 	defer func() {
@@ -162,7 +162,7 @@ func TestServerStartClock(t *testing.T) {
 		},
 	}
 	s, _, _ := serverutils.StartServer(t, params)
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	// Run a command so that we are sure to touch the timestamp cache. This is
 	// actually not needed because other commands run during server
@@ -192,7 +192,7 @@ func TestPlainHTTPServer(t *testing.T) {
 		// The default context uses embedded certs.
 		Insecure: true,
 	})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	// First, make sure that the TestServer's built-in client interface
 	// still works in insecure mode.
@@ -209,7 +209,7 @@ func TestPlainHTTPServer(t *testing.T) {
 	if !strings.HasPrefix(url, "http://") {
 		t.Fatalf("expected insecure admin url to start with http://, but got %s", url)
 	}
-	if resp, err := httputil.Get(context.TODO(), url); err != nil {
+	if resp, err := httputil.Get(context.Background(), url); err != nil {
 		t.Error(err)
 	} else {
 		if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil {
@@ -222,7 +222,7 @@ func TestPlainHTTPServer(t *testing.T) {
 	// Attempting to connect to the insecure server with HTTPS doesn't work.
 	secureURL := strings.Replace(url, "http://", "https://", 1)
-	if _, err := httputil.Get(context.TODO(), secureURL); !testutils.IsError(err, "http: server gave HTTP response to HTTPS client") {
+	if _, err := httputil.Get(context.Background(), secureURL); !testutils.IsError(err, "http: server gave HTTP response to HTTPS client") {
 		t.Error(err)
 	}
 }
@@ -230,7 +230,7 @@ func TestSecureHTTPRedirect(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	ts := s.(*TestServer)
 	httpClient, err := s.GetHTTPClient()
@@ -280,7 +280,7 @@ func TestSecureHTTPRedirect(t *testing.T) {
 func TestAcceptEncoding(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	client, err := s.GetAdminAuthenticatedHTTPClient()
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -632,7 +632,7 @@ func TestListenerFileCreation(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	files, err := filepath.Glob(filepath.Join(dir, "cockroach.*"))
 	if err != nil {
@@ -694,7 +694,7 @@ func TestClusterIDMismatch(t *testing.T) {
 	}
 	_, err := inspectEngines(
-		context.TODO(), engines, roachpb.Version{}, roachpb.Version{})
+		context.Background(), engines, roachpb.Version{}, roachpb.Version{})
 	expected := "conflicting store ClusterIDs"
 	if !testutils.IsError(err, expected) {
 		t.Fatalf("expected %s error, got %v", expected, err)
 	}
@@ -764,7 +764,7 @@ func TestEnsureInitialWallTimeMonotonicity(t *testing.T) {
 	}
 	ensureClockMonotonicity(
-		context.TODO(),
+		context.Background(),
 		c,
 		c.PhysicalTime(),
 		test.prevHLCUpperBound,
@@ -969,7 +969,7 @@ func TestServeIndexHTML(t *testing.T) {
 		// In test servers, web sessions are required by default.
 		DisableWebSessionAuthentication: true,
 	})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	tsrv := s.(*TestServer)
 	client, err := tsrv.GetHTTPClient()
@@ -1035,7 +1035,7 @@ Binary built without web UI.
 	linkInFakeUI()
 	defer unlinkFakeUI()
 	s, _, _ := serverutils.StartServer(t, base.TestServerArgs{})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	tsrv := s.(*TestServer)
 	loggedInClient, err := tsrv.GetAdminAuthenticatedHTTPClient()
diff --git a/pkg/server/settingsworker_test.go b/pkg/server/settingsworker_test.go
index 13af3cf6d855..008688f9088e 100644
--- a/pkg/server/settingsworker_test.go
+++ b/pkg/server/settingsworker_test.go
@@ -68,7 +68,7 @@ func TestSettingsRefresh(t *testing.T) {
 	// need to do this before starting the server, or there will be data races.
 	st := cluster.MakeTestingClusterSettings()
 	s, rawDB, _ := serverutils.StartServer(t, base.TestServerArgs{Settings: st})
-	defer s.Stopper().Stop(context.TODO())
+	defer s.Stopper().Stop(context.Background())
 	db := sqlutils.MakeSQLRunner(rawDB)
@@ -192,7 +192,7 @@ func TestSettingsSetAndShow(t *testing.T) {
 	// need to do this before starting the server, or there will be data races.
st := cluster.MakeTestingClusterSettings() s, rawDB, _ := serverutils.StartServer(t, base.TestServerArgs{Settings: st}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) db := sqlutils.MakeSQLRunner(rawDB) @@ -300,7 +300,7 @@ func TestSettingsShowAll(t *testing.T) { st := cluster.MakeTestingClusterSettings() s, rawDB, _ := serverutils.StartServer(t, base.TestServerArgs{Settings: st}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) db := sqlutils.MakeSQLRunner(rawDB) diff --git a/pkg/server/status/jemalloc_test.go b/pkg/server/status/jemalloc_test.go index e8dcdab1a200..9a7263742a7f 100644 --- a/pkg/server/status/jemalloc_test.go +++ b/pkg/server/status/jemalloc_test.go @@ -22,7 +22,7 @@ import ( func TestJemalloc(t *testing.T) { defer leaktest.AfterTest(t)() - ctx := context.TODO() + ctx := context.Background() cgoAllocated, _, err := getJemallocStats(ctx) if err != nil { t.Fatal(err) diff --git a/pkg/server/status_test.go b/pkg/server/status_test.go index 196f0a4372ad..a9c8ec23c70e 100644 --- a/pkg/server/status_test.go +++ b/pkg/server/status_test.go @@ -83,7 +83,7 @@ func getStatusJSONProtoWithAdminOption( func TestStatusLocalStacks(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) // Verify match with at least two goroutine stacks. re := regexp.MustCompile("(?s)goroutine [0-9]+.*goroutine [0-9]+.*") @@ -104,7 +104,7 @@ func TestStatusLocalStacks(t *testing.T) { func TestStatusJson(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) ts := s.(*TestServer) nodeID := ts.Gossip().NodeID.Get() @@ -157,7 +157,7 @@ func TestStatusJson(t *testing.T) { func TestHealthTelemetry(t *testing.T) { defer leaktest.AfterTest(t)() s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) rows, err := db.Query("SELECT * FROM crdb_internal.feature_usage WHERE feature_name LIKE 'monitoring%' AND usage_count > 0;") defer func() { @@ -231,7 +231,7 @@ func TestHealthTelemetry(t *testing.T) { func TestStatusGossipJson(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) var data gossip.InfoStatus if err := getStatusJSONProto(s, "gossip/local", &data); err != nil { @@ -267,7 +267,7 @@ func TestStatusEngineStatsJson(t *testing.T) { if err != nil { t.Fatal(err) } - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) var engineStats serverpb.EngineStatsResponse if err := getStatusJSONProto(s, "enginestats/local", &engineStats); err != nil { @@ -327,14 +327,14 @@ func startServer(t *testing.T) *TestServer { // Make sure the range is spun up with an arbitrary read command. We do not // expect a specific response. - if _, err := kvDB.Get(context.TODO(), "a"); err != nil { + if _, err := kvDB.Get(context.Background(), "a"); err != nil { t.Fatal(err) } // Make sure the node status is available. This is done by forcing stores to // publish their status, synchronizing to the event feed with a canary // event, and then forcing the server to write summaries immediately. 
- if err := ts.node.computePeriodicMetrics(context.TODO(), 0); err != nil { + if err := ts.node.computePeriodicMetrics(context.Background(), 0); err != nil { t.Fatalf("error publishing store statuses: %s", err) } @@ -352,7 +352,7 @@ func newRPCTestContext(ts *TestServer, cfg *base.Config) *rpc.Context { // Ensure that the RPC client context validates the server cluster ID. // This ensures that a test where the server is restarted will not let // its test RPC client talk to a server started by an unrelated concurrent test. - rpcContext.ClusterID.Set(context.TODO(), ts.ClusterID()) + rpcContext.ClusterID.Set(context.Background(), ts.ClusterID()) return rpcContext } @@ -371,7 +371,7 @@ func TestStatusGetFiles(t *testing.T) { }, }) ts := tsI.(*TestServer) - defer ts.Stopper().Stop(context.TODO()) + defer ts.Stopper().Stop(context.Background()) rootConfig := testutils.NewTestBaseContext(security.RootUser) rpcContext := newRPCTestContext(ts, rootConfig) @@ -492,7 +492,7 @@ func TestStatusLocalLogs(t *testing.T) { defer s.Close(t) ts := startServer(t) - defer ts.Stopper().Stop(context.TODO()) + defer ts.Stopper().Stop(context.Background()) // Log an error of each main type which we expect to be able to retrieve. // The resolution of our log timestamps is such that it's possible to get @@ -635,7 +635,7 @@ func TestStatusLocalLogs(t *testing.T) { func TestNodeStatusResponse(t *testing.T) { defer leaktest.AfterTest(t)() s := startServer(t) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) // First fetch all the node statuses. wrapper := serverpb.NodesResponse{} @@ -700,7 +700,7 @@ func TestMetricsRecording(t *testing.T) { func TestMetricsEndpoint(t *testing.T) { defer leaktest.AfterTest(t)() s := startServer(t) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := getText(s, s.AdminURL()+statusPrefix+"metrics/"+s.Gossip().NodeID.String()); err != nil { t.Fatal(err) @@ -712,7 +712,7 @@ func TestMetricsEndpoint(t *testing.T) { func TestMetricsMetadata(t *testing.T) { defer leaktest.AfterTest(t)() s := startServer(t) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) metricsMetadata := s.recorder.GetMetricsMetadata() @@ -740,7 +740,7 @@ func TestMetricsMetadata(t *testing.T) { func TestChartCatalogGen(t *testing.T) { defer leaktest.AfterTest(t)() s := startServer(t) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) metricsMetadata := s.recorder.GetMetricsMetadata() @@ -815,7 +815,7 @@ func deleteSeenMetrics(c *catalog.ChartSection, metadata map[string]metric.Metad func TestChartCatalogMetrics(t *testing.T) { defer leaktest.AfterTest(t)() s := startServer(t) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) metricsMetadata := s.recorder.GetMetricsMetadata() @@ -856,7 +856,7 @@ func TestChartCatalogMetrics(t *testing.T) { func TestHotRangesResponse(t *testing.T) { defer leaktest.AfterTest(t)() ts := startServer(t) - defer ts.Stopper().Stop(context.TODO()) + defer ts.Stopper().Stop(context.Background()) var hotRangesResp serverpb.HotRangesResponse if err := getStatusJSONProto(ts, "hotranges", &hotRangesResp); err != nil { @@ -900,10 +900,10 @@ func TestRangesResponse(t *testing.T) { defer leaktest.AfterTest(t)() defer kvserver.EnableLeaseHistory(100)() ts := startServer(t) - defer ts.Stopper().Stop(context.TODO()) + defer ts.Stopper().Stop(context.Background()) // Perform a scan to ensure that all the raft 
groups are initialized. - if _, err := ts.db.Scan(context.TODO(), keys.LocalMax, roachpb.KeyMax, 0); err != nil { + if _, err := ts.db.Scan(context.Background(), keys.LocalMax, roachpb.KeyMax, 0); err != nil { t.Fatal(err) } @@ -943,7 +943,7 @@ func TestRangesResponse(t *testing.T) { func TestRaftDebug(t *testing.T) { defer leaktest.AfterTest(t)() s := startServer(t) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) var resp serverpb.RaftDebugResponse if err := getStatusJSONProto(s, "raft", &resp); err != nil { @@ -994,7 +994,7 @@ func TestRaftDebug(t *testing.T) { func TestStatusVars(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if body, err := getText(s, s.AdminURL()+statusPrefix+"vars"); err != nil { t.Fatal(err) @@ -1006,7 +1006,7 @@ func TestStatusVars(t *testing.T) { func TestSpanStatsResponse(t *testing.T) { defer leaktest.AfterTest(t)() ts := startServer(t) - defer ts.Stopper().Stop(context.TODO()) + defer ts.Stopper().Stop(context.Background()) httpClient, err := ts.GetAdminAuthenticatedHTTPClient() if err != nil { @@ -1072,7 +1072,7 @@ func TestSpanStatsGRPCResponse(t *testing.T) { func TestNodesGRPCResponse(t *testing.T) { defer leaktest.AfterTest(t)() ts := startServer(t) - defer ts.Stopper().Stop(context.TODO()) + defer ts.Stopper().Stop(context.Background()) rootConfig := testutils.NewTestBaseContext(security.RootUser) rpcContext := newRPCTestContext(ts, rootConfig) @@ -1099,7 +1099,7 @@ func TestNodesGRPCResponse(t *testing.T) { func TestCertificatesResponse(t *testing.T) { defer leaktest.AfterTest(t)() ts := startServer(t) - defer ts.Stopper().Stop(context.TODO()) + defer ts.Stopper().Stop(context.Background()) var response serverpb.CertificatesResponse if err := getStatusJSONProto(ts, "certificates/local", &response); err != nil { @@ -1149,7 +1149,7 @@ func TestDiagnosticsResponse(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) var resp diagnosticspb.DiagnosticReport if err := getStatusJSONProto(s, "diagnostics/local", &resp); err != nil { @@ -1167,7 +1167,7 @@ func TestRangeResponse(t *testing.T) { defer leaktest.AfterTest(t)() defer kvserver.EnableLeaseHistory(100)() ts := startServer(t) - defer ts.Stopper().Stop(context.TODO()) + defer ts.Stopper().Stop(context.Background()) // Perform a scan to ensure that all the raft groups are initialized. 
if _, err := ts.db.Scan(context.Background(), keys.LocalMax, roachpb.KeyMax, 0); err != nil { @@ -1232,7 +1232,7 @@ func TestRemoteDebugModeSetting(t *testing.T) { }, }) ts := s.(*TestServer) - defer ts.Stopper().Stop(context.TODO()) + defer ts.Stopper().Stop(context.Background()) if _, err := db.Exec(`SET CLUSTER SETTING server.remote_debugging.mode = 'off'`); err != nil { t.Fatal(err) @@ -1444,9 +1444,9 @@ func TestListSessionsSecurity(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) ts := s.(*TestServer) - defer ts.Stopper().Stop(context.TODO()) + defer ts.Stopper().Stop(context.Background()) - ctx := context.TODO() + ctx := context.Background() for _, requestWithAdmin := range []bool{true, false} { t.Run(fmt.Sprintf("admin=%v", requestWithAdmin), func(t *testing.T) { @@ -1520,7 +1520,7 @@ func TestCreateStatementDiagnosticsReport(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) req := &serverpb.CreateStatementDiagnosticsReportRequest{ StatementFingerprint: "INSERT INTO test VALUES (_)", @@ -1544,7 +1544,7 @@ func TestStatementDiagnosticsCompleted(t *testing.T) { defer leaktest.AfterTest(t)() s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) _, err := db.Exec("CREATE TABLE test (x int PRIMARY KEY)") if err != nil { @@ -1590,7 +1590,7 @@ func TestStatementDiagnosticsCompleted(t *testing.T) { func TestJobStatusResponse(t *testing.T) { defer leaktest.AfterTest(t)() ts := startServer(t) - defer ts.Stopper().Stop(context.TODO()) + defer ts.Stopper().Stop(context.Background()) rootConfig := testutils.NewTestBaseContext(security.RootUser) rpcContext := newRPCTestContext(ts, rootConfig) diff --git a/pkg/server/testserver_test.go b/pkg/server/testserver_test.go index b9aba7a01adf..657182a2e0f9 100644 --- a/pkg/server/testserver_test.go +++ b/pkg/server/testserver_test.go @@ -22,5 +22,5 @@ import ( func TestServerTest(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) } diff --git a/pkg/server/updates_test.go b/pkg/server/updates_test.go index 2a3162773db8..83237236dafc 100644 --- a/pkg/server/updates_test.go +++ b/pkg/server/updates_test.go @@ -108,7 +108,7 @@ func TestUsageQuantization(t *testing.T) { defer r.Close() st := cluster.MakeTestingClusterSettings() - ctx := context.TODO() + ctx := context.Background() url := r.URL() s, db, _ := serverutils.StartServer(t, base.TestServerArgs{ @@ -208,7 +208,7 @@ func TestReportUsage(t *testing.T) { defer leaktest.AfterTest(t)() const elemName = "somestring" - ctx := context.TODO() + ctx := context.Background() r := diagutils.NewServer() defer r.Close() @@ -247,7 +247,7 @@ func TestReportUsage(t *testing.T) { } s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) // stopper will wait for the update/report loop to finish too. + defer s.Stopper().Stop(context.Background()) // stopper will wait for the update/report loop to finish too. ts := s.(*TestServer) // make sure the test's generated activity is the only activity we measure. 
diff --git a/pkg/server/version_cluster_test.go b/pkg/server/version_cluster_test.go index b4c5861a9d6c..27be77bf22aa 100644 --- a/pkg/server/version_cluster_test.go +++ b/pkg/server/version_cluster_test.go @@ -154,7 +154,7 @@ func prev(version roachpb.Version) roachpb.Version { // Logic for versions below 19.1. if version.Major > 2 { - log.Fatalf(context.TODO(), "can't compute previous version for %s", version) + log.Fatalf(context.Background(), "can't compute previous version for %s", version) } if version.Minor != 0 { diff --git a/pkg/sql/ambiguous_commit_test.go b/pkg/sql/ambiguous_commit_test.go index 99dd14bba214..27f49938fe30 100644 --- a/pkg/sql/ambiguous_commit_test.go +++ b/pkg/sql/ambiguous_commit_test.go @@ -138,7 +138,7 @@ func TestAmbiguousCommit(t *testing.T) { const numReplicas = 3 tc := testcluster.StartTestCluster(t, numReplicas, testClusterArgs) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) // Avoid distSQL so we can reliably hydrate the intended dist // sender's cache below. diff --git a/pkg/sql/as_of_test.go b/pkg/sql/as_of_test.go index 42c2e1240da9..23c70463f8cc 100644 --- a/pkg/sql/as_of_test.go +++ b/pkg/sql/as_of_test.go @@ -35,7 +35,7 @@ func TestAsOfTime(t *testing.T) { params, _ := tests.CreateTestServerParams() params.Knobs.GCJob = &sql.GCJobTestingKnobs{RunBeforeResume: func(_ int64) error { select {} }} s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) const val1 = 1 const val2 = 2 @@ -251,7 +251,7 @@ func TestAsOfRetry(t *testing.T) { params, cmdFilters := tests.CreateTestServerParams() s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) const val1 = 1 const val2 = 2 @@ -348,7 +348,7 @@ func TestShowTraceAsOfTime(t *testing.T) { defer leaktest.AfterTest(t)() s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) const val1 = 456 const val2 = 789 diff --git a/pkg/sql/builtin_test.go b/pkg/sql/builtin_test.go index 7245b425a1f5..1dc2fccdb394 100644 --- a/pkg/sql/builtin_test.go +++ b/pkg/sql/builtin_test.go @@ -29,7 +29,7 @@ func TestFuncNull(t *testing.T) { defer leaktest.AfterTest(t)() s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) - ctx := context.TODO() + ctx := context.Background() defer s.Stopper().Stop(ctx) run := func(t *testing.T, q string) { diff --git a/pkg/sql/comment_on_column_test.go b/pkg/sql/comment_on_column_test.go index 2396719dc384..6b1a3fcd0d2d 100644 --- a/pkg/sql/comment_on_column_test.go +++ b/pkg/sql/comment_on_column_test.go @@ -26,7 +26,7 @@ func TestCommentOnColumn(t *testing.T) { params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := db.Exec(` CREATE DATABASE d; @@ -94,7 +94,7 @@ func TestCommentOnColumnTransaction(t *testing.T) { params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := db.Exec(` CREATE DATABASE d; @@ -114,7 +114,7 @@ func TestCommentOnColumnWhenDropTable(t *testing.T) { params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer 
s.Stopper().Stop(context.Background()) if _, err := db.Exec(` CREATE DATABASE d; @@ -149,7 +149,7 @@ func TestCommentOnColumnWhenDropColumn(t *testing.T) { params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := db.Exec(` CREATE DATABASE d; diff --git a/pkg/sql/comment_on_database_test.go b/pkg/sql/comment_on_database_test.go index 595cfdac19d6..e9ac6b136296 100644 --- a/pkg/sql/comment_on_database_test.go +++ b/pkg/sql/comment_on_database_test.go @@ -26,7 +26,7 @@ func TestCommentOnDatabase(t *testing.T) { params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := db.Exec(` CREATE DATABASE d; @@ -77,7 +77,7 @@ func TestCommentOnDatabaseWhenDrop(t *testing.T) { params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := db.Exec(` CREATE DATABASE d; diff --git a/pkg/sql/comment_on_index_test.go b/pkg/sql/comment_on_index_test.go index 7d06da41bb08..d443f035c0d8 100644 --- a/pkg/sql/comment_on_index_test.go +++ b/pkg/sql/comment_on_index_test.go @@ -26,7 +26,7 @@ func TestCommentOnIndex(t *testing.T) { params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := db.Exec(` CREATE DATABASE d; @@ -79,7 +79,7 @@ func TestCommentOnIndexWhenDropTable(t *testing.T) { params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := db.Exec(` CREATE DATABASE d; @@ -114,7 +114,7 @@ func TestCommentOnIndexWhenDropIndex(t *testing.T) { params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := db.Exec(` CREATE DATABASE d; diff --git a/pkg/sql/comment_on_table_test.go b/pkg/sql/comment_on_table_test.go index aaf1fb904d99..eeb38eb51f1f 100644 --- a/pkg/sql/comment_on_table_test.go +++ b/pkg/sql/comment_on_table_test.go @@ -26,7 +26,7 @@ func TestCommentOnTable(t *testing.T) { params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := db.Exec(` CREATE DATABASE d; @@ -79,7 +79,7 @@ func TestCommentOnTableWhenDrop(t *testing.T) { params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := db.Exec(` CREATE DATABASE d; diff --git a/pkg/sql/conn_executor_internal_test.go b/pkg/sql/conn_executor_internal_test.go index 98b2e55f0ae1..14ee4ca61b17 100644 --- a/pkg/sql/conn_executor_internal_test.go +++ b/pkg/sql/conn_executor_internal_test.go @@ -218,7 +218,7 @@ func TestPortalsDestroyedOnTxnFinish(t *testing.T) { func mustParseOne(s string) parser.Statement { stmts, err := parser.Parse(s) if err != nil { - log.Fatalf(context.TODO(), "%v", err) + log.Fatalf(context.Background(), "%v", err) } return stmts[0] } diff --git a/pkg/sql/conn_executor_test.go 
b/pkg/sql/conn_executor_test.go index 6c9049e193db..47070a543d63 100644 --- a/pkg/sql/conn_executor_test.go +++ b/pkg/sql/conn_executor_test.go @@ -206,7 +206,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v TEXT); } }() - ctx := context.TODO() + ctx := context.Background() conn := c.(driver.ConnBeginTx) txn, err := conn.BeginTx(ctx, driver.TxOptions{}) if err != nil { @@ -327,7 +327,7 @@ func TestNonRetriableErrorOnAutoCommit(t *testing.T) { }, } s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) sqlDB.SetMaxOpenConns(1) @@ -381,7 +381,7 @@ func TestErrorOnRollback(t *testing.T) { }, } s, sqlDB, _ := serverutils.StartServer(t, params) - ctx := context.TODO() + ctx := context.Background() defer s.Stopper().Stop(ctx) if _, err := sqlDB.Exec(` @@ -445,7 +445,7 @@ func TestHalloweenProblemAvoidance(t *testing.T) { params, _ := tests.CreateTestServerParams() params.Insecure = true s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := db.Exec(` CREATE DATABASE t; @@ -494,7 +494,7 @@ func TestAppNameStatisticsInitialization(t *testing.T) { params, _ := tests.CreateTestServerParams() params.Insecure = true s, _, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) // Prepare a session with a custom application name. pgURL := url.URL{ @@ -575,7 +575,7 @@ func TestQueryProgress(t *testing.T) { }, } s, rawDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) db := sqlutils.MakeSQLRunner(rawDB) diff --git a/pkg/sql/conn_io_test.go b/pkg/sql/conn_io_test.go index b5c14c9e20e6..19585c3d011c 100644 --- a/pkg/sql/conn_io_test.go +++ b/pkg/sql/conn_io_test.go @@ -52,7 +52,7 @@ func mustPush(ctx context.Context, t *testing.T, buf *StmtBuf, cmd Command) { func TestStmtBuf(t *testing.T) { defer leaktest.AfterTest(t)() - ctx := context.TODO() + ctx := context.Background() s1, err := parser.ParseOne("SELECT 1") if err != nil { t.Fatal(err) @@ -140,7 +140,7 @@ func TestStmtBuf(t *testing.T) { func TestStmtBufSignal(t *testing.T) { defer leaktest.AfterTest(t)() - ctx := context.TODO() + ctx := context.Background() buf := NewStmtBuf() s1, err := parser.ParseOne("SELECT 1") if err != nil { @@ -164,7 +164,7 @@ func TestStmtBufSignal(t *testing.T) { func TestStmtBufLtrim(t *testing.T) { defer leaktest.AfterTest(t)() - ctx := context.TODO() + ctx := context.Background() buf := NewStmtBuf() for i := 0; i < 5; i++ { stmt, err := parser.ParseOne( @@ -192,7 +192,7 @@ func TestStmtBufLtrim(t *testing.T) { func TestStmtBufClose(t *testing.T) { defer leaktest.AfterTest(t)() - ctx := context.TODO() + ctx := context.Background() buf := NewStmtBuf() stmt, err := parser.ParseOne("SELECT 1") if err != nil { @@ -229,7 +229,7 @@ func TestStmtBufPreparedStmt(t *testing.T) { defer leaktest.AfterTest(t)() buf := NewStmtBuf() - ctx := context.TODO() + ctx := context.Background() s1, err := parser.ParseOne("SELECT 1") if err != nil { @@ -272,7 +272,7 @@ func TestStmtBufBatching(t *testing.T) { defer leaktest.AfterTest(t)() buf := NewStmtBuf() - ctx := context.TODO() + ctx := context.Background() s1, err := parser.ParseOne("SELECT 1") if err != nil { diff --git a/pkg/sql/copy_file_upload_test.go b/pkg/sql/copy_file_upload_test.go index 08a582b324d0..a3d660cd66f7 100644 --- a/pkg/sql/copy_file_upload_test.go +++ 
b/pkg/sql/copy_file_upload_test.go @@ -95,7 +95,7 @@ func TestFileUpload(t *testing.T) { params.ExternalIODir = localExternalDir s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) testFileDir, cleanup2 := testutils.TempDir(t) defer cleanup2() @@ -125,7 +125,7 @@ func TestUploadEmptyFile(t *testing.T) { defer cleanup() params.ExternalIODir = localExternalDir s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) testFileDir, cleanup2 := testutils.TempDir(t) defer cleanup2() @@ -155,7 +155,7 @@ func TestFileNotExist(t *testing.T) { defer cleanup() params.ExternalIODir = localExternalDir s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) err := runCopyFile(t, db, filepath.Join(localExternalDir, filename)) expectedErr := "no such file" @@ -172,7 +172,7 @@ func TestFileExist(t *testing.T) { defer cleanup() params.ExternalIODir = localExternalDir s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) destination := filepath.Join(localExternalDir, filename) writeFile(t, destination, []byte("file exists")) @@ -193,7 +193,7 @@ func TestNotAdmin(t *testing.T) { params.ExternalIODir = localExternalDir params.Insecure = true s, rootDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) _, err := rootDB.Exec("CREATE USER jsmith") require.NoError(t, err) diff --git a/pkg/sql/copy_in_test.go b/pkg/sql/copy_in_test.go index df0e6fb6c222..8e57eab02cd1 100644 --- a/pkg/sql/copy_in_test.go +++ b/pkg/sql/copy_in_test.go @@ -39,7 +39,7 @@ func TestCopyNullInfNaN(t *testing.T) { params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := db.Exec(` CREATE DATABASE d; @@ -138,7 +138,7 @@ func TestCopyRandom(t *testing.T) { params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := db.Exec(` CREATE DATABASE d; @@ -277,7 +277,7 @@ func TestCopyError(t *testing.T) { params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := db.Exec(` CREATE DATABASE d; @@ -332,7 +332,7 @@ func TestCopyOne(t *testing.T) { params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := db.Exec(` CREATE DATABASE d; @@ -366,7 +366,7 @@ func TestCopyInProgress(t *testing.T) { params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := db.Exec(` CREATE DATABASE d; @@ -399,7 +399,7 @@ func TestCopyTransaction(t *testing.T) { params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := db.Exec(` CREATE DATABASE d; @@ -452,7 +452,7 @@ func TestCopyFKCheck(t *testing.T) { 
params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) db.SetMaxOpenConns(1) r := sqlutils.MakeSQLRunner(db) diff --git a/pkg/sql/crdb_internal_test.go b/pkg/sql/crdb_internal_test.go index 1e12860589cf..493aa80c36e1 100644 --- a/pkg/sql/crdb_internal_test.go +++ b/pkg/sql/crdb_internal_test.go @@ -126,8 +126,8 @@ func TestGossipAlertsTable(t *testing.T) { defer leaktest.AfterTest(t)() params, _ := tests.CreateTestServerParams() s, _, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) - ctx := context.TODO() + defer s.Stopper().Stop(context.Background()) + ctx := context.Background() if err := s.GossipI().(*gossip.Gossip).AddInfoProto(gossip.MakeNodeHealthAlertKey(456), &statuspb.HealthCheckResult{ Alerts: []statuspb.HealthAlert{{ @@ -169,7 +169,7 @@ func TestOldBitColumnMetadata(t *testing.T) { params, _ := tests.CreateTestServerParams() s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -213,7 +213,7 @@ CREATE TABLE t.test (k INT); tableDesc.Columns = append(tableDesc.Columns, *col) // Write the modified descriptor. - if err := kvDB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + if err := kvDB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { if err := txn.SetSystemConfigTrigger(); err != nil { return err } diff --git a/pkg/sql/create_stats_test.go b/pkg/sql/create_stats_test.go index a05de0f485f4..68e677814fd3 100644 --- a/pkg/sql/create_stats_test.go +++ b/pkg/sql/create_stats_test.go @@ -41,7 +41,7 @@ func TestStatsWithLowTTL(t *testing.T) { } s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) r := sqlutils.MakeSQLRunner(db) r.Exec(t, ` diff --git a/pkg/sql/create_test.go b/pkg/sql/create_test.go index 636d7c21ec0e..4bcd3b5f5771 100644 --- a/pkg/sql/create_test.go +++ b/pkg/sql/create_test.go @@ -37,8 +37,8 @@ func TestDatabaseDescriptor(t *testing.T) { defer leaktest.AfterTest(t)() params, _ := tests.CreateTestServerParams() s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) - ctx := context.TODO() + defer s.Stopper().Stop(context.Background()) + ctx := context.Background() codec := keys.SystemSQLCodec expectedCounter := int64(keys.MinNonPredefinedUserDescID) @@ -265,7 +265,7 @@ func verifyTables( } descKey := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, id) desc := &sqlbase.Descriptor{} - if err := kvDB.GetProto(context.TODO(), descKey, desc); err != nil { + if err := kvDB.GetProto(context.Background(), descKey, desc); err != nil { t.Fatal(err) } if (*desc != sqlbase.Descriptor{}) { @@ -285,7 +285,7 @@ func TestParallelCreateTables(t *testing.T) { const numberOfNodes = 3 tc := testcluster.StartTestCluster(t, numberOfNodes, base.TestClusterArgs{}) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) if _, err := tc.ServerConn(0).Exec(`CREATE DATABASE "test"`); err != nil { t.Fatal(err) @@ -338,7 +338,7 @@ func TestParallelCreateConflictingTables(t *testing.T) { const numberOfNodes = 3 tc := testcluster.StartTestCluster(t, numberOfNodes, base.TestClusterArgs{}) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) if _, err := 
tc.ServerConn(0).Exec(`CREATE DATABASE "test"`); err != nil { t.Fatal(err) @@ -386,7 +386,7 @@ func TestTableReadErrorsBeforeTableCreation(t *testing.T) { defer leaktest.AfterTest(t)() params, _ := tests.CreateTestServerParams() s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -444,7 +444,7 @@ SELECT * FROM t.kv%d func TestCreateStatementType(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) - ctx := context.TODO() + ctx := context.Background() defer s.Stopper().Stop(ctx) pgURL, cleanup := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(security.RootUser)) @@ -488,7 +488,7 @@ func TestSetUserPasswordInsecure(t *testing.T) { defer leaktest.AfterTest(t)() s, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{Insecure: true}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) errFail := "setting or updating a password is not supported in insecure mode" diff --git a/pkg/sql/database_test.go b/pkg/sql/database_test.go index 7c7b1ec59cd4..71e3ccb33e19 100644 --- a/pkg/sql/database_test.go +++ b/pkg/sql/database_test.go @@ -50,9 +50,9 @@ func TestDatabaseAccessors(t *testing.T) { defer leaktest.AfterTest(t)() s, _, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) - if err := kvDB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + if err := kvDB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { if _, err := getDatabaseDescByID(ctx, txn, keys.SystemSQLCodec, sqlbase.SystemDB.ID); err != nil { return err } diff --git a/pkg/sql/descriptor_mutation_test.go b/pkg/sql/descriptor_mutation_test.go index 688865eddd62..2e0445163678 100644 --- a/pkg/sql/descriptor_mutation_test.go +++ b/pkg/sql/descriptor_mutation_test.go @@ -55,7 +55,7 @@ func (mt mutationTest) checkTableSize(e int) { // Check that there are no hidden values tableStartKey := keys.SystemSQLCodec.TablePrefix(uint32(mt.tableDesc.ID)) tableEndKey := tableStartKey.PrefixEnd() - if kvs, err := mt.kvDB.Scan(context.TODO(), tableStartKey, tableEndKey, 0); err != nil { + if kvs, err := mt.kvDB.Scan(context.Background(), tableStartKey, tableEndKey, 0); err != nil { mt.Error(err) } else if len(kvs) != e { mt.Errorf("expected %d key value pairs, but got %d", e, len(kvs)) @@ -84,7 +84,7 @@ func (mt mutationTest) makeMutationsActive() { mt.Fatal(err) } if err := mt.kvDB.Put( - context.TODO(), + context.Background(), sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, mt.tableDesc.ID), sqlbase.WrapDescriptor(mt.tableDesc), ); err != nil { @@ -142,7 +142,7 @@ func (mt mutationTest) writeMutation(m sqlbase.DescriptorMutation) { mt.Fatal(err) } if err := mt.kvDB.Put( - context.TODO(), + context.Background(), sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, mt.tableDesc.ID), sqlbase.WrapDescriptor(mt.tableDesc), ); err != nil { @@ -167,7 +167,7 @@ func TestUpsertWithColumnMutationAndNotNullDefault(t *testing.T) { // Disable external processing of mutations. 
params, _ := tests.CreateTestServerParams() server, sqlDB, kvDB := serverutils.StartServer(t, params) - defer server.Stopper().Stop(context.TODO()) + defer server.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -224,7 +224,7 @@ func TestOperationsWithColumnMutation(t *testing.T) { // Disable external processing of mutations. params, _ := tests.CreateTestServerParams() server, sqlDB, kvDB := serverutils.StartServer(t, params) - defer server.Stopper().Stop(context.TODO()) + defer server.Stopper().Stop(context.Background()) // Fix the column families so the key counts below don't change if the // family heuristics are updated. @@ -489,7 +489,7 @@ func TestOperationsWithIndexMutation(t *testing.T) { // Disable external processing of mutations. params, _ := tests.CreateTestServerParams() server, sqlDB, kvDB := serverutils.StartServer(t, params) - defer server.Stopper().Stop(context.TODO()) + defer server.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -635,7 +635,7 @@ func TestOperationsWithColumnAndIndexMutation(t *testing.T) { defer sql.TestDisableTableLeases()() params, _ := tests.CreateTestServerParams() server, sqlDB, kvDB := serverutils.StartServer(t, params) - defer server.Stopper().Stop(context.TODO()) + defer server.Stopper().Stop(context.Background()) // Create a table with column i and an index on v and i. Fix the column // families so the key counts below don't change if the family heuristics @@ -841,7 +841,7 @@ func TestSchemaChangeCommandsWithPendingMutations(t *testing.T) { }, } server, sqlDB, kvDB := serverutils.StartServer(t, params) - defer server.Stopper().Stop(context.TODO()) + defer server.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -1048,7 +1048,7 @@ func TestTableMutationQueue(t *testing.T) { }, } server, sqlDB, kvDB := serverutils.StartServer(t, params) - defer server.Stopper().Stop(context.TODO()) + defer server.Stopper().Stop(context.Background()) // Create a table with column i and an index on v and i. if _, err := sqlDB.Exec(` @@ -1143,7 +1143,7 @@ func TestAddingFKs(t *testing.T) { params, _ := tests.CreateTestServerParams() s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -1159,7 +1159,7 @@ func TestAddingFKs(t *testing.T) { ordersDesc.State = sqlbase.TableDescriptor_ADD ordersDesc.Version++ if err := kvDB.Put( - context.TODO(), + context.Background(), sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, ordersDesc.ID), sqlbase.WrapDescriptor(ordersDesc), ); err != nil { diff --git a/pkg/sql/distsql/sync_flow_after_drain_test.go b/pkg/sql/distsql/sync_flow_after_drain_test.go index 8e8630f8dcbd..65f9ca45abe3 100644 --- a/pkg/sql/distsql/sync_flow_after_drain_test.go +++ b/pkg/sql/distsql/sync_flow_after_drain_test.go @@ -31,7 +31,7 @@ import ( func TestSyncFlowAfterDrain(t *testing.T) { defer leaktest.AfterTest(t)() - ctx := context.TODO() + ctx := context.Background() // We'll create a server just so that we can extract its distsql ServerConfig, // so we can use it for a manually-built DistSQL Server below. Otherwise, too // much work to create that ServerConfig by hand. 
diff --git a/pkg/sql/distsql_physical_planner_test.go b/pkg/sql/distsql_physical_planner_test.go index abc83cb22ece..205bad07ae70 100644 --- a/pkg/sql/distsql_physical_planner_test.go +++ b/pkg/sql/distsql_physical_planner_test.go @@ -137,7 +137,7 @@ func TestPlanningDuringSplitsAndMerges(t *testing.T) { ServerArgs: base.TestServerArgs{UseDatabase: "test"}, }) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) sqlutils.CreateTable( t, tc.ServerConn(0), "t", "x INT PRIMARY KEY, xsquared INT", @@ -148,7 +148,7 @@ func TestPlanningDuringSplitsAndMerges(t *testing.T) { ) // Start a worker that continuously performs splits in the background. - tc.Stopper().RunWorker(context.TODO(), func(ctx context.Context) { + tc.Stopper().RunWorker(context.Background(), func(ctx context.Context) { rng, _ := randutil.NewPseudoRand() cdb := tc.Server(0).DB() for { @@ -257,7 +257,7 @@ func TestDistSQLReceiverUpdatesCaches(t *testing.T) { rangeCache := kvcoord.NewRangeDescriptorCache(st, nil /* db */, size, stop.NewStopper()) leaseCache := kvcoord.NewLeaseHolderCache(size) r := MakeDistSQLReceiver( - context.TODO(), nil /* resultWriter */, tree.Rows, + context.Background(), nil /* resultWriter */, tree.Rows, rangeCache, leaseCache, nil /* txn */, nil /* updateClock */, &SessionTracing{}) descs := []roachpb.RangeDescriptor{ @@ -307,7 +307,7 @@ func TestDistSQLReceiverUpdatesCaches(t *testing.T) { t.Fatalf("expected: %+v, got: %+v", descs[i], desc) } - _, ok := leaseCache.Lookup(context.TODO(), descs[i].RangeID) + _, ok := leaseCache.Lookup(context.Background(), descs[i].RangeID) if !ok { t.Fatalf("didn't find lease for RangeID: %d", descs[i].RangeID) } @@ -330,7 +330,7 @@ func TestDistSQLRangeCachesIntegrationTest(t *testing.T) { UseDatabase: "test", }, }) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) db0 := tc.ServerConn(0) sqlutils.CreateTable(t, db0, "left", @@ -380,7 +380,7 @@ func TestDistSQLRangeCachesIntegrationTest(t *testing.T) { // Run everything in a transaction, so we're bound on a connection on which we // force DistSQL. - txn, err := db3.BeginTx(context.TODO(), nil /* opts */) + txn, err := db3.BeginTx(context.Background(), nil /* opts */) if err != nil { t.Fatal(err) } @@ -404,7 +404,7 @@ func TestDistSQLRangeCachesIntegrationTest(t *testing.T) { // Run a non-trivial query to force the "wrong range" metadata to flow through // a number of components. - row = txn.QueryRowContext(context.TODO(), query) + row = txn.QueryRowContext(context.Background(), query) var cnt int if err := row.Scan(&cnt); err != nil { t.Fatal(err) @@ -441,7 +441,7 @@ func TestDistSQLDeadHosts(t *testing.T) { ReplicationMode: base.ReplicationManual, ServerArgs: base.TestServerArgs{UseDatabase: "test"}, }) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) db := tc.ServerConn(0) db.SetMaxOpenConns(1) @@ -469,7 +469,7 @@ func TestDistSQLDeadHosts(t *testing.T) { // Run a query that uses the entire table and is easy to verify. 
runQuery := func() error { - log.Infof(context.TODO(), "running test query") + log.Infof(context.Background(), "running test query") var res int if err := db.QueryRow("SELECT sum(xsquared) FROM t").Scan(&res); err != nil { return err @@ -477,7 +477,7 @@ func TestDistSQLDeadHosts(t *testing.T) { if exp := (n * (n + 1) * (2*n + 1)) / 6; res != exp { t.Fatalf("incorrect result %d, expected %d", res, exp) } - log.Infof(context.TODO(), "test query OK") + log.Infof(context.Background(), "test query OK") return nil } if err := runQuery(); err != nil { @@ -523,7 +523,7 @@ func TestDistSQLDrainingHosts(t *testing.T) { ServerArgs: base.TestServerArgs{Knobs: base.TestingKnobs{DistSQL: &execinfra.TestingKnobs{DrainFast: true}}, UseDatabase: "test"}, }, ) - ctx := context.TODO() + ctx := context.Background() defer tc.Stopper().Stop(ctx) conn := tc.ServerConn(0) @@ -779,7 +779,7 @@ func TestPartitionSpans(t *testing.T) { // We need a mock Gossip to contain addresses for the nodes. Otherwise the // DistSQLPlanner will not plan flows on them. testStopper := stop.NewStopper() - defer testStopper.Stop(context.TODO()) + defer testStopper.Stop(context.Background()) mockGossip := gossip.NewTest(roachpb.NodeID(1), nil /* rpcContext */, nil, /* grpcServer */ testStopper, metric.NewRegistry(), zonepb.DefaultZoneConfigRef()) var nodeDescs []*roachpb.NodeDescriptor @@ -809,7 +809,7 @@ func TestPartitionSpans(t *testing.T) { for testIdx, tc := range testCases { t.Run(strconv.Itoa(testIdx), func(t *testing.T) { stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) tsp := &testSpanResolver{ nodes: nodeDescs, @@ -964,13 +964,13 @@ func TestPartitionSpansSkipsIncompatibleNodes(t *testing.T) { t.Run(tc.name, func(t *testing.T) { stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) // We need a mock Gossip to contain addresses for the nodes. Otherwise the // DistSQLPlanner will not plan flows on them. This Gossip will also // reflect tc.nodesNotAdvertisingDistSQLVersion. 
testStopper := stop.NewStopper() - defer testStopper.Stop(context.TODO()) + defer testStopper.Stop(context.Background()) mockGossip := gossip.NewTest(roachpb.NodeID(1), nil /* rpcContext */, nil, /* grpcServer */ testStopper, metric.NewRegistry(), zonepb.DefaultZoneConfigRef()) var nodeDescs []*roachpb.NodeDescriptor @@ -1059,7 +1059,7 @@ func TestPartitionSpansSkipsNodesNotInGossip(t *testing.T) { ranges := []testSpanResolverRange{{"A", 1}, {"B", 2}, {"C", 1}} stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) mockGossip := gossip.NewTest(roachpb.NodeID(1), nil /* rpcContext */, nil, /* grpcServer */ stopper, metric.NewRegistry(), zonepb.DefaultZoneConfigRef()) @@ -1148,7 +1148,7 @@ func TestCheckNodeHealth(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) const nodeID = roachpb.NodeID(5) diff --git a/pkg/sql/distsql_plan_backfill_test.go b/pkg/sql/distsql_plan_backfill_test.go index 11078c72aa07..99a99c44018b 100644 --- a/pkg/sql/distsql_plan_backfill_test.go +++ b/pkg/sql/distsql_plan_backfill_test.go @@ -58,7 +58,7 @@ func TestDistBackfill(t *testing.T) { }, }, }) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) cdb := tc.Server(0).DB() sqlutils.CreateTable( diff --git a/pkg/sql/distsql_plan_join_test.go b/pkg/sql/distsql_plan_join_test.go index 90e86862e0cd..34bc1e88ab89 100644 --- a/pkg/sql/distsql_plan_join_test.go +++ b/pkg/sql/distsql_plan_join_test.go @@ -152,8 +152,8 @@ func decodeTestKey(kvDB *kv.DB, key roachpb.Key) (string, error) { return "", err } - if err := kvDB.Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { - desc, err := sqlbase.GetTableDescFromID(context.TODO(), txn, keys.SystemSQLCodec, sqlbase.ID(descID)) + if err := kvDB.Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { + desc, err := sqlbase.GetTableDescFromID(context.Background(), txn, keys.SystemSQLCodec, sqlbase.ID(descID)) if err != nil { return err } @@ -227,7 +227,7 @@ func TestUseInterleavedJoin(t *testing.T) { defer leaktest.AfterTest(t)() s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) sqlutils.CreateTestInterleavedHierarchy(t, sqlDB) @@ -362,7 +362,7 @@ func TestMaximalJoinPrefix(t *testing.T) { defer leaktest.AfterTest(t)() s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) sqlutils.CreateTestInterleavedHierarchy(t, sqlDB) @@ -472,7 +472,7 @@ func TestAlignInterleavedSpans(t *testing.T) { defer leaktest.AfterTest(t)() s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) sqlutils.CreateTestInterleavedHierarchy(t, sqlDB) @@ -806,7 +806,7 @@ func TestInterleavedNodes(t *testing.T) { defer leaktest.AfterTest(t)() s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) sqlutils.CreateTestInterleavedHierarchy(t, sqlDB) diff --git a/pkg/sql/drop_test.go b/pkg/sql/drop_test.go index 45e9e5d5b590..b09b04179d03 100644 --- a/pkg/sql/drop_test.go +++ b/pkg/sql/drop_test.go @@ -131,8 +131,8 @@ func TestDropDatabase(t *testing.T) { defer leaktest.AfterTest(t)() 
params, _ := tests.CreateTestServerParams() s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) - ctx := context.TODO() + defer s.Stopper().Stop(context.Background()) + ctx := context.Background() // Fix the column families so the key counts below don't change if the // family heuristics are updated. @@ -262,8 +262,8 @@ func TestDropDatabaseEmpty(t *testing.T) { defer leaktest.AfterTest(t)() params, _ := tests.CreateTestServerParams() s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) - ctx := context.TODO() + defer s.Stopper().Stop(context.Background()) + ctx := context.Background() if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -308,8 +308,8 @@ func TestDropDatabaseDeleteData(t *testing.T) { params, _ := tests.CreateTestServerParams() s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) - ctx := context.TODO() + defer s.Stopper().Stop(context.Background()) + ctx := context.Background() // Disable strict GC TTL enforcement because we're going to shove a zero-value // TTL into the system with addImmediateGCZoneConfig. @@ -489,7 +489,7 @@ func TestShowTablesAfterRecreateDatabase(t *testing.T) { }, } s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -539,7 +539,7 @@ func TestDropIndex(t *testing.T) { }, } s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) // Disable strict GC TTL enforcement because we're going to shove a zero-value // TTL into the system with addImmediateGCZoneConfig. @@ -704,7 +704,7 @@ func TestDropIndexInterleaved(t *testing.T) { }, } s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) numRows := 2*chunkSize + 1 tests.CreateKVInterleavedTable(t, sqlDB, numRows) @@ -732,8 +732,8 @@ func TestDropTable(t *testing.T) { defer leaktest.AfterTest(t)() params, _ := tests.CreateTestServerParams() s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) - ctx := context.TODO() + defer s.Stopper().Stop(context.Background()) + ctx := context.Background() numRows := 2*sql.TableTruncateChunkSize + 1 if err := tests.CreateKVTable(sqlDB, "kv", numRows); err != nil { @@ -821,8 +821,8 @@ func TestDropTableDeleteData(t *testing.T) { defer gcjob.SetSmallMaxGCIntervalForTest()() s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) - ctx := context.TODO() + defer s.Stopper().Stop(context.Background()) + ctx := context.Background() // Disable strict GC TTL enforcement because we're going to shove a zero-value // TTL into the system with addImmediateGCZoneConfig. 
@@ -1040,7 +1040,7 @@ func TestDropTableInterleavedDeleteData(t *testing.T) { defer gcjob.SetSmallMaxGCIntervalForTest()() s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) numRows := 2*sql.TableTruncateChunkSize + 1 tests.CreateKVInterleavedTable(t, sqlDB, numRows) @@ -1077,7 +1077,7 @@ func TestDropTableInTxn(t *testing.T) { defer leaktest.AfterTest(t)() params, _ := tests.CreateTestServerParams() s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -1124,7 +1124,7 @@ func TestDropDatabaseAfterDropTable(t *testing.T) { }, } s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if err := tests.CreateKVTable(sqlDB, "kv", 100); err != nil { t.Fatal(err) @@ -1162,7 +1162,7 @@ func TestDropAndCreateTable(t *testing.T) { params, _ := tests.CreateTestServerParams() params.UseDatabase = "test" s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := db.Exec(`CREATE DATABASE test`); err != nil { t.Fatal(err) @@ -1212,7 +1212,7 @@ func TestCommandsWhileTableBeingDropped(t *testing.T) { }, } s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) sql := ` CREATE DATABASE test; @@ -1266,7 +1266,7 @@ func TestDropNameReuse(t *testing.T) { } s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) sql := ` CREATE DATABASE test; diff --git a/pkg/sql/err_count_test.go b/pkg/sql/err_count_test.go index 8ef9e7e89639..7b6c55bce1b1 100644 --- a/pkg/sql/err_count_test.go +++ b/pkg/sql/err_count_test.go @@ -31,7 +31,7 @@ func TestErrorCounts(t *testing.T) { params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) count1 := telemetry.GetRawFeatureCounts()["errorcodes."+pgcode.Syntax] @@ -70,7 +70,7 @@ func TestUnimplementedCounts(t *testing.T) { params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := db.Exec("CREATE TABLE t(x INT8)"); err != nil { t.Fatal(err) @@ -96,7 +96,7 @@ func TestTransactionRetryErrorCounts(t *testing.T) { params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := db.Exec("CREATE TABLE accounts (id INT8 PRIMARY KEY, balance INT8)"); err != nil { t.Fatal(err) diff --git a/pkg/sql/explain_test.go b/pkg/sql/explain_test.go index bcc313333f42..ddb6b51ea303 100644 --- a/pkg/sql/explain_test.go +++ b/pkg/sql/explain_test.go @@ -25,7 +25,7 @@ func TestStatementReuses(t *testing.T) { params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) initStmts := []string{ `CREATE DATABASE d`, diff --git a/pkg/sql/flowinfra/cluster_test.go b/pkg/sql/flowinfra/cluster_test.go index 29483b8d7eca..7ec4ef94ab65 100644 --- a/pkg/sql/flowinfra/cluster_test.go 
+++ b/pkg/sql/flowinfra/cluster_test.go @@ -48,7 +48,7 @@ func TestClusterFlow(t *testing.T) { args := base.TestClusterArgs{ReplicationMode: base.ReplicationManual} tc := serverutils.StartTestCluster(t, 3, args) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) sumDigitsFn := func(row int) tree.Datum { sum := 0 @@ -326,7 +326,7 @@ func TestLimitedBufferingDeadlock(t *testing.T) { defer leaktest.AfterTest(t)() tc := serverutils.StartTestCluster(t, 1, base.TestClusterArgs{}) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) // Set up the following network - a simplification of the one described in // #17097 (the numbers on the streams are the StreamIDs in the spec below): @@ -418,9 +418,9 @@ func TestLimitedBufferingDeadlock(t *testing.T) { 0, // maxOffset ) txn := kv.NewTxnFromProto( - context.TODO(), tc.Server(0).DB(), tc.Server(0).NodeID(), + context.Background(), tc.Server(0).DB(), tc.Server(0).NodeID(), now, kv.RootTxn, &txnProto) - leafInputState := txn.GetLeafTxnInputState(context.TODO()) + leafInputState := txn.GetLeafTxnInputState(context.Background()) req := execinfrapb.SetupFlowRequest{ Version: execinfra.Version, @@ -505,7 +505,7 @@ func TestLimitedBufferingDeadlock(t *testing.T) { t.Fatal(err) } - stream, err := execinfrapb.NewDistSQLClient(conn).RunSyncFlow(context.TODO()) + stream, err := execinfrapb.NewDistSQLClient(conn).RunSyncFlow(context.Background()) if err != nil { t.Fatal(err) } @@ -525,7 +525,7 @@ func TestLimitedBufferingDeadlock(t *testing.T) { } t.Fatal(err) } - err = decoder.AddMessage(context.TODO(), msg) + err = decoder.AddMessage(context.Background(), msg) if err != nil { t.Fatal(err) } @@ -581,7 +581,7 @@ func TestDistSQLReadsFillGatewayID(t *testing.T) { }, }, }) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) db := tc.ServerConn(0) sqlutils.CreateTable(t, db, "t", @@ -690,7 +690,7 @@ func BenchmarkInfrastructure(b *testing.B) { b.Fatal(err) } } - msg := se.FormMessage(context.TODO()) + msg := se.FormMessage(context.Background()) valSpecs[i] = execinfrapb.ValuesCoreSpec{ Columns: msg.Typing, RawBytes: [][]byte{msg.Data.RawBytes}, @@ -733,9 +733,9 @@ func BenchmarkInfrastructure(b *testing.B) { 0, // maxOffset ) txn := kv.NewTxnFromProto( - context.TODO(), tc.Server(0).DB(), tc.Server(0).NodeID(), + context.Background(), tc.Server(0).DB(), tc.Server(0).NodeID(), now, kv.RootTxn, &txnProto) - leafInputState := txn.GetLeafTxnInputState(context.TODO()) + leafInputState := txn.GetLeafTxnInputState(context.Background()) for i := range reqs { reqs[i] = execinfrapb.SetupFlowRequest{ Version: execinfra.Version, @@ -803,13 +803,13 @@ func BenchmarkInfrastructure(b *testing.B) { } for i := 1; i < numNodes; i++ { - if resp, err := clients[i].SetupFlow(context.TODO(), &reqs[i]); err != nil { + if resp, err := clients[i].SetupFlow(context.Background(), &reqs[i]); err != nil { b.Fatal(err) } else if resp.Error != nil { b.Fatal(resp.Error) } } - stream, err := clients[0].RunSyncFlow(context.TODO()) + stream, err := clients[0].RunSyncFlow(context.Background()) if err != nil { b.Fatal(err) } @@ -829,7 +829,7 @@ func BenchmarkInfrastructure(b *testing.B) { } b.Fatal(err) } - err = decoder.AddMessage(context.TODO(), msg) + err = decoder.AddMessage(context.Background(), msg) if err != nil { b.Fatal(err) } diff --git a/pkg/sql/flowinfra/flow_registry_test.go b/pkg/sql/flowinfra/flow_registry_test.go index fafaa19adcda..821debf8c70b 100644 --- 
a/pkg/sql/flowinfra/flow_registry_test.go +++ b/pkg/sql/flowinfra/flow_registry_test.go @@ -40,7 +40,7 @@ func lookupFlow(fr *FlowRegistry, fid execinfrapb.FlowID, timeout time.Duration) if entry.flow != nil { return entry.flow } - entry = fr.waitForFlowLocked(context.TODO(), fid, timeout) + entry = fr.waitForFlowLocked(context.Background(), fid, timeout) if entry == nil { return nil } @@ -225,7 +225,7 @@ func TestStreamConnectionTimeout(t *testing.T) { streamID1: {receiver: RowInboundStreamHandler{consumer}, waitGroup: wg}, } if err := reg.RegisterFlow( - context.TODO(), id1, f1, inboundStreams, jiffy, + context.Background(), id1, f1, inboundStreams, jiffy, ); err != nil { t.Fatal(err) } @@ -255,7 +255,7 @@ func TestStreamConnectionTimeout(t *testing.T) { } defer cleanup() - _, _, _, err = reg.ConnectInboundStream(context.TODO(), id1, streamID1, serverStream, jiffy) + _, _, _, err = reg.ConnectInboundStream(context.Background(), id1, streamID1, serverStream, jiffy) if !testutils.IsError(err, "came too late") { t.Fatalf("expected %q, got: %v", "came too late", err) } @@ -263,7 +263,7 @@ func TestStreamConnectionTimeout(t *testing.T) { // Unregister the flow. Subsequent attempts to connect a stream should result // in a different error than before. reg.UnregisterFlow(id1) - _, _, _, err = reg.ConnectInboundStream(context.TODO(), id1, streamID1, serverStream, jiffy) + _, _, _, err = reg.ConnectInboundStream(context.Background(), id1, streamID1, serverStream, jiffy) if !testutils.IsError(err, "not found") { t.Fatalf("expected %q, got: %v", "not found", err) } @@ -308,7 +308,7 @@ func TestHandshake(t *testing.T) { // async because the consumer is not yet there and ConnectInboundStream // is blocking. if _, _, _, err := reg.ConnectInboundStream( - context.TODO(), flowID, streamID, serverStream, time.Hour, + context.Background(), flowID, streamID, serverStream, time.Hour, ); err != nil { t.Error(err) } @@ -322,7 +322,7 @@ func TestHandshake(t *testing.T) { streamID: {receiver: RowInboundStreamHandler{consumer}, waitGroup: wg}, } if err := reg.RegisterFlow( - context.TODO(), flowID, f1, inboundStreams, time.Hour, /* timeout */ + context.Background(), flowID, f1, inboundStreams, time.Hour, /* timeout */ ); err != nil { t.Fatal(err) } @@ -375,7 +375,7 @@ func TestHandshake(t *testing.T) { func TestFlowRegistryDrain(t *testing.T) { defer leaktest.AfterTest(t)() - ctx := context.TODO() + ctx := context.Background() reg := NewFlowRegistry(0) flow := &FlowBase{} diff --git a/pkg/sql/flowinfra/outbox_test.go b/pkg/sql/flowinfra/outbox_test.go index e62bfbbb2e5c..e46a65051dbe 100644 --- a/pkg/sql/flowinfra/outbox_test.go +++ b/pkg/sql/flowinfra/outbox_test.go @@ -51,7 +51,7 @@ func TestOutbox(t *testing.T) { // Create a mock server that the outbox will connect and push rows to. stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) clusterID, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(clock, stopper, execinfra.StaticNodeID) if err != nil { @@ -75,7 +75,7 @@ func TestOutbox(t *testing.T) { outbox := NewOutbox(&flowCtx, execinfra.StaticNodeID, flowID, streamID) outbox.Init(sqlbase.OneIntCol) var outboxWG sync.WaitGroup - ctx, cancel := context.WithCancel(context.TODO()) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() // Start the outbox. This should cause the stream to connect, even though // we're not sending any rows. 
@@ -140,7 +140,7 @@ func TestOutbox(t *testing.T) { } t.Fatal(err) } - err = decoder.AddMessage(context.TODO(), msg) + err = decoder.AddMessage(context.Background(), msg) if err != nil { t.Fatal(err) } @@ -206,7 +206,7 @@ func TestOutboxInitializesStreamBeforeReceivingAnyRows(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) clusterID, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(clock, stopper, execinfra.StaticNodeID) if err != nil { @@ -231,7 +231,7 @@ func TestOutboxInitializesStreamBeforeReceivingAnyRows(t *testing.T) { outbox := NewOutbox(&flowCtx, execinfra.StaticNodeID, flowID, streamID) var outboxWG sync.WaitGroup - ctx, cancel := context.WithCancel(context.TODO()) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() outbox.Init(sqlbase.OneIntCol) // Start the outbox. This should cause the stream to connect, even though @@ -280,7 +280,7 @@ func TestOutboxClosesWhenConsumerCloses(t *testing.T) { for _, tc := range testCases { t.Run("", func(t *testing.T) { stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) clusterID, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(clock, stopper, execinfra.StaticNodeID) if err != nil { @@ -306,7 +306,7 @@ func TestOutboxClosesWhenConsumerCloses(t *testing.T) { var wg sync.WaitGroup var expectedErr error consumerReceivedMsg := make(chan struct{}) - ctx, cancel := context.WithCancel(context.TODO()) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() if tc.outboxIsClient { outbox = NewOutbox(&flowCtx, execinfra.StaticNodeID, flowID, streamID) @@ -416,7 +416,7 @@ func TestOutboxCancelsFlowOnError(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) clusterID, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(clock, stopper, execinfra.StaticNodeID) if err != nil { @@ -440,7 +440,7 @@ func TestOutboxCancelsFlowOnError(t *testing.T) { streamID := execinfrapb.StreamID(42) var outbox *Outbox var wg sync.WaitGroup - ctx, cancel := context.WithCancel(context.TODO()) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() // We could test this on ctx.cancel(), but this mock @@ -474,7 +474,7 @@ func TestOutboxUnblocksProducers(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - ctx := context.TODO() + ctx := context.Background() defer stopper.Stop(ctx) st := cluster.MakeTestingClusterSettings() @@ -533,7 +533,7 @@ func BenchmarkOutbox(b *testing.B) { // Create a mock server that the outbox will connect and push rows to. 
stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) clock := hlc.NewClock(hlc.UnixNano, time.Nanosecond) clusterID, mockServer, addr, err := execinfrapb.StartMockDistSQLServer(clock, stopper, execinfra.StaticNodeID) if err != nil { @@ -563,7 +563,7 @@ func BenchmarkOutbox(b *testing.B) { outbox := NewOutbox(&flowCtx, execinfra.StaticNodeID, flowID, streamID) outbox.Init(sqlbase.MakeIntCols(numCols)) var outboxWG sync.WaitGroup - ctx, cancel := context.WithCancel(context.TODO()) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() // Start the outbox. This should cause the stream to connect, even though // we're not sending any rows. diff --git a/pkg/sql/flowinfra/server_test.go b/pkg/sql/flowinfra/server_test.go index daf863516a65..12eaa4113db7 100644 --- a/pkg/sql/flowinfra/server_test.go +++ b/pkg/sql/flowinfra/server_test.go @@ -100,7 +100,7 @@ func TestServer(t *testing.T) { } t.Fatal(err) } - err = decoder.AddMessage(context.TODO(), msg) + err = decoder.AddMessage(context.Background(), msg) if err != nil { t.Fatal(err) } @@ -163,7 +163,7 @@ func TestDistSQLServerGossipsVersion(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) var v execinfrapb.DistSQLVersionGossipInfo if err := s.GossipI().(*gossip.Gossip).GetInfoProto( diff --git a/pkg/sql/flowinfra/stream_data_test.go b/pkg/sql/flowinfra/stream_data_test.go index 85e3c0d12311..95090ece2958 100644 --- a/pkg/sql/flowinfra/stream_data_test.go +++ b/pkg/sql/flowinfra/stream_data_test.go @@ -68,17 +68,17 @@ func testRowStream(tb testing.TB, rng *rand.Rand, types []*types.T, records []ro } numRows++ } else { - se.AddMetadata(context.TODO(), records[rowIdx].meta) + se.AddMetadata(context.Background(), records[rowIdx].meta) numMeta++ } } // "Send" a message every now and then and once at the end. final := (rowIdx == len(records)) if final || (rowIdx > 0 && rng.Intn(10) == 0) { - msg := se.FormMessage(context.TODO()) + msg := se.FormMessage(context.Background()) // Make a copy of the data buffer. msg.Data.RawBytes = append([]byte(nil), msg.Data.RawBytes...) 
- err := sd.AddMessage(context.TODO(), msg) + err := sd.AddMessage(context.Background(), msg) if err != nil { tb.Fatal(err) } @@ -132,8 +132,8 @@ func TestEmptyStreamEncodeDecode(t *testing.T) { defer leaktest.AfterTest(t)() var se StreamEncoder var sd StreamDecoder - msg := se.FormMessage(context.TODO()) - if err := sd.AddMessage(context.TODO(), msg); err != nil { + msg := se.FormMessage(context.Background()) + if err := sd.AddMessage(context.Background(), msg); err != nil { t.Fatal(err) } if msg.Header == nil { diff --git a/pkg/sql/flowinfra/utils_test.go b/pkg/sql/flowinfra/utils_test.go index 2dfb1d5e174a..4bccc69aec92 100644 --- a/pkg/sql/flowinfra/utils_test.go +++ b/pkg/sql/flowinfra/utils_test.go @@ -50,7 +50,7 @@ func createDummyStream() ( return nil, nil, nil, err } client := execinfrapb.NewDistSQLClient(conn) - clientStream, err = client.FlowStream(context.TODO()) + clientStream, err = client.FlowStream(context.Background()) if err != nil { return nil, nil, nil, err } @@ -58,7 +58,7 @@ func createDummyStream() ( serverStream = streamNotification.Stream cleanup = func() { close(streamNotification.Donec) - stopper.Stop(context.TODO()) + stopper.Stop(context.Background()) } return serverStream, clientStream, cleanup, nil } diff --git a/pkg/sql/indexbackfiller_test.go b/pkg/sql/indexbackfiller_test.go index dd6c52634b6c..5f5382e84e39 100644 --- a/pkg/sql/indexbackfiller_test.go +++ b/pkg/sql/indexbackfiller_test.go @@ -63,7 +63,7 @@ func TestIndexBackfiller(t *testing.T) { base.TestClusterArgs{ ServerArgs: params, }) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) sqlDB := tc.ServerConn(0) execOrFail := func(query string) gosql.Result { diff --git a/pkg/sql/internal_test.go b/pkg/sql/internal_test.go index 3814a75bf94f..313206a23cc2 100644 --- a/pkg/sql/internal_test.go +++ b/pkg/sql/internal_test.go @@ -33,7 +33,7 @@ import ( func TestInternalExecutor(t *testing.T) { defer leaktest.AfterTest(t)() - ctx := context.TODO() + ctx := context.Background() params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) defer s.Stopper().Stop(ctx) @@ -106,7 +106,7 @@ func TestInternalExecutor(t *testing.T) { func TestQueryIsAdminWithNoTxn(t *testing.T) { defer leaktest.AfterTest(t)() - ctx := context.TODO() + ctx := context.Background() params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) defer s.Stopper().Stop(ctx) @@ -148,7 +148,7 @@ func TestQueryIsAdminWithNoTxn(t *testing.T) { func TestSessionBoundInternalExecutor(t *testing.T) { defer leaktest.AfterTest(t)() - ctx := context.TODO() + ctx := context.Background() params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) defer s.Stopper().Stop(ctx) @@ -209,7 +209,7 @@ func TestInternalExecAppNameInitialization(t *testing.T) { t.Run("root internal exec", func(t *testing.T) { s, _, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) testInternalExecutorAppNameInitialization(t, sem, sqlbase.InternalAppNamePrefix+"-test-query", // app name in SHOW @@ -221,10 +221,10 @@ func TestInternalExecAppNameInitialization(t *testing.T) { // as to reset the statement statistics properly. 
t.Run("session bound exec", func(t *testing.T) { s, _, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) ie := sql.MakeInternalExecutor( - context.TODO(), + context.Background(), s.(*server.TestServer).Server.PGServer().SQLServer, sql.MemoryMetrics{}, s.ExecutorConfig().(sql.ExecutorConfig).Settings, @@ -261,7 +261,7 @@ func testInternalExecutorAppNameInitialization( ie testInternalExecutor, ) { // Check that the application_name is set properly in the executor. - if rows, err := ie.Query(context.TODO(), "test-query", nil, + if rows, err := ie.Query(context.Background(), "test-query", nil, "SHOW application_name"); err != nil { t.Fatal(err) } else if len(rows) != 1 { @@ -274,7 +274,7 @@ func testInternalExecutorAppNameInitialization( // have this keep running until we cancel it below. errChan := make(chan error) go func() { - _, err := ie.Query(context.TODO(), + _, err := ie.Query(context.Background(), "test-query", nil, /* txn */ "SELECT pg_sleep(1337666)") @@ -290,7 +290,7 @@ func testInternalExecutorAppNameInitialization( // When it does, we capture the query ID. var queryID string testutils.SucceedsSoon(t, func() error { - rows, err := ie.Query(context.TODO(), + rows, err := ie.Query(context.Background(), "find-query", nil, /* txn */ // We need to assemble the magic string so that this SELECT @@ -319,7 +319,7 @@ func testInternalExecutorAppNameInitialization( }) // Check that the query shows up in the internal tables without error. - if rows, err := ie.Query(context.TODO(), "find-query", nil, + if rows, err := ie.Query(context.Background(), "find-query", nil, "SELECT application_name FROM crdb_internal.node_queries WHERE query LIKE '%337' || '666%'"); err != nil { t.Fatal(err) } else if len(rows) != 1 { @@ -331,7 +331,7 @@ func testInternalExecutorAppNameInitialization( // We'll want to look at statistics below, and finish the test with // no goroutine leakage. To achieve this, cancel the query. and // drain the goroutine. - if _, err := ie.Exec(context.TODO(), "cancel-query", nil, "CANCEL QUERY $1", queryID); err != nil { + if _, err := ie.Exec(context.Background(), "cancel-query", nil, "CANCEL QUERY $1", queryID); err != nil { t.Fatal(err) } select { @@ -344,7 +344,7 @@ func testInternalExecutorAppNameInitialization( } // Now check that it was properly registered in statistics. - if rows, err := ie.Query(context.TODO(), "find-query", nil, + if rows, err := ie.Query(context.Background(), "find-query", nil, "SELECT application_name FROM crdb_internal.node_statement_statistics WHERE key LIKE 'SELECT' || ' pg_sleep(%'"); err != nil { t.Fatal(err) } else if len(rows) != 1 { diff --git a/pkg/sql/lease_internal_test.go b/pkg/sql/lease_internal_test.go index 3e5f0fc7d9e4..a4f194925732 100644 --- a/pkg/sql/lease_internal_test.go +++ b/pkg/sql/lease_internal_test.go @@ -131,7 +131,7 @@ func TestPurgeOldVersions(t *testing.T) { }, } s, db, kvDB := serverutils.StartServer(t, serverParams) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) leaseManager := s.LeaseManager().(*LeaseManager) // Block gossip. 
gossipSem <- struct{}{} @@ -153,10 +153,10 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); var expiration hlc.Timestamp getLeases := func() { for i := 0; i < 3; i++ { - if err := leaseManager.AcquireFreshestFromStore(context.TODO(), tableDesc.ID); err != nil { + if err := leaseManager.AcquireFreshestFromStore(context.Background(), tableDesc.ID); err != nil { t.Fatal(err) } - table, exp, err := leaseManager.Acquire(context.TODO(), s.Clock().Now(), tableDesc.ID) + table, exp, err := leaseManager.Acquire(context.Background(), s.Clock().Now(), tableDesc.ID) if err != nil { t.Fatal(err) } @@ -174,14 +174,14 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); } // Verifies that errDidntUpdateDescriptor doesn't leak from Publish(). - if _, err := leaseManager.Publish(context.TODO(), tableDesc.ID, func(*sqlbase.MutableTableDescriptor) error { + if _, err := leaseManager.Publish(context.Background(), tableDesc.ID, func(*sqlbase.MutableTableDescriptor) error { return errDidntUpdateDescriptor }, nil); err != nil { t.Fatal(err) } // Publish a new version for the table - if _, err := leaseManager.Publish(context.TODO(), tableDesc.ID, func(*sqlbase.MutableTableDescriptor) error { + if _, err := leaseManager.Publish(context.Background(), tableDesc.ID, func(*sqlbase.MutableTableDescriptor) error { return nil }, nil); err != nil { t.Fatal(err) @@ -193,7 +193,7 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); t.Fatalf("found %d versions instead of 2", numLeases) } if err := purgeOldVersions( - context.TODO(), kvDB, tableDesc.ID, false, 2 /* minVersion */, leaseManager); err != nil { + context.Background(), kvDB, tableDesc.ID, false, 2 /* minVersion */, leaseManager); err != nil { t.Fatal(err) } @@ -225,7 +225,7 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); t.Fatalf("found %d versions instead of 2", numLeases) } if err := purgeOldVersions( - context.TODO(), kvDB, tableDesc.ID, false, 2 /* minVersion */, leaseManager); err != nil { + context.Background(), kvDB, tableDesc.ID, false, 2 /* minVersion */, leaseManager); err != nil { t.Fatal(err) } if numLeases := getNumVersions(ts); numLeases != 1 { @@ -239,7 +239,7 @@ func TestNameCacheDBConflictingTableNames(t *testing.T) { defer leaktest.AfterTest(t)() s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) leaseManager := s.LeaseManager().(*LeaseManager) if _, err := db.Exec(`SET experimental_enable_temp_tables = true`); err != nil { @@ -287,7 +287,7 @@ CREATE TEMP TABLE t2 (temp int); func TestNameCacheIsUpdated(t *testing.T) { defer leaktest.AfterTest(t)() s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) leaseManager := s.LeaseManager().(*LeaseManager) if _, err := db.Exec(` @@ -358,7 +358,7 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); func TestNameCacheEntryDoesntReturnExpiredLease(t *testing.T) { defer leaktest.AfterTest(t)() s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) leaseManager := s.LeaseManager().(*LeaseManager) const tableName = "test" @@ -408,7 +408,7 @@ func TestNameCacheContainsLatestLease(t *testing.T) { }, } s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{Knobs: testingKnobs}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) leaseManager := 
s.LeaseManager().(*LeaseManager) const tableName = "test" @@ -436,7 +436,7 @@ CREATE TABLE t.%s (k CHAR PRIMARY KEY, v CHAR); tracker := removalTracker.TrackRemoval(&lease.ImmutableTableDescriptor) // Acquire another lease. - if _, err := acquireNodeLease(context.TODO(), leaseManager, tableDesc.ID); err != nil { + if _, err := acquireNodeLease(context.Background(), leaseManager, tableDesc.ID); err != nil { t.Fatal(err) } @@ -467,7 +467,7 @@ CREATE TABLE t.%s (k CHAR PRIMARY KEY, v CHAR); func TestTableNameCaseSensitive(t *testing.T) { defer leaktest.AfterTest(t)() s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) leaseManager := s.LeaseManager().(*LeaseManager) if _, err := db.Exec(` @@ -508,7 +508,7 @@ func TestReleaseAcquireByNameDeadlock(t *testing.T) { } s, sqlDB, kvDB := serverutils.StartServer( t, base.TestServerArgs{Knobs: testingKnobs}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) leaseManager := s.LeaseManager().(*LeaseManager) if _, err := sqlDB.Exec(` @@ -521,7 +521,7 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") // Populate the name cache. - ctx := context.TODO() + ctx := context.Background() table, _, err := leaseManager.AcquireByName( ctx, leaseManager.clock.Now(), @@ -551,7 +551,7 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); for i := 0; i < 50; i++ { timestamp := leaseManager.clock.Now() - ctx := context.TODO() + ctx := context.Background() table, _, err := leaseManager.AcquireByName( ctx, timestamp, @@ -624,7 +624,7 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); func TestAcquireFreshestFromStoreRaces(t *testing.T) { defer leaktest.AfterTest(t)() s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) leaseManager := s.LeaseManager().(*LeaseManager) if _, err := db.Exec(` @@ -642,10 +642,10 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); for i := 0; i < numRoutines; i++ { go func() { defer wg.Done() - if err := leaseManager.AcquireFreshestFromStore(context.TODO(), tableDesc.ID); err != nil { + if err := leaseManager.AcquireFreshestFromStore(context.Background(), tableDesc.ID); err != nil { t.Error(err) } - table, _, err := leaseManager.Acquire(context.TODO(), s.Clock().Now(), tableDesc.ID) + table, _, err := leaseManager.Acquire(context.Background(), s.Clock().Now(), tableDesc.ID) if err != nil { t.Error(err) } @@ -676,7 +676,7 @@ func TestParallelLeaseAcquireWithImmediateRelease(t *testing.T) { } s, sqlDB, kvDB := serverutils.StartServer( t, base.TestServerArgs{Knobs: testingKnobs}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) leaseManager := s.LeaseManager().(*LeaseManager) if _, err := sqlDB.Exec(` @@ -695,7 +695,7 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); for i := 0; i < numRoutines; i++ { go func() { defer wg.Done() - table, _, err := leaseManager.Acquire(context.TODO(), now, tableDesc.ID) + table, _, err := leaseManager.Acquire(context.Background(), now, tableDesc.ID) if err != nil { t.Error(err) } @@ -836,7 +836,7 @@ func TestLeaseAcquireAndReleaseConcurrently(t *testing.T) { s, _, _ := serverutils.StartServer( t, serverArgs) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) leaseManager := s.LeaseManager().(*LeaseManager) 
acquireResultChan := make(chan Result) diff --git a/pkg/sql/lease_test.go b/pkg/sql/lease_test.go index 29284984c4c6..6130790bf787 100644 --- a/pkg/sql/lease_test.go +++ b/pkg/sql/lease_test.go @@ -80,7 +80,7 @@ func newLeaseTest(tb testing.TB, params base.TestServerArgs) *leaseTest { } func (t *leaseTest) cleanup() { - t.server.Stopper().Stop(context.TODO()) + t.server.Stopper().Stop(context.Background()) } func (t *leaseTest) getLeases(descID sqlbase.ID) string { @@ -123,14 +123,14 @@ func (t *leaseTest) expectLeases(descID sqlbase.ID, expected string) { func (t *leaseTest) acquire( nodeID uint32, descID sqlbase.ID, ) (*sqlbase.ImmutableTableDescriptor, hlc.Timestamp, error) { - return t.node(nodeID).Acquire(context.TODO(), t.server.Clock().Now(), descID) + return t.node(nodeID).Acquire(context.Background(), t.server.Clock().Now(), descID) } func (t *leaseTest) acquireMinVersion( nodeID uint32, descID sqlbase.ID, minVersion sqlbase.DescriptorVersion, ) (*sqlbase.ImmutableTableDescriptor, hlc.Timestamp, error) { return t.node(nodeID).AcquireAndAssertMinVersion( - context.TODO(), t.server.Clock().Now(), descID, minVersion) + context.Background(), t.server.Clock().Now(), descID, minVersion) } func (t *leaseTest) mustAcquire( @@ -240,7 +240,7 @@ func TestLeaseManager(testingT *testing.T) { defer t.cleanup() const descID = keys.LeaseTableID - ctx := context.TODO() + ctx := context.Background() // We can't acquire a lease on a non-existent table. expected := "descriptor not found" @@ -402,7 +402,7 @@ func TestLeaseManagerPublishVersionChanged(testingT *testing.T) { wg.Add(2) go func(n1update, n2start chan struct{}) { - _, err := n1.Publish(context.TODO(), descID, func(*sqlbase.MutableTableDescriptor) error { + _, err := n1.Publish(context.Background(), descID, func(*sqlbase.MutableTableDescriptor) error { if n2start != nil { // Signal node 2 to start. close(n2start) @@ -423,7 +423,7 @@ func TestLeaseManagerPublishVersionChanged(testingT *testing.T) { // Wait for node 1 signal indicating that node 1 is in its update() // function. 
<-n2start - _, err := n2.Publish(context.TODO(), descID, func(*sqlbase.MutableTableDescriptor) error { + _, err := n2.Publish(context.Background(), descID, func(*sqlbase.MutableTableDescriptor) error { return nil }, nil) if err != nil { @@ -446,14 +446,14 @@ func TestLeaseManagerPublishIllegalVersionChange(testingT *testing.T) { defer t.cleanup() if _, err := t.node(1).Publish( - context.TODO(), keys.LeaseTableID, func(table *sqlbase.MutableTableDescriptor) error { + context.Background(), keys.LeaseTableID, func(table *sqlbase.MutableTableDescriptor) error { table.Version++ return nil }, nil); !testutils.IsError(err, "updated version") { t.Fatalf("unexpected error: %+v", err) } if _, err := t.node(1).Publish( - context.TODO(), keys.LeaseTableID, func(table *sqlbase.MutableTableDescriptor) error { + context.Background(), keys.LeaseTableID, func(table *sqlbase.MutableTableDescriptor) error { table.Version-- return nil }, nil); !testutils.IsError(err, "updated version") { @@ -616,7 +616,7 @@ func TestLeasesOnDeletedTableAreReleasedImmediately(t *testing.T) { GCJob: &sql.GCJobTestingKnobs{RunBeforeResume: func(_ int64) error { select {} }}, } s, db, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) stmt := ` CREATE DATABASE test; @@ -628,7 +628,7 @@ CREATE TABLE test.t(a INT PRIMARY KEY); } tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "test", "t") - ctx := context.TODO() + ctx := context.Background() lease1, _, err := acquire(ctx, s.(*server.TestServer), tableDesc.ID) if err != nil { @@ -713,7 +713,7 @@ func TestSubqueryLeases(t *testing.T) { }, } s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -780,7 +780,7 @@ func TestAsOfSystemTimeUsesCache(t *testing.T) { }, } s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -840,7 +840,7 @@ func TestDescriptorRefreshOnRetry(t *testing.T) { }, } s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -905,7 +905,7 @@ func TestTxnObeysTableModificationTime(t *testing.T) { defer leaktest.AfterTest(t)() params, _ := tests.CreateTestServerParams() s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) // Disable strict GC TTL enforcement because we're going to shove a zero-value // TTL into the system with addImmediateGCZoneConfig. @@ -1123,7 +1123,7 @@ func TestLeaseAtLatestVersion(t *testing.T) { }, } s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` BEGIN; @@ -1154,7 +1154,7 @@ INSERT INTO t.timestamp VALUES ('a', 'b'); // Increment the table version after the txn has started. leaseMgr := s.LeaseManager().(*sql.LeaseManager) if _, err := leaseMgr.Publish( - context.TODO(), tableDesc.ID, func(table *sqlbase.MutableTableDescriptor) error { + context.Background(), tableDesc.ID, func(table *sqlbase.MutableTableDescriptor) error { // Do nothing: increments the version. 
return nil }, nil); err != nil { @@ -1205,7 +1205,7 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); // Acquire the lease so it is put into the tableNameCache. _, _, err := leaseManager.AcquireByName( - context.TODO(), + context.Background(), t.server.Clock().Now(), dbID, tableDesc.GetParentSchemaID(), @@ -1220,7 +1220,7 @@ CREATE TABLE t.test (k CHAR PRIMARY KEY, v CHAR); b.RunParallel(func(pb *testing.PB) { for pb.Next() { _, _, err := leaseManager.AcquireByName( - context.TODO(), + context.Background(), t.server.Clock().Now(), dbID, tableDesc.GetParentSchemaID(), @@ -1407,7 +1407,7 @@ func TestIncrementTableVersion(t *testing.T) { }, } s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -1509,7 +1509,7 @@ func TestTwoVersionInvariantRetryError(t *testing.T) { }, } s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -1614,7 +1614,7 @@ CREATE TABLE t.test0 (k CHAR PRIMARY KEY, v CHAR); t.Fatal(err) } - ctx := context.TODO() + ctx := context.Background() // When to end the test. end := timeutil.Now().Add(maxTime) @@ -1812,7 +1812,7 @@ func TestReadBeforeDrop(t *testing.T) { defer leaktest.AfterTest(t)() params, _ := tests.CreateTestServerParams() s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -1863,7 +1863,7 @@ func TestTableCreationPushesTxnsInRecentPast(t *testing.T) { ReplicationMode: base.ReplicationManual, ServerArgs: params, }) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) sqlDB := tc.ServerConn(0) if _, err := sqlDB.Exec(` diff --git a/pkg/sql/logictest/parallel_test.go b/pkg/sql/logictest/parallel_test.go index 692f7f893624..518d7b58747d 100644 --- a/pkg/sql/logictest/parallel_test.go +++ b/pkg/sql/logictest/parallel_test.go @@ -59,7 +59,7 @@ type parallelTest struct { func (t *parallelTest) close() { t.clients = nil if t.cluster != nil { - t.cluster.Stopper().Stop(context.TODO()) + t.cluster.Stopper().Stop(context.Background()) } } diff --git a/pkg/sql/metric_test.go b/pkg/sql/metric_test.go index 5419a401f3d9..a42d1ed4fa2a 100644 --- a/pkg/sql/metric_test.go +++ b/pkg/sql/metric_test.go @@ -62,7 +62,7 @@ func TestQueryCounts(t *testing.T) { }, } s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) var testcases = []queryCounter{ // The counts are deltas for each query. 
@@ -170,7 +170,7 @@ func TestAbortCountConflictingWrites(t *testing.T) { testutils.RunTrueAndFalse(t, "retry loop", func(t *testing.T, retry bool) { params, cmdFilters := tests.CreateTestServerParams() s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) accum := initializeQueryCounter(s) @@ -274,7 +274,7 @@ func TestAbortCountErrorDuringTransaction(t *testing.T) { defer leaktest.AfterTest(t)() params, _ := tests.CreateTestServerParams() s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) accum := initializeQueryCounter(s) @@ -308,7 +308,7 @@ func TestSavepointMetrics(t *testing.T) { params, _ := tests.CreateTestServerParams() s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) accum := initializeQueryCounter(s) diff --git a/pkg/sql/mutation_test.go b/pkg/sql/mutation_test.go index aeae65df3ab8..8f5ba5200b14 100644 --- a/pkg/sql/mutation_test.go +++ b/pkg/sql/mutation_test.go @@ -30,7 +30,7 @@ func TestConstraintValidationBeforeBuffering(t *testing.T) { params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := db.Exec(` CREATE DATABASE d; diff --git a/pkg/sql/namespace_test.go b/pkg/sql/namespace_test.go index 0917d9f75496..95ce1a366dd7 100644 --- a/pkg/sql/namespace_test.go +++ b/pkg/sql/namespace_test.go @@ -34,8 +34,8 @@ func TestNamespaceTableSemantics(t *testing.T) { defer leaktest.AfterTest(t)() params, _ := tests.CreateTestServerParams() s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) - ctx := context.TODO() + defer s.Stopper().Stop(context.Background()) + ctx := context.Background() codec := keys.SystemSQLCodec // IDs to map (parentID, name) to. Actual ID value is irrelevant to the test. diff --git a/pkg/sql/opt/bench/bench_test.go b/pkg/sql/opt/bench/bench_test.go index f069c9856b3d..7d6b56ab16ae 100644 --- a/pkg/sql/opt/bench/bench_test.go +++ b/pkg/sql/opt/bench/bench_test.go @@ -350,7 +350,7 @@ func newHarness() *harness { func (h *harness) close() { if h.s != nil { - h.s.Stopper().Stop(context.TODO()) + h.s.Stopper().Stop(context.Background()) } } diff --git a/pkg/sql/opt/bench/fk_test.go b/pkg/sql/opt/bench/fk_test.go index 037c2759139a..32e1c668d1e4 100644 --- a/pkg/sql/opt/bench/fk_test.go +++ b/pkg/sql/opt/bench/fk_test.go @@ -41,7 +41,7 @@ func runFKBench( for _, cfg := range configs { b.Run(cfg.name, func(b *testing.B) { s, db, _ := serverutils.StartServer(b, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) r := sqlutils.MakeSQLRunner(db) // Don't let auto stats interfere with the test. Stock stats are // sufficient to get the right plans (i.e. lookup join). 
diff --git a/pkg/sql/opt/metadata_test.go b/pkg/sql/opt/metadata_test.go index 014294d04fc6..6de8eb843bf6 100644 --- a/pkg/sql/opt/metadata_test.go +++ b/pkg/sql/opt/metadata_test.go @@ -55,7 +55,7 @@ func TestMetadata(t *testing.T) { } md.AddDependency(opt.DepByName(&tab.TabName), tab, privilege.CREATE) - depsUpToDate, err := md.CheckDependencies(context.TODO(), testCat) + depsUpToDate, err := md.CheckDependencies(context.Background(), testCat) if err == nil || depsUpToDate { t.Fatalf("expected table privilege to be revoked") } @@ -82,7 +82,7 @@ func TestMetadata(t *testing.T) { t.Fatalf("unexpected view") } - depsUpToDate, err = md.CheckDependencies(context.TODO(), testCat) + depsUpToDate, err = md.CheckDependencies(context.Background(), testCat) if err == nil || depsUpToDate { t.Fatalf("expected table privilege to be revoked in metadata copy") } diff --git a/pkg/sql/pgwire/auth_test.go b/pkg/sql/pgwire/auth_test.go index 1eadda04dc0c..100368293f62 100644 --- a/pkg/sql/pgwire/auth_test.go +++ b/pkg/sql/pgwire/auth_test.go @@ -146,7 +146,7 @@ func hbaRunTest(t *testing.T, insecure bool) { s, conn, _ := serverutils.StartServer(t, base.TestServerArgs{Insecure: insecure, SocketFile: maybeSocketFile}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) // Enable conn/auth logging. // We can't use the cluster settings to do this, because diff --git a/pkg/sql/pgwire/conn_test.go b/pkg/sql/pgwire/conn_test.go index ba600c42f9ce..3726d5474b5d 100644 --- a/pkg/sql/pgwire/conn_test.go +++ b/pkg/sql/pgwire/conn_test.go @@ -73,7 +73,7 @@ func TestConn(t *testing.T) { // execute some metadata queries that pgx sends whenever it opens a // connection. s, _, _ := serverutils.StartServer(t, base.TestServerArgs{Insecure: true, UseDatabase: "system"}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) // Start a pgwire "server". addr := util.TestAddr @@ -82,10 +82,10 @@ func TestConn(t *testing.T) { t.Fatal(err) } serverAddr := ln.Addr() - log.Infof(context.TODO(), "started listener on %s", serverAddr) + log.Infof(context.Background(), "started listener on %s", serverAddr) var g errgroup.Group - ctx := context.TODO() + ctx := context.Background() var clientWG sync.WaitGroup clientWG.Add(1) @@ -288,7 +288,7 @@ func client(ctx context.Context, serverAddr net.Addr, wg *sync.WaitGroup) error batch := conn.BeginBatch() batch.Queue("select 7", nil, nil, nil) batch.Queue("select 8", nil, nil, nil) - if err := batch.Send(context.TODO(), &pgx.TxOptions{}); err != nil { + if err := batch.Send(context.Background(), &pgx.TxOptions{}); err != nil { return err } if err := batch.Close(); err != nil { @@ -332,7 +332,7 @@ func waitForClientConn(ln net.Listener) (*conn, error) { } // Consume the connection options. 
- if _, err := parseClientProvidedSessionParameters(context.TODO(), nil, &buf); err != nil { + if _, err := parseClientProvidedSessionParameters(context.Background(), nil, &buf); err != nil { return nil, err } @@ -602,7 +602,7 @@ func finishQuery(t finishType, c *conn) error { case describe: skipFinish = true if err := c.writeRowDescription( - context.TODO(), nil /* columns */, nil /* formatCodes */, c.conn, + context.Background(), nil /* columns */, nil /* formatCodes */, c.conn, ); err != nil { return err } @@ -641,7 +641,7 @@ func finishQuery(t finishType, c *conn) error { type pgxTestLogger struct{} func (l pgxTestLogger) Log(level pgx.LogLevel, msg string, data map[string]interface{}) { - log.Infof(context.TODO(), "pgx log [%s] %s - %s", level, msg, data) + log.Infof(context.Background(), "pgx log [%s] %s - %s", level, msg, data) } // pgxTestLogger implements pgx.Logger. @@ -655,7 +655,7 @@ func TestConnCloseReleasesLocks(t *testing.T) { // state. testutils.RunTrueAndFalse(t, "open state", func(t *testing.T, open bool) { s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) - ctx := context.TODO() + ctx := context.Background() defer s.Stopper().Stop(ctx) pgURL, cleanupFunc := sqlutils.PGUrl( @@ -723,7 +723,7 @@ func TestConnCloseReleasesLocks(t *testing.T) { func TestConnCloseWhileProducingRows(t *testing.T) { defer leaktest.AfterTest(t)() s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) - ctx := context.TODO() + ctx := context.Background() defer s.Stopper().Stop(ctx) // Disable results buffering. @@ -784,7 +784,7 @@ func TestConnCloseWhileProducingRows(t *testing.T) { func TestMaliciousInputs(t *testing.T) { defer leaktest.AfterTest(t)() - ctx := context.TODO() + ctx := context.Background() for _, tc := range [][]byte{ // This byte string sends a pgwirebase.ClientMsgClose message type. 
When @@ -870,7 +870,7 @@ func TestReadTimeoutConnExits(t *testing.T) { if err != nil { t.Fatal(err) } - log.Infof(context.TODO(), "started listener on %s", ln.Addr()) + log.Infof(context.Background(), "started listener on %s", ln.Addr()) defer func() { if err := ln.Close(); err != nil { t.Fatal(err) diff --git a/pkg/sql/pgwire/pgerror/pgcode_test.go b/pkg/sql/pgwire/pgerror/pgcode_test.go index 7fc8f3ad86b1..68674ce40372 100644 --- a/pkg/sql/pgwire/pgerror/pgcode_test.go +++ b/pkg/sql/pgwire/pgerror/pgcode_test.go @@ -63,8 +63,8 @@ func TestPGCode(t *testing.T) { tt.Run("local", func(tt testutils.T) { theTest(tt, origErr) }) - enc := errors.EncodeError(context.TODO(), origErr) - newErr := errors.DecodeError(context.TODO(), enc) + enc := errors.EncodeError(context.Background(), origErr) + newErr := errors.DecodeError(context.Background(), enc) tt.Run("remote", func(tt testutils.T) { theTest(tt, newErr) }) diff --git a/pkg/sql/pgwire/pgwire_test.go b/pkg/sql/pgwire/pgwire_test.go index c43b74baf5ca..24f1dc61c9e5 100644 --- a/pkg/sql/pgwire/pgwire_test.go +++ b/pkg/sql/pgwire/pgwire_test.go @@ -69,7 +69,7 @@ func TestPGWireDrainClient(t *testing.T) { params := base.TestServerArgs{Insecure: true} s, _, _ := serverutils.StartServer(t, params) - ctx := context.TODO() + ctx := context.Background() defer s.Stopper().Stop(ctx) host, port, err := net.SplitHostPort(s.ServingSQLAddr()) @@ -137,7 +137,7 @@ func TestPGWireDrainOngoingTxns(t *testing.T) { defer leaktest.AfterTest(t)() params := base.TestServerArgs{Insecure: true} s, _, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) host, port, err := net.SplitHostPort(s.ServingSQLAddr()) if err != nil { @@ -239,7 +239,7 @@ func TestPGUnwrapError(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) pgURL, cleanupFn := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(security.RootUser)) defer cleanupFn() @@ -266,7 +266,7 @@ func TestPGPrepareFail(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) pgURL, cleanupFn := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(security.RootUser)) defer cleanupFn() @@ -317,7 +317,7 @@ func TestPGPrepareWithCreateDropInTxn(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) pgURL, cleanupFn := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(security.RootUser)) defer cleanupFn() @@ -814,7 +814,7 @@ func TestPGPreparedQuery(t *testing.T) { } s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) pgURL, cleanupFn := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(security.RootUser)) defer cleanupFn() @@ -987,7 +987,7 @@ func (p preparedExecTest) RowsAffectedErr(err string) preparedExecTest { func TestPGPrepareDate(t *testing.T) { defer leaktest.AfterTest(t)() s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := db.Exec("CREATE TABLE test (t TIMESTAMPTZ)"); err != nil { t.Fatal(err) @@ 
-1230,7 +1230,7 @@ func TestPGPreparedExec(t *testing.T) { } s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) runTests := func( t *testing.T, query string, tests []preparedExecTest, execFunc func(...interface{}, @@ -1291,7 +1291,7 @@ func TestPGPreparedExec(t *testing.T) { func TestPGPrepareNameQual(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) pgURL, cleanupFn := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(security.RootUser)) defer cleanupFn() @@ -1342,7 +1342,7 @@ func TestPGPrepareNameQual(t *testing.T) { func TestPGPrepareInvalidate(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) pgURL, cleanupFn := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(security.RootUser)) defer cleanupFn() @@ -1407,7 +1407,7 @@ func TestPGPrepareInvalidate(t *testing.T) { func TestCmdCompleteVsEmptyStatements(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) pgURL, cleanupFn := sqlutils.PGUrl( t, s.ServingSQLAddr(), t.Name(), url.User(security.RootUser)) @@ -1451,7 +1451,7 @@ func TestCmdCompleteVsEmptyStatements(t *testing.T) { func TestPGCommandTags(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) pgURL, cleanupFn := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(security.RootUser)) defer cleanupFn() @@ -1572,7 +1572,7 @@ func TestSQLNetworkMetrics(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) // Setup pgwire client. pgURL, cleanupFn := sqlutils.PGUrl( @@ -1666,7 +1666,7 @@ func TestPGWireOverUnixSocket(t *testing.T) { SocketFile: socketFile, } s, _, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) // We can't pass socket paths as url.Host to libpq, use ?host=/... instead. 
options := url.Values{ @@ -1686,7 +1686,7 @@ func TestPGWireOverUnixSocket(t *testing.T) { func TestPGWireResultChange(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) pgURL, cleanupFn := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(security.RootUser)) defer cleanupFn() @@ -1752,7 +1752,7 @@ func TestSessionParameters(t *testing.T) { params := base.TestServerArgs{Insecure: true} s, _, _ := serverutils.StartServer(t, params) - ctx := context.TODO() + ctx := context.Background() defer s.Stopper().Stop(ctx) host, ports, _ := net.SplitHostPort(s.ServingSQLAddr()) @@ -1858,7 +1858,7 @@ func TestSessionParameters(t *testing.T) { type pgxTestLogger struct{} func (l pgxTestLogger) Log(level pgx.LogLevel, msg string, data map[string]interface{}) { - log.Infof(context.TODO(), "pgx log [%s] %s - %s", level, msg, data) + log.Infof(context.Background(), "pgx log [%s] %s - %s", level, msg, data) } // pgxTestLogger implements pgx.Logger. @@ -1871,7 +1871,7 @@ func TestCancelRequest(t *testing.T) { params := base.TestServerArgs{Insecure: insecure} s, _, _ := serverutils.StartServer(t, params) - ctx := context.TODO() + ctx := context.Background() defer s.Stopper().Stop(ctx) var d net.Dialer @@ -1908,7 +1908,7 @@ func TestFailPrepareFailsTxn(t *testing.T) { defer leaktest.AfterTest(t)() s, _, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) pgURL, cleanupFn := sqlutils.PGUrl(t, s.ServingSQLAddr(), t.Name(), url.User(security.RootUser)) defer cleanupFn() diff --git a/pkg/sql/pgwire_internal_test.go b/pkg/sql/pgwire_internal_test.go index 0ddda51d2373..6b88f3f2ecac 100644 --- a/pkg/sql/pgwire_internal_test.go +++ b/pkg/sql/pgwire_internal_test.go @@ -35,7 +35,7 @@ import ( func TestPGWireConnectionCloseReleasesLeases(t *testing.T) { defer leaktest.AfterTest(t)() s, _, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - ctx := context.TODO() + ctx := context.Background() defer s.Stopper().Stop(ctx) url, cleanupConn := sqlutils.PGUrl(t, s.ServingSQLAddr(), "SetupServer", url.User(security.RootUser)) defer cleanupConn() diff --git a/pkg/sql/physicalplan/aggregator_funcs_test.go b/pkg/sql/physicalplan/aggregator_funcs_test.go index c468583ddd9e..d6edfe095920 100644 --- a/pkg/sql/physicalplan/aggregator_funcs_test.go +++ b/pkg/sql/physicalplan/aggregator_funcs_test.go @@ -59,7 +59,7 @@ func runTestFlow( ) sqlbase.EncDatumRows { distSQLSrv := srv.DistSQLServer().(*distsql.ServerImpl) - leafInputState := txn.GetLeafTxnInputState(context.TODO()) + leafInputState := txn.GetLeafTxnInputState(context.Background()) req := execinfrapb.SetupFlowRequest{ Version: execinfra.Version, LeafTxnInputState: &leafInputState, @@ -71,7 +71,7 @@ func runTestFlow( var rowBuf distsqlutils.RowBuffer - ctx, flow, err := distSQLSrv.SetupSyncFlow(context.TODO(), distSQLSrv.ParentMemoryMonitor, &req, &rowBuf) + ctx, flow, err := distSQLSrv.SetupSyncFlow(context.Background(), distSQLSrv.ParentMemoryMonitor, &req, &rowBuf) if err != nil { t.Fatal(err) } @@ -406,7 +406,7 @@ func TestDistAggregationTable(t *testing.T) { const numRows = 100 tc := serverutils.StartTestCluster(t, 1, base.TestClusterArgs{}) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) // Create a table with a few columns: // - random integer values from 0 to numRows diff --git 
a/pkg/sql/physicalplan/replicaoracle/oracle_test.go b/pkg/sql/physicalplan/replicaoracle/oracle_test.go index 3ca7d075d10b..8033e9a4b9db 100644 --- a/pkg/sql/physicalplan/replicaoracle/oracle_test.go +++ b/pkg/sql/physicalplan/replicaoracle/oracle_test.go @@ -50,7 +50,7 @@ func TestBinPackingOracleIsConsistent(t *testing.T) { }) // For our purposes, an uninitialized binPackingOracle will do. bp := of.Oracle(nil) - repl, err := bp.ChoosePreferredReplica(context.TODO(), rng, queryState) + repl, err := bp.ChoosePreferredReplica(context.Background(), rng, queryState) if err != nil { t.Fatal(err) } @@ -62,7 +62,7 @@ func TestBinPackingOracleIsConsistent(t *testing.T) { func TestClosest(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) g, _ := makeGossip(t, stopper) nd, _ := g.GetNodeDescriptor(1) of := NewOracleFactory(ClosestChoice, Config{ @@ -76,7 +76,7 @@ func TestClosest(t *testing.T) { return time.Millisecond, true } o := of.Oracle(nil) - info, err := o.ChoosePreferredReplica(context.TODO(), roachpb.RangeDescriptor{ + info, err := o.ChoosePreferredReplica(context.Background(), roachpb.RangeDescriptor{ InternalReplicas: []roachpb.ReplicaDescriptor{ {NodeID: 1, StoreID: 1}, {NodeID: 2, StoreID: 2}, diff --git a/pkg/sql/physicalplan/span_resolver_test.go b/pkg/sql/physicalplan/span_resolver_test.go index 87b0a815347b..7e71a57ee2b4 100644 --- a/pkg/sql/physicalplan/span_resolver_test.go +++ b/pkg/sql/physicalplan/span_resolver_test.go @@ -46,7 +46,7 @@ func TestSpanResolverUsesCaches(t *testing.T) { UseDatabase: "t", }, }) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) rowRanges, _ := setupRanges( tc.Conns[0], tc.Servers[0], tc.Servers[0].DB(), t) @@ -105,7 +105,7 @@ func TestSpanResolverUsesCaches(t *testing.T) { // Resolve the spans. Since the LeaseHolderCache is empty, all the ranges // should be grouped and "assigned" to replica 0. - replicas, err := resolveSpans(context.TODO(), lr.NewSpanResolverIterator(nil), spans...) + replicas, err := resolveSpans(context.Background(), lr.NewSpanResolverIterator(nil), spans...) if err != nil { t.Fatal(err) } @@ -132,7 +132,7 @@ func TestSpanResolverUsesCaches(t *testing.T) { if err := populateCache(tc.Conns[3], 3 /* expectedNumRows */); err != nil { t.Fatal(err) } - replicas, err = resolveSpans(context.TODO(), lr.NewSpanResolverIterator(nil), spans...) + replicas, err = resolveSpans(context.Background(), lr.NewSpanResolverIterator(nil), spans...) 
if err != nil { t.Fatal(err) } @@ -188,7 +188,7 @@ func TestSpanResolver(t *testing.T) { s, db, cdb := serverutils.StartServer(t, base.TestServerArgs{ UseDatabase: "t", }) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) rowRanges, tableDesc := setupRanges(db, s.(*server.TestServer), cdb, t) lr := physicalplan.NewSpanResolver( @@ -283,7 +283,7 @@ func TestMixedDirections(t *testing.T) { s, db, cdb := serverutils.StartServer(t, base.TestServerArgs{ UseDatabase: "t", }) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) rowRanges, tableDesc := setupRanges(db, s.(*server.TestServer), cdb, t) lr := physicalplan.NewSpanResolver( diff --git a/pkg/sql/plan_opt_test.go b/pkg/sql/plan_opt_test.go index ff10de5f31cb..d8a0f532d738 100644 --- a/pkg/sql/plan_opt_test.go +++ b/pkg/sql/plan_opt_test.go @@ -67,7 +67,7 @@ func makeQueryCacheTestHelper(tb testing.TB, numConns int) *queryCacheTestHelper } func (h *queryCacheTestHelper) Stop() { - h.srv.Stopper().Stop(context.TODO()) + h.srv.Stopper().Stop(context.Background()) } func (h *queryCacheTestHelper) GetStats() (numHits, numMisses int) { diff --git a/pkg/sql/rand_test.go b/pkg/sql/rand_test.go index 844950ddff2e..f605c19eba1c 100644 --- a/pkg/sql/rand_test.go +++ b/pkg/sql/rand_test.go @@ -25,7 +25,7 @@ func TestGenerateRandInterestingTable(t *testing.T) { // Ensure that we can create the random table. params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := db.Exec("CREATE DATABASE d"); err != nil { t.Fatal(err) } diff --git a/pkg/sql/rename_test.go b/pkg/sql/rename_test.go index c0a0add2bcf8..ef22f758b34b 100644 --- a/pkg/sql/rename_test.go +++ b/pkg/sql/rename_test.go @@ -29,7 +29,7 @@ import ( func TestRenameTable(t *testing.T) { defer leaktest.AfterTest(t)() s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) counter := int64(keys.MinNonPredefinedUserDescID) @@ -48,7 +48,7 @@ func TestRenameTable(t *testing.T) { // Check the table descriptor. desc := &sqlbase.Descriptor{} tableDescKey := sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, sqlbase.ID(counter)) - ts, err := kvDB.GetProtoTs(context.TODO(), tableDescKey, desc) + ts, err := kvDB.GetProtoTs(context.Background(), tableDescKey, desc) if err != nil { t.Fatal(err) } @@ -74,7 +74,7 @@ func TestRenameTable(t *testing.T) { } // Check the table descriptor again. 
- ts, err = kvDB.GetProtoTs(context.TODO(), tableDescKey, desc) + ts, err = kvDB.GetProtoTs(context.Background(), tableDescKey, desc) if err != nil { t.Fatal(err) } @@ -133,7 +133,7 @@ func TestTxnCanStillResolveOldName(t *testing.T) { } } s, db, kvDB := serverutils.StartServer(t, serverParams) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) sql := ` CREATE DATABASE test; @@ -224,7 +224,7 @@ CREATE TABLE test.t (a INT PRIMARY KEY); func TestTxnCanUseNewNameAfterRename(t *testing.T) { defer leaktest.AfterTest(t)() s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) sql := ` CREATE DATABASE test; @@ -288,7 +288,7 @@ SELECT * FROM test.t2 func TestSeriesOfRenames(t *testing.T) { defer leaktest.AfterTest(t)() s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) sql := ` CREATE DATABASE test; @@ -366,7 +366,7 @@ func TestRenameDuringDrainingName(t *testing.T) { }} s, db, kvDB := serverutils.StartServer(t, serverParams) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) sql := ` CREATE DATABASE test; diff --git a/pkg/sql/revert_test.go b/pkg/sql/revert_test.go index 34c711668ef3..a052df15acb1 100644 --- a/pkg/sql/revert_test.go +++ b/pkg/sql/revert_test.go @@ -27,11 +27,11 @@ import ( func TestRevertTable(t *testing.T) { defer leaktest.AfterTest(t)() - ctx := context.TODO() + ctx := context.Background() s, sqlDB, kv := serverutils.StartServer( t, base.TestServerArgs{UseDatabase: "test"}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) execCfg := s.ExecutorConfig().(ExecutorConfig) db := sqlutils.MakeSQLRunner(sqlDB) @@ -70,7 +70,7 @@ func TestRevertTable(t *testing.T) { // Revert the table to ts. desc := sqlbase.GetTableDescriptor(kv, keys.SystemSQLCodec, "test", "test") desc.State = sqlbase.TableDescriptor_OFFLINE // bypass the offline check. 
- require.NoError(t, RevertTables(context.TODO(), kv, &execCfg, []*sqlbase.TableDescriptor{desc}, targetTime, 10)) + require.NoError(t, RevertTables(context.Background(), kv, &execCfg, []*sqlbase.TableDescriptor{desc}, targetTime, 10)) var reverted int db.QueryRow(t, `SELECT xor_agg(k # rev) FROM test`).Scan(&reverted) diff --git a/pkg/sql/row/fetcher_test.go b/pkg/sql/row/fetcher_test.go index a3b91b1f8ac1..f9e1d2833de9 100644 --- a/pkg/sql/row/fetcher_test.go +++ b/pkg/sql/row/fetcher_test.go @@ -167,7 +167,7 @@ func TestNextRowSingle(t *testing.T) { } if err := rf.StartScan( - context.TODO(), + context.Background(), kv.NewTxn(ctx, kvDB, 0), roachpb.Spans{tableDesc.IndexSpan(keys.SystemSQLCodec, tableDesc.PrimaryIndex.ID)}, false, /*limitBatches*/ @@ -181,7 +181,7 @@ func TestNextRowSingle(t *testing.T) { expectedVals := [2]int64{1, 1} for { - datums, desc, index, err := rf.NextRowDecoded(context.TODO()) + datums, desc, index, err := rf.NextRowDecoded(context.Background()) if err != nil { t.Fatal(err) } @@ -287,7 +287,7 @@ func TestNextRowBatchLimiting(t *testing.T) { } if err := rf.StartScan( - context.TODO(), + context.Background(), kv.NewTxn(ctx, kvDB, 0), roachpb.Spans{tableDesc.IndexSpan(keys.SystemSQLCodec, tableDesc.PrimaryIndex.ID)}, true, /*limitBatches*/ @@ -301,7 +301,7 @@ func TestNextRowBatchLimiting(t *testing.T) { expectedVals := [2]int64{1, 1} for { - datums, desc, index, err := rf.NextRowDecoded(context.TODO()) + datums, desc, index, err := rf.NextRowDecoded(context.Background()) if err != nil { t.Fatal(err) } @@ -415,7 +415,7 @@ INDEX(c) indexSpan.EndKey = midKey if err := rf.StartScan( - context.TODO(), + context.Background(), kv.NewTxn(ctx, kvDB, 0), roachpb.Spans{indexSpan, roachpb.Span{Key: midKey, EndKey: endKey}, @@ -433,7 +433,7 @@ INDEX(c) for { // Just try to grab the row - we don't need to validate the contents // in this test. 
- datums, _, _, err := rf.NextRowDecoded(context.TODO()) + datums, _, _, err := rf.NextRowDecoded(context.Background()) if err != nil { t.Fatal(err) } @@ -579,7 +579,7 @@ func TestNextRowSecondaryIndex(t *testing.T) { } if err := rf.StartScan( - context.TODO(), + context.Background(), kv.NewTxn(ctx, kvDB, 0), roachpb.Spans{tableDesc.IndexSpan(keys.SystemSQLCodec, tableDesc.Indexes[0].ID)}, false, /*limitBatches*/ @@ -593,7 +593,7 @@ func TestNextRowSecondaryIndex(t *testing.T) { nullCount := 0 var prevIdxVal int64 for { - datums, desc, index, err := rf.NextRowDecoded(context.TODO()) + datums, desc, index, err := rf.NextRowDecoded(context.Background()) if err != nil { t.Fatal(err) } @@ -941,7 +941,7 @@ func TestNextRowInterleaved(t *testing.T) { } if err := rf.StartScan( - context.TODO(), + context.Background(), kv.NewTxn(ctx, kvDB, 0), lookupSpans, false, /*limitBatches*/ @@ -955,7 +955,7 @@ func TestNextRowInterleaved(t *testing.T) { count := make(map[string]int, len(entries)) for { - datums, desc, index, err := rf.NextRowDecoded(context.TODO()) + datums, desc, index, err := rf.NextRowDecoded(context.Background()) if err != nil { t.Fatal(err) } diff --git a/pkg/sql/row/fk_test.go b/pkg/sql/row/fk_test.go index 595a9d27a1a2..c6714dfa7108 100644 --- a/pkg/sql/row/fk_test.go +++ b/pkg/sql/row/fk_test.go @@ -162,7 +162,7 @@ func TestMakeFkMetadata(t *testing.T) { test := func(t *testing.T, usage FKCheckType, expectedIDs []TableID) { tableLookups, err := MakeFkMetadata( - context.TODO(), + context.Background(), xDesc, usage, lookup, diff --git a/pkg/sql/rowexec/aggregator_test.go b/pkg/sql/rowexec/aggregator_test.go index 51f40e676dd2..d75c04ac317c 100644 --- a/pkg/sql/rowexec/aggregator_test.go +++ b/pkg/sql/rowexec/aggregator_test.go @@ -442,7 +442,7 @@ func BenchmarkAggregation(b *testing.B) { if err != nil { b.Fatal(err) } - d.Run(context.TODO()) + d.Run(context.Background()) input.Reset() } b.StopTimer() @@ -481,7 +481,7 @@ func BenchmarkCountRows(b *testing.B) { if err != nil { b.Fatal(err) } - d.Run(context.TODO()) + d.Run(context.Background()) input.Reset() } } diff --git a/pkg/sql/rowexec/backfiller_test.go b/pkg/sql/rowexec/backfiller_test.go index 564d06edf712..cbd7d74635e0 100644 --- a/pkg/sql/rowexec/backfiller_test.go +++ b/pkg/sql/rowexec/backfiller_test.go @@ -33,7 +33,7 @@ import ( func TestWriteResumeSpan(t *testing.T) { defer leaktest.AfterTest(t)() - ctx := context.TODO() + ctx := context.Background() server, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{ Knobs: base.TestingKnobs{ diff --git a/pkg/sql/rowexec/indexjoiner_test.go b/pkg/sql/rowexec/indexjoiner_test.go index 3cab0c8e7517..01dd23194fe5 100644 --- a/pkg/sql/rowexec/indexjoiner_test.go +++ b/pkg/sql/rowexec/indexjoiner_test.go @@ -34,7 +34,7 @@ func TestIndexJoiner(t *testing.T) { defer leaktest.AfterTest(t)() s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) // Create a table where each row is: // diff --git a/pkg/sql/rowexec/joinreader_test.go b/pkg/sql/rowexec/joinreader_test.go index 257d65df7148..d728e6bf5333 100644 --- a/pkg/sql/rowexec/joinreader_test.go +++ b/pkg/sql/rowexec/joinreader_test.go @@ -609,7 +609,7 @@ func TestJoinReaderDrain(t *testing.T) { defer leaktest.AfterTest(t)() s, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) sqlutils.CreateTable( t, diff --git 
a/pkg/sql/rowexec/processors_test.go b/pkg/sql/rowexec/processors_test.go index 61645da5f5e4..e9b20915405a 100644 --- a/pkg/sql/rowexec/processors_test.go +++ b/pkg/sql/rowexec/processors_test.go @@ -290,7 +290,7 @@ func TestPostProcess(t *testing.T) { } // Run the rows through the helper. for i := range input { - status, err := out.EmitRow(context.TODO(), input[i]) + status, err := out.EmitRow(context.Background(), input[i]) if err != nil { t.Fatal(err) } @@ -556,7 +556,7 @@ func TestDrainingProcessorSwallowsUncertaintyError(t *testing.T) { }, }, }) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) origDB0 := tc.ServerConn(0) sqlutils.CreateTable(t, origDB0, "t", diff --git a/pkg/sql/rowexec/sample_aggregator_test.go b/pkg/sql/rowexec/sample_aggregator_test.go index 5cbf15941ffe..9bbe5be92373 100644 --- a/pkg/sql/rowexec/sample_aggregator_test.go +++ b/pkg/sql/rowexec/sample_aggregator_test.go @@ -38,7 +38,7 @@ func TestSampleAggregator(t *testing.T) { defer leaktest.AfterTest(t)() server, sqlDB, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - defer server.Stopper().Stop(context.TODO()) + defer server.Stopper().Stop(context.Background()) st := cluster.MakeTestingClusterSettings() evalCtx := tree.MakeTestingEvalContext(st) diff --git a/pkg/sql/rowexec/windower_test.go b/pkg/sql/rowexec/windower_test.go index 93589ea6232f..6feca76a824c 100644 --- a/pkg/sql/rowexec/windower_test.go +++ b/pkg/sql/rowexec/windower_test.go @@ -253,7 +253,7 @@ func BenchmarkWindower(b *testing.B) { if err != nil { b.Fatal(err) } - d.Run(context.TODO()) + d.Run(context.Background()) input.Reset() } b.StopTimer() diff --git a/pkg/sql/rowflow/routers_test.go b/pkg/sql/rowflow/routers_test.go index b9271ad382f5..16e3b82ddfed 100644 --- a/pkg/sql/rowflow/routers_test.go +++ b/pkg/sql/rowflow/routers_test.go @@ -57,7 +57,7 @@ func setupRouter( t.Fatal(err) } - ctx := context.TODO() + ctx := context.Background() flowCtx := execinfra.FlowCtx{ Cfg: &execinfra.ServerConfig{ Settings: st, @@ -79,7 +79,7 @@ func TestRouters(t *testing.T) { rng, _ := randutil.NewPseudoRand() alloc := &sqlbase.DatumAlloc{} - ctx := context.TODO() + ctx := context.Background() st := cluster.MakeTestingClusterSettings() evalCtx := tree.NewTestingEvalContext(st) defer evalCtx.Stop(context.Background()) @@ -292,7 +292,7 @@ var ( func TestConsumerStatus(t *testing.T) { defer leaktest.AfterTest(t)() - ctx := context.TODO() + ctx := context.Background() st := cluster.MakeTestingClusterSettings() evalCtx := tree.NewTestingEvalContext(st) defer evalCtx.Stop(context.Background()) @@ -448,7 +448,7 @@ func preimageAttack( func TestMetadataIsForwarded(t *testing.T) { defer leaktest.AfterTest(t)() - ctx := context.TODO() + ctx := context.Background() st := cluster.MakeTestingClusterSettings() evalCtx := tree.NewTestingEvalContext(st) defer evalCtx.Stop(context.Background()) @@ -607,7 +607,7 @@ func TestRouterBlocks(t *testing.T) { t.Fatal(err) } st := cluster.MakeTestingClusterSettings() - ctx := context.TODO() + ctx := context.Background() evalCtx := tree.MakeTestingEvalContext(st) defer evalCtx.Stop(ctx) diskMonitor := execinfra.NewTestDiskMonitor(ctx, st) diff --git a/pkg/sql/run_control_test.go b/pkg/sql/run_control_test.go index 4dea296e977d..e7eb6d15c1a8 100644 --- a/pkg/sql/run_control_test.go +++ b/pkg/sql/run_control_test.go @@ -46,7 +46,7 @@ func TestCancelSelectQuery(t *testing.T) { base.TestClusterArgs{ ReplicationMode: base.ReplicationManual, }) - defer tc.Stopper().Stop(context.TODO()) + 
defer tc.Stopper().Stop(context.Background()) conn1 = tc.ServerConn(0) conn2 = tc.ServerConn(1) @@ -124,7 +124,7 @@ func TestCancelDistSQLQuery(t *testing.T) { }, }, }) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) conn1 = tc.ServerConn(0) conn2 = tc.ServerConn(1) @@ -181,7 +181,7 @@ func TestCancelDistSQLQuery(t *testing.T) { } func testCancelSession(t *testing.T, hasActiveSession bool) { - ctx := context.TODO() + ctx := context.Background() numNodes := 2 tc := serverutils.StartTestCluster(t, numNodes, @@ -275,7 +275,7 @@ func testCancelSession(t *testing.T, hasActiveSession bool) { func TestCancelMultipleSessions(t *testing.T) { defer leaktest.AfterTest(t)() - ctx := context.TODO() + ctx := context.Background() tc := serverutils.StartTestCluster(t, 2, /* numNodes */ base.TestClusterArgs{ @@ -333,7 +333,7 @@ func TestCancelIfExists(t *testing.T) { base.TestClusterArgs{ ReplicationMode: base.ReplicationManual, }) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) conn := tc.ServerConn(0) diff --git a/pkg/sql/scan_test.go b/pkg/sql/scan_test.go index 243d50aa207a..d24c2027f239 100644 --- a/pkg/sql/scan_test.go +++ b/pkg/sql/scan_test.go @@ -130,7 +130,7 @@ func TestScanBatches(t *testing.T) { s, db, _ := serverutils.StartServer( t, base.TestServerArgs{UseDatabase: "test"}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := db.Exec(`CREATE DATABASE IF NOT EXISTS test`); err != nil { t.Fatal(err) diff --git a/pkg/sql/scatter_test.go b/pkg/sql/scatter_test.go index aad2dda5a9f7..5906f322d7c3 100644 --- a/pkg/sql/scatter_test.go +++ b/pkg/sql/scatter_test.go @@ -37,7 +37,7 @@ func TestScatterRandomizeLeases(t *testing.T) { const numHosts = 3 tc := serverutils.StartTestCluster(t, numHosts, base.TestClusterArgs{}) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) sqlutils.CreateTable( t, tc.ServerConn(0), "t", diff --git a/pkg/sql/schema_change_migrations_test.go b/pkg/sql/schema_change_migrations_test.go index 53a807f4d69a..2dc7aa714fe5 100644 --- a/pkg/sql/schema_change_migrations_test.go +++ b/pkg/sql/schema_change_migrations_test.go @@ -168,7 +168,7 @@ func testSchemaChangeMigrations(t *testing.T, testCase migrationTestCase) { signalRevMigrationDone, signalMigrationDone, ) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) defer disableGCTTLStrictEnforcement(t, sqlDB)() log.Info(ctx, "waiting for all schema changes to block") @@ -865,7 +865,7 @@ func TestGCJobCreated(t *testing.T) { AlwaysRunJobMigration: true, } s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) ctx := context.Background() sqlRunner := sqlutils.MakeSQLRunner(sqlDB) diff --git a/pkg/sql/schema_changer_test.go b/pkg/sql/schema_changer_test.go index 0382648679c4..7d9187c8f434 100644 --- a/pkg/sql/schema_changer_test.go +++ b/pkg/sql/schema_changer_test.go @@ -84,7 +84,7 @@ func TestSchemaChangeProcess(t *testing.T) { params, _ := tests.CreateTestServerParams() s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) var id = sqlbase.ID(keys.MinNonPredefinedUserDescID + 1 /* skip over DB ID */) var instance = base.SQLInstanceID(2) @@ -104,7 +104,7 @@ func TestSchemaChangeProcess(t *testing.T) { cfg, ) jobRegistry := s.JobRegistry().(*jobs.Registry) - 
defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) changer := sql.NewSchemaChangerForTesting( id, 0, instance, *kvDB, leaseMgr, jobRegistry, &execCfg, cluster.MakeTestingClusterSettings()) @@ -119,7 +119,7 @@ INSERT INTO t.test VALUES ('a', 'b'), ('c', 'd'); // Read table descriptor for version. tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") expectedVersion := tableDesc.Version - ctx := context.TODO() + ctx := context.Background() // Check that RunStateMachineBeforeBackfill doesn't do anything // if there are no mutations queued. @@ -204,7 +204,7 @@ func TestAsyncSchemaChanger(t *testing.T) { // changer executes all schema changes. params, _ := tests.CreateTestServerParams() s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -346,7 +346,7 @@ func runSchemaChangeWithOperations( <-backfillNotification // Run a variety of operations during the backfill. - ctx := context.TODO() + ctx := context.Background() // Update some rows. var updatedKeys []int @@ -486,7 +486,7 @@ func TestRaceWithBackfill(t *testing.T) { ReplicationMode: base.ReplicationManual, ServerArgs: params, }) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) kvDB := tc.Server(0).DB() sqlDB := tc.ServerConn(0) @@ -511,7 +511,7 @@ CREATE UNIQUE INDEX vidx ON t.test (v); } sql.SplitTable(t, tc, tableDesc, sps) - ctx := context.TODO() + ctx := context.Background() // number of keys == 2 * number of rows; 1 column family and 1 index entry // for each row. @@ -662,7 +662,7 @@ func TestDropWhileBackfill(t *testing.T) { ReplicationMode: base.ReplicationManual, ServerArgs: params, }) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) kvDB := tc.Server(0).DB() sqlDB := tc.ServerConn(0) @@ -687,7 +687,7 @@ CREATE UNIQUE INDEX vidx ON t.test (v); } sql.SplitTable(t, tc, tableDesc, sps) - ctx := context.TODO() + ctx := context.Background() // number of keys == 2 * number of rows; 1 column family and 1 index entry // for each row. @@ -760,7 +760,7 @@ func TestBackfillErrors(t *testing.T) { ReplicationMode: base.ReplicationManual, ServerArgs: params, }) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) kvDB := tc.Server(0).DB() sqlDB := tc.ServerConn(0) @@ -800,7 +800,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); } sql.SplitTable(t, tc, tableDesc, sps) - ctx := context.TODO() + ctx := context.Background() if err := checkTableKeyCount(ctx, kvDB, 1, maxValue); err != nil { t.Fatal(err) @@ -903,7 +903,7 @@ func TestAbortSchemaChangeBackfill(t *testing.T) { }, } server, sqlDB, kvDB := serverutils.StartServer(t, params) - defer server.Stopper().Stop(context.TODO()) + defer server.Stopper().Stop(context.Background()) // Disable strict GC TTL enforcement because we're going to shove a zero-value // TTL into the system with addImmediateGCZoneConfig. @@ -999,7 +999,7 @@ COMMIT; wg.Wait() // for schema change to complete - ctx := context.TODO() + ctx := context.Background() // Verify the number of keys left behind in the table to validate // schema change operations. 
@@ -1050,7 +1050,7 @@ func addIndexSchemaChange( t.Fatalf("read the wrong number of rows: e = %d, v = %d", eCount, count) } - ctx := context.TODO() + ctx := context.Background() if err := checkTableKeyCount(ctx, kvDB, numKeysPerRow, maxValue); err != nil { t.Fatal(err) @@ -1087,7 +1087,7 @@ func addColumnSchemaChange( t.Fatalf("read the wrong number of rows: e = %d, v = %d", eCount, count) } - ctx := context.TODO() + ctx := context.Background() if err := checkTableKeyCount(ctx, kvDB, numKeysPerRow, maxValue); err != nil { t.Fatal(err) @@ -1102,7 +1102,7 @@ func dropColumnSchemaChange( t.Fatal(err) } - ctx := context.TODO() + ctx := context.Background() if err := checkTableKeyCount(ctx, kvDB, numKeysPerRow, maxValue); err != nil { t.Fatal(err) @@ -1118,7 +1118,7 @@ func dropIndexSchemaChange( t.Fatal(err) } - if err := checkTableKeyCount(context.TODO(), kvDB, numKeysPerRow, maxValue); err != nil { + if err := checkTableKeyCount(context.Background(), kvDB, numKeysPerRow, maxValue); err != nil { t.Fatal(err) } } @@ -1130,7 +1130,7 @@ func TestDropColumn(t *testing.T) { params, _ := tests.CreateTestServerParams() s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -1234,7 +1234,7 @@ func TestSchemaChangeRetry(t *testing.T) { }, } s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -1311,7 +1311,7 @@ func TestSchemaChangeRetryOnVersionChange(t *testing.T) { }, } s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -1322,7 +1322,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); tableDesc := sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") id := tableDesc.ID - ctx := context.TODO() + ctx := context.Background() upTableVersion = func() { leaseMgr := s.LeaseManager().(*sql.LeaseManager) @@ -1413,7 +1413,7 @@ func TestSchemaChangePurgeFailure(t *testing.T) { }, } server, sqlDB, kvDB := serverutils.StartServer(t, params) - defer server.Stopper().Stop(context.TODO()) + defer server.Stopper().Stop(context.Background()) // Disable strict GC TTL enforcement because we're going to shove a zero-value // TTL into the system with addImmediateGCZoneConfig. @@ -1484,7 +1484,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); // rows from k = 0 to k = chunkSize - 1 have index values. numGarbageValues := chunkSize - ctx := context.TODO() + ctx := context.Background() if err := checkTableKeyCount(ctx, kvDB, 1, maxValue+1+numGarbageValues); err != nil { t.Fatal(err) @@ -1558,7 +1558,7 @@ func TestSchemaChangeFailureAfterCheckpointing(t *testing.T) { }, } server, sqlDB, kvDB := serverutils.StartServer(t, params) - defer server.Stopper().Stop(context.TODO()) + defer server.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -1573,7 +1573,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); t.Fatal(err) } - if err := checkTableKeyCount(context.TODO(), kvDB, 1, maxValue); err != nil { + if err := checkTableKeyCount(context.Background(), kvDB, 1, maxValue); err != nil { t.Fatal(err) } @@ -1583,7 +1583,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); } // No garbage left behind. 
- if err := checkTableKeyCount(context.TODO(), kvDB, 1, maxValue); err != nil { + if err := checkTableKeyCount(context.Background(), kvDB, 1, maxValue); err != nil { t.Fatal(err) } @@ -1597,7 +1597,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); } // No garbage left behind. - if err := checkTableKeyCount(context.TODO(), kvDB, 1, maxValue); err != nil { + if err := checkTableKeyCount(context.Background(), kvDB, 1, maxValue); err != nil { t.Fatal(err) } @@ -1637,7 +1637,7 @@ func TestSchemaChangeReverseMutations(t *testing.T) { }, } s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) // Disable strict GC TTL enforcement because we're going to shove a zero-value // TTL into the system with addImmediateGCZoneConfig. @@ -1835,7 +1835,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT8); return nil }) - ctx := context.TODO() + ctx := context.Background() // Check that the number of k-v pairs is accurate. if err := checkTableKeyCount(ctx, kvDB, 3, maxValue); err != nil { @@ -1892,7 +1892,7 @@ func TestParseSentinelValueWithNewColumnInSentinelFamily(t *testing.T) { defer leaktest.AfterTest(t)() params, _ := tests.CreateTestServerParams() server, sqlDB, kvDB := serverutils.StartServer(t, params) - defer server.Stopper().Stop(context.TODO()) + defer server.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -1918,7 +1918,7 @@ CREATE TABLE t.test ( t.Fatal(err) } - ctx := context.TODO() + ctx := context.Background() // Convert table data created by the above INSERT into sentinel // values. This is done to make the table appear like it were @@ -2020,7 +2020,7 @@ func TestAddColumnDuringColumnDrop(t *testing.T) { }, } server, sqlDB, _ := serverutils.StartServer(t, params) - defer server.Stopper().Stop(context.TODO()) + defer server.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -2106,7 +2106,7 @@ func TestSchemaUniqueColumnDropFailure(t *testing.T) { }, } server, sqlDB, kvDB := serverutils.StartServer(t, params) - defer server.Stopper().Stop(context.TODO()) + defer server.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -2120,7 +2120,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT UNIQUE DEFAULT 23 CREATE FAMILY F3 t.Fatal(err) } - if err := checkTableKeyCount(context.TODO(), kvDB, 2, maxValue); err != nil { + if err := checkTableKeyCount(context.Background(), kvDB, 2, maxValue); err != nil { t.Fatal(err) } @@ -3140,7 +3140,7 @@ func TestCRUDWhileColumnBackfill(t *testing.T) { }, } server, sqlDB, kvDB := serverutils.StartServer(t, params) - defer server.Stopper().Stop(context.TODO()) + defer server.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -3368,7 +3368,7 @@ func TestBackfillCompletesOnChunkBoundary(t *testing.T) { ReplicationMode: base.ReplicationManual, ServerArgs: params, }) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) kvDB := tc.Server(0).DB() sqlDB := tc.ServerConn(0) @@ -3411,7 +3411,7 @@ func TestBackfillCompletesOnChunkBoundary(t *testing.T) { t.Error(err) } - ctx := context.TODO() + ctx := context.Background() // Verify the number of keys left behind in the table to // validate schema change operations. 
@@ -3430,7 +3430,7 @@ func TestSchemaChangeInTxn(t *testing.T) { defer leaktest.AfterTest(t)() params, _ := tests.CreateTestServerParams() s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -3544,7 +3544,7 @@ func TestSecondaryIndexWithOldStoringEncoding(t *testing.T) { defer leaktest.AfterTest(t)() params, _ := tests.CreateTestServerParams() server, sqlDB, kvDB := serverutils.StartServer(t, params) - defer server.Stopper().Stop(context.TODO()) + defer server.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE d; @@ -3573,7 +3573,7 @@ CREATE TABLE d.t ( tableDesc.Indexes[i] = index } if err := kvDB.Put( - context.TODO(), + context.Background(), sqlbase.MakeDescMetadataKey(keys.SystemSQLCodec, tableDesc.GetID()), sqlbase.WrapDescriptor(tableDesc), ); err != nil { @@ -3673,7 +3673,7 @@ func TestSchemaChangeEvalContext(t *testing.T) { ReplicationMode: base.ReplicationManual, ServerArgs: params, }) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) kvDB := tc.Server(0).DB() sqlDB := tc.ServerConn(0) @@ -3747,7 +3747,7 @@ func TestSchemaChangeCompletion(t *testing.T) { SQLSchemaChanger: &sql.SchemaChangerTestingKnobs{}, } s, sqlDB, kvDB := serverutils.StartServer(t, params) - ctx := context.TODO() + ctx := context.Background() defer s.Stopper().Stop(ctx) if _, err := sqlDB.Exec(` @@ -3831,7 +3831,7 @@ func TestTruncateInternals(t *testing.T) { } s, sqlDB, kvDB := serverutils.StartServer(t, params) - ctx := context.TODO() + ctx := context.Background() defer s.Stopper().Stop(ctx) if _, err := sqlDB.Exec(` @@ -3934,7 +3934,7 @@ func TestTruncateCompletion(t *testing.T) { params, _ := tests.CreateTestServerParams() s, sqlDB, kvDB := serverutils.StartServer(t, params) - ctx := context.TODO() + ctx := context.Background() defer s.Stopper().Stop(ctx) // Disable strict GC TTL enforcement because we're going to shove a zero-value @@ -4102,7 +4102,7 @@ func TestTruncateWhileColumnBackfill(t *testing.T) { }, } s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -4152,7 +4152,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v INT); tableDesc = sqlbase.GetTableDescriptor(kvDB, keys.SystemSQLCodec, "t", "test") tablePrefix := keys.SystemSQLCodec.TablePrefix(uint32(tableDesc.ID)) tableEnd := tablePrefix.PrefixEnd() - if kvs, err := kvDB.Scan(context.TODO(), tablePrefix, tableEnd, 0); err != nil { + if kvs, err := kvDB.Scan(context.Background(), tablePrefix, tableEnd, 0); err != nil { t.Fatal(err) } else if e := 0; len(kvs) != e { t.Fatalf("expected %d key value pairs, but got %d", e, len(kvs)) @@ -4199,7 +4199,7 @@ func TestSchemaChangeErrorOnCommit(t *testing.T) { defer leaktest.AfterTest(t)() params, _ := tests.CreateTestServerParams() s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -4266,7 +4266,7 @@ func TestIndexBackfillAfterGC(t *testing.T) { } tc = serverutils.StartTestCluster(t, 1, base.TestClusterArgs{ServerArgs: params}) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) db := tc.ServerConn(0) kvDB := tc.Server(0).DB() sqlDB := sqlutils.MakeSQLRunner(db) @@ -4278,7 +4278,7 @@ func 
TestIndexBackfillAfterGC(t *testing.T) { t.Fatal(err) } - if err := checkTableKeyCount(context.TODO(), kvDB, 2, 0); err != nil { + if err := checkTableKeyCount(context.Background(), kvDB, 2, 0); err != nil { t.Fatal(err) } } @@ -4310,7 +4310,7 @@ func TestAddComputedColumn(t *testing.T) { } tc := serverutils.StartTestCluster(t, 1, base.TestClusterArgs{ServerArgs: params}) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) db = tc.ServerConn(0) sqlDB := sqlutils.MakeSQLRunner(db) @@ -4325,7 +4325,7 @@ func TestSchemaChangeAfterCreateInTxn(t *testing.T) { defer leaktest.AfterTest(t)() params, _ := tests.CreateTestServerParams() s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) // The schema change below can occasionally take more than // 5 seconds and gets pushed by the closed timestamp mechanism @@ -4390,7 +4390,7 @@ ALTER TABLE t.test ADD COLUMN c INT AS (v + 4) STORED, ADD COLUMN d INT DEFAULT t.Fatal(err) } - if err := checkTableKeyCount(context.TODO(), kvDB, 2, maxValue); err != nil { + if err := checkTableKeyCount(context.Background(), kvDB, 2, maxValue); err != nil { t.Fatal(err) } @@ -4490,7 +4490,7 @@ func TestCancelSchemaChange(t *testing.T) { ReplicationMode: base.ReplicationManual, ServerArgs: params, }) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) db = tc.ServerConn(0) kvDB := tc.Server(0).DB() sqlDB = sqlutils.MakeSQLRunner(db) @@ -4518,7 +4518,7 @@ func TestCancelSchemaChange(t *testing.T) { } sql.SplitTable(t, tc, tableDesc, sps) - ctx := context.TODO() + ctx := context.Background() if err := checkTableKeyCount(ctx, kvDB, 1, maxValue); err != nil { t.Fatal(err) } @@ -4651,7 +4651,7 @@ func TestSchemaChangeRetryError(t *testing.T) { ReplicationMode: base.ReplicationManual, ServerArgs: params, }) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) sqlDB := tc.ServerConn(0) if _, err := sqlDB.Exec(` @@ -4726,7 +4726,7 @@ func TestCancelSchemaChangeContext(t *testing.T) { }, } s, db, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) sqlDB := sqlutils.MakeSQLRunner(db) sqlDB.Exec(t, ` @@ -4739,7 +4739,7 @@ func TestCancelSchemaChangeContext(t *testing.T) { t.Fatal(err) } - ctx := context.TODO() + ctx := context.Background() if err := checkTableKeyCount(ctx, kvDB, 1, maxValue); err != nil { t.Fatal(err) } @@ -4750,7 +4750,7 @@ func TestCancelSchemaChangeContext(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - ctx := context.TODO() + ctx := context.Background() // When using db.Exec(), CANCEL SESSION below will result in the // database client retrying the request on another connection. 
// Use a connection here so when the session gets canceled; a @@ -4800,7 +4800,7 @@ func TestSchemaChangeGRPCError(t *testing.T) { }, } s, db, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) sqlDB := sqlutils.MakeSQLRunner(db) sqlDB.Exec(t, ` @@ -4813,7 +4813,7 @@ func TestSchemaChangeGRPCError(t *testing.T) { t.Fatal(err) } - ctx := context.TODO() + ctx := context.Background() if err := checkTableKeyCount(ctx, kvDB, 1, maxValue); err != nil { t.Fatal(err) } @@ -4852,7 +4852,7 @@ func TestBlockedSchemaChange(t *testing.T) { }, } s, db, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) sqlDB := sqlutils.MakeSQLRunner(db) sqlDB.Exec(t, ` @@ -4865,7 +4865,7 @@ func TestBlockedSchemaChange(t *testing.T) { t.Fatal(err) } - ctx := context.TODO() + ctx := context.Background() if err := checkTableKeyCount(ctx, kvDB, 1, maxValue); err != nil { t.Fatal(err) } @@ -4916,11 +4916,11 @@ func TestIndexBackfillValidation(t *testing.T) { if count == 2 { // drop an index value before validation. key := keys.SystemSQLCodec.IndexPrefix(uint32(tableDesc.ID), uint32(tableDesc.NextIndexID)) - kv, err := db.Scan(context.TODO(), key, key.PrefixEnd(), 1) + kv, err := db.Scan(context.Background(), key, key.PrefixEnd(), 1) if err != nil { t.Error(err) } - if err := db.Del(context.TODO(), kv[0].Key); err != nil { + if err := db.Del(context.Background(), kv[0].Key); err != nil { t.Error(err) } } @@ -4933,7 +4933,7 @@ func TestIndexBackfillValidation(t *testing.T) { } server, sqlDB, kvDB := serverutils.StartServer(t, params) db = kvDB - defer server.Stopper().Stop(context.TODO()) + defer server.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -4985,11 +4985,11 @@ func TestInvertedIndexBackfillValidation(t *testing.T) { if count == 2 { // drop an index value before validation. 
key := keys.SystemSQLCodec.IndexPrefix(uint32(tableDesc.ID), uint32(tableDesc.NextIndexID)) - kv, err := db.Scan(context.TODO(), key, key.PrefixEnd(), 1) + kv, err := db.Scan(context.Background(), key, key.PrefixEnd(), 1) if err != nil { t.Error(err) } - if err := db.Del(context.TODO(), kv[0].Key); err != nil { + if err := db.Del(context.Background(), kv[0].Key); err != nil { t.Error(err) } } @@ -5002,7 +5002,7 @@ func TestInvertedIndexBackfillValidation(t *testing.T) { } server, sqlDB, kvDB := serverutils.StartServer(t, params) db = kvDB - defer server.Stopper().Stop(context.TODO()) + defer server.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -5054,7 +5054,7 @@ func TestMultipleIndexBackfills(t *testing.T) { }, } server, sqlDB, _ := serverutils.StartServer(t, params) - defer server.Stopper().Stop(context.TODO()) + defer server.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -5112,7 +5112,7 @@ func TestCreateStatsAfterSchemaChange(t *testing.T) { stats.DefaultAsOfTime = time.Microsecond server, sqlDB, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer server.Stopper().Stop(context.TODO()) + defer server.Stopper().Stop(context.Background()) sqlRun := sqlutils.MakeSQLRunner(sqlDB) sqlRun.Exec(t, `SET CLUSTER SETTING sql.stats.automatic_collection.enabled=false`) @@ -5192,7 +5192,7 @@ func TestTableValidityWhileAddingFK(t *testing.T) { } server, sqlDB, _ := serverutils.StartServer(t, params) - defer server.Stopper().Stop(context.TODO()) + defer server.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -5282,7 +5282,7 @@ func TestWritesWithChecksBeforeDefaultColumnBackfill(t *testing.T) { } server, sqlDB, _ := serverutils.StartServer(t, params) - defer server.Stopper().Stop(context.TODO()) + defer server.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -5380,7 +5380,7 @@ func TestWritesWithChecksBeforeComputedColumnBackfill(t *testing.T) { } server, sqlDB, _ := serverutils.StartServer(t, params) - defer server.Stopper().Stop(context.TODO()) + defer server.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; @@ -5477,11 +5477,11 @@ func TestIntentRaceWithIndexBackfill(t *testing.T) { ReplicationMode: base.ReplicationManual, ServerArgs: params, }) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) kvDB := tc.Server(0).DB() sqlDB := tc.ServerConn(0) - ctx, cancel := context.WithCancel(context.TODO()) + ctx, cancel := context.WithCancel(context.Background()) readyToBackfill = make(chan struct{}) canStartBackfill = make(chan struct{}) @@ -5591,7 +5591,7 @@ func TestSchemaChangeJobRunningStatusValidation(t *testing.T) { }, } s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; CREATE TABLE t.test (k INT PRIMARY KEY, v INT); @@ -5644,7 +5644,7 @@ func TestFKReferencesAddedOnlyOnceOnRetry(t *testing.T) { }, } s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; CREATE TABLE t.test (k INT PRIMARY KEY, v INT); @@ -5838,7 +5838,7 @@ func TestMultipleRevert(t *testing.T) { s, sqlDB, _ := serverutils.StartServer(t, params) db = sqlDB - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) // 
Disable strict GC TTL enforcement because we're going to shove a zero-value // TTL into the system with addImmediateGCZoneConfig. diff --git a/pkg/sql/scrub_test.go b/pkg/sql/scrub_test.go index 8e020aa29234..a6de6f311e51 100644 --- a/pkg/sql/scrub_test.go +++ b/pkg/sql/scrub_test.go @@ -38,7 +38,7 @@ import ( func TestScrubIndexMissingIndexEntry(t *testing.T) { defer leaktest.AfterTest(t)() s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) r := sqlutils.MakeSQLRunner(db) // Create the table and the row entry. @@ -74,7 +74,7 @@ INSERT INTO t."tEst" VALUES (10, 20); } // Delete the entry. - if err := kvDB.Del(context.TODO(), secondaryIndexKey[0].Key); err != nil { + if err := kvDB.Del(context.Background(), secondaryIndexKey[0].Key); err != nil { t.Fatalf("unexpected error: %s", err) } @@ -111,7 +111,7 @@ INSERT INTO t."tEst" VALUES (10, 20); func TestScrubIndexDanglingIndexReference(t *testing.T) { defer leaktest.AfterTest(t)() s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) // Create the table and the row entry. if _, err := db.Exec(` @@ -142,7 +142,7 @@ CREATE INDEX secondary ON t.test (v); } // Put the new secondary k/v into the database. - if err := kvDB.Put(context.TODO(), secondaryIndex[0].Key, &secondaryIndex[0].Value); err != nil { + if err := kvDB.Put(context.Background(), secondaryIndex[0].Key, &secondaryIndex[0].Value); err != nil { t.Fatalf("unexpected error: %s", err) } @@ -203,7 +203,7 @@ CREATE INDEX secondary ON t.test (v); func TestScrubIndexCatchesStoringMismatch(t *testing.T) { defer leaktest.AfterTest(t)() s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) // Create the table and the row entry. if _, err := db.Exec(` @@ -236,7 +236,7 @@ INSERT INTO t.test VALUES (10, 20, 1337); t.Fatalf("unexpected error: %s", err) } // Delete the existing secondary k/v. - if err := kvDB.Del(context.TODO(), secondaryIndex[0].Key); err != nil { + if err := kvDB.Del(context.Background(), secondaryIndex[0].Key); err != nil { t.Fatalf("unexpected error: %s", err) } @@ -248,7 +248,7 @@ INSERT INTO t.test VALUES (10, 20, 1337); t.Fatalf("unexpected error: %s", err) } // Put the incorrect secondary k/v. - if err := kvDB.Put(context.TODO(), secondaryIndex[0].Key, &secondaryIndex[0].Value); err != nil { + if err := kvDB.Put(context.Background(), secondaryIndex[0].Key, &secondaryIndex[0].Value); err != nil { t.Fatalf("unexpected error: %s", err) } @@ -323,7 +323,7 @@ INSERT INTO t.test VALUES (10, 20, 1337); func TestScrubCheckConstraint(t *testing.T) { defer leaktest.AfterTest(t)() s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) // Create the table and the row entry. if _, err := db.Exec(` @@ -368,7 +368,7 @@ INSERT INTO t.test VALUES (10, 2); value.SetTuple(valueBuf) // Overwrite the existing value. - if err := kvDB.Put(context.TODO(), primaryIndexKey, &value); err != nil { + if err := kvDB.Put(context.Background(), primaryIndexKey, &value); err != nil { t.Fatalf("unexpected error: %s", err) } // Run SCRUB and find the CHECK violation created. 
@@ -413,7 +413,7 @@ INSERT INTO t.test VALUES (10, 2); func TestScrubFKConstraintFKMissing(t *testing.T) { defer leaktest.AfterTest(t)() s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) r := sqlutils.MakeSQLRunner(db) // Create the table and the row entry. @@ -456,7 +456,7 @@ func TestScrubFKConstraintFKMissing(t *testing.T) { // Delete the existing secondary key entry, as we will later replace // it. - if err := kvDB.Del(context.TODO(), secondaryIndexKey[0].Key); err != nil { + if err := kvDB.Del(context.Background(), secondaryIndexKey[0].Key); err != nil { t.Fatalf("unexpected error: %s", err) } @@ -475,7 +475,7 @@ func TestScrubFKConstraintFKMissing(t *testing.T) { } // Add the new, replacement secondary index entry. - if err := kvDB.Put(context.TODO(), secondaryIndexKey[0].Key, &secondaryIndexKey[0].Value); err != nil { + if err := kvDB.Put(context.Background(), secondaryIndexKey[0].Key, &secondaryIndexKey[0].Value); err != nil { t.Fatalf("unexpected error: %s", err) } @@ -513,7 +513,7 @@ func TestScrubFKConstraintFKMissing(t *testing.T) { func TestScrubFKConstraintFKNulls(t *testing.T) { defer leaktest.AfterTest(t)() s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) // Create the table and the row entry. if _, err := db.Exec(` @@ -558,7 +558,7 @@ ALTER TABLE t.child ADD FOREIGN KEY (parent_id, parent_id2) REFERENCES t.parent func TestScrubPhysicalNonnullableNullInSingleColumnFamily(t *testing.T) { defer leaktest.AfterTest(t)() s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) // Create the table and the row entry. if _, err := db.Exec(` @@ -595,7 +595,7 @@ INSERT INTO t.test VALUES (217, 314); var value roachpb.Value value.SetTuple([]byte(nil)) - if err := kvDB.Put(context.TODO(), primaryIndexKey, &value); err != nil { + if err := kvDB.Put(context.Background(), primaryIndexKey, &value); err != nil { t.Fatalf("unexpected error: %s", err) } @@ -639,7 +639,7 @@ INSERT INTO t.test VALUES (217, 314); func TestScrubPhysicalNonnullableNullInMulticolumnFamily(t *testing.T) { defer leaktest.AfterTest(t)() s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) // Create the table and the row entry. if _, err := db.Exec(` @@ -686,7 +686,7 @@ INSERT INTO t.test VALUES (217, 314, 1337); value.SetTuple(valueBuf) // Overwrite the existing value. - if err := kvDB.Put(context.TODO(), primaryIndexKey, &value); err != nil { + if err := kvDB.Put(context.Background(), primaryIndexKey, &value); err != nil { t.Fatalf("unexpected error: %s", err) } @@ -732,7 +732,7 @@ func TestScrubPhysicalUnexpectedFamilyID(t *testing.T) { defer leaktest.AfterTest(t)() t.Skip("currently KV pairs with unexpected family IDs are not noticed by the fetcher") s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) // Create the table and the row entry. if _, err := db.Exec(` @@ -786,7 +786,7 @@ CREATE TABLE t.test ( value.SetTuple(valueBuf) // Insert the value. 
- if err := kvDB.Put(context.TODO(), primaryIndexKeyWithFamily, &value); err != nil { + if err := kvDB.Put(context.Background(), primaryIndexKeyWithFamily, &value); err != nil { t.Fatalf("unexpected error: %s", err) } @@ -804,7 +804,7 @@ CREATE TABLE t.test ( value.SetTuple(valueBuf) // Insert the incorrect family k/v. - if err := kvDB.Put(context.TODO(), primaryIndexKeyWithFamily, &value); err != nil { + if err := kvDB.Put(context.Background(), primaryIndexKeyWithFamily, &value); err != nil { t.Fatalf("unexpected error: %s", err) } @@ -849,7 +849,7 @@ func TestScrubPhysicalIncorrectPrimaryIndexValueColumn(t *testing.T) { defer leaktest.AfterTest(t)() t.Skip("the test is not failing, as it would be expected") s, db, kvDB := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) // Create the table and the row entry. if _, err := db.Exec(` @@ -897,7 +897,7 @@ CREATE TABLE t.test (k INT PRIMARY KEY, v1 INT, v2 INT); value.SetTuple(valueBuf) // Overwrite the existing value. - if err := kvDB.Put(context.TODO(), primaryIndexKey, &value); err != nil { + if err := kvDB.Put(context.Background(), primaryIndexKey, &value); err != nil { t.Fatalf("unexpected error: %s", err) } diff --git a/pkg/sql/sem/tree/eval_test.go b/pkg/sql/sem/tree/eval_test.go index 9fe37583cbbc..321e3e1d307f 100644 --- a/pkg/sql/sem/tree/eval_test.go +++ b/pkg/sql/sem/tree/eval_test.go @@ -279,7 +279,7 @@ func TestEval(t *testing.T) { func optBuildScalar(evalCtx *tree.EvalContext, e tree.Expr) (tree.TypedExpr, error) { var o xform.Optimizer o.Init(evalCtx, nil /* catalog */) - b := optbuilder.NewScalar(context.TODO(), &tree.SemaContext{}, evalCtx, o.Factory()) + b := optbuilder.NewScalar(context.Background(), &tree.SemaContext{}, evalCtx, o.Factory()) b.AllowUnsupportedExpr = true if err := b.Build(e); err != nil { return nil, err diff --git a/pkg/sql/show_stats_test.go b/pkg/sql/show_stats_test.go index 9355d38ff20b..70b891e13b20 100644 --- a/pkg/sql/show_stats_test.go +++ b/pkg/sql/show_stats_test.go @@ -26,7 +26,7 @@ func TestShowStatisticsJSON(t *testing.T) { defer leaktest.AfterTest(t)() s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) r := sqlutils.MakeSQLRunner(db) r.Exec(t, `SET CLUSTER SETTING sql.stats.automatic_collection.enabled = false`) diff --git a/pkg/sql/show_test.go b/pkg/sql/show_test.go index b31d203eb7d4..cbcb2fbf0290 100644 --- a/pkg/sql/show_test.go +++ b/pkg/sql/show_test.go @@ -38,7 +38,7 @@ func TestShowCreateTable(t *testing.T) { params, _ := tests.CreateTestServerParams() s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE d; @@ -338,7 +338,7 @@ func TestShowCreateView(t *testing.T) { params, _ := tests.CreateTestServerParams() s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE d; @@ -432,7 +432,7 @@ func TestShowCreateSequence(t *testing.T) { params, _ := tests.CreateTestServerParams() s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE d; @@ -601,7 +601,7 @@ func TestShowQueries(t *testing.T) { }, }, }) - defer 
tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) conn1 = tc.ServerConn(0) conn2 = tc.ServerConn(1) @@ -658,7 +658,7 @@ func TestShowSessions(t *testing.T) { var conn *gosql.DB tc := serverutils.StartTestCluster(t, 2 /* numNodes */, base.TestClusterArgs{}) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) conn = tc.ServerConn(0) sqlutils.CreateTable(t, conn, "t", "num INT", 0, nil) @@ -770,7 +770,7 @@ func TestShowSessionPrivileges(t *testing.T) { params.Insecure = true s, rawSQLDBroot, _ := serverutils.StartServer(t, params) sqlDBroot := sqlutils.MakeSQLRunner(rawSQLDBroot) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) // Prepare a non-root session. _ = sqlDBroot.Exec(t, `CREATE USER nonroot`) @@ -844,7 +844,7 @@ func TestLintClusterSettingNames(t *testing.T) { params, _ := tests.CreateTestServerParams() s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) rows, err := sqlDB.Query(`SELECT variable, setting_type, description FROM [SHOW ALL CLUSTER SETTINGS]`) if err != nil { diff --git a/pkg/sql/sort_test.go b/pkg/sql/sort_test.go index 83e227bdb7b0..64ce9a37bb67 100644 --- a/pkg/sql/sort_test.go +++ b/pkg/sql/sort_test.go @@ -24,7 +24,7 @@ func TestOrderByRandom(t *testing.T) { params, _ := tests.CreateTestServerParams() s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) seenOne := false seenTwo := false diff --git a/pkg/sql/span_builder_test.go b/pkg/sql/span_builder_test.go index e7be6e5b0b26..3fd5c6e4ca67 100644 --- a/pkg/sql/span_builder_test.go +++ b/pkg/sql/span_builder_test.go @@ -28,7 +28,7 @@ import ( func TestSpanBuilderCanSplitSpan(t *testing.T) { defer leaktest.AfterTest(t)() - ctx := context.TODO() + ctx := context.Background() params, _ := tests.CreateTestServerParams() s, sqlDB, kvDB := serverutils.StartServer(t, params) defer s.Stopper().Stop(ctx) diff --git a/pkg/sql/split_test.go b/pkg/sql/split_test.go index 9bcd1327b927..20b5df75d7ed 100644 --- a/pkg/sql/split_test.go +++ b/pkg/sql/split_test.go @@ -29,7 +29,7 @@ func TestSplitAt(t *testing.T) { params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) r := sqlutils.MakeSQLRunner(db) diff --git a/pkg/sql/sqlbase/structured_test.go b/pkg/sql/sqlbase/structured_test.go index b9c97fdd2612..2a645cea1204 100644 --- a/pkg/sql/sqlbase/structured_test.go +++ b/pkg/sql/sqlbase/structured_test.go @@ -1286,7 +1286,7 @@ func TestKeysPerRow(t *testing.T) { // a TableDescriptor. It should be possible to move MakeTableDesc into // sqlbase. If/when that happens, use it here instead of this server. 
s, conn, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := conn.Exec(`CREATE DATABASE d`); err != nil { t.Fatalf("%+v", err) } @@ -1359,7 +1359,7 @@ func TestColumnNeedsBackfill(t *testing.T) { func TestDefaultExprNil(t *testing.T) { s, conn, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := conn.Exec(`CREATE DATABASE t`); err != nil { t.Fatalf("%+v", err) } diff --git a/pkg/sql/table_ref_test.go b/pkg/sql/table_ref_test.go index 8a2ab1dd96f1..7528bcd6c64d 100644 --- a/pkg/sql/table_ref_test.go +++ b/pkg/sql/table_ref_test.go @@ -27,7 +27,7 @@ func TestTableRefs(t *testing.T) { params, _ := tests.CreateTestServerParams() s, db, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) // Populate the test database. stmt := ` diff --git a/pkg/sql/table_test.go b/pkg/sql/table_test.go index 4a3afabf0b10..e4e5e02f99ce 100644 --- a/pkg/sql/table_test.go +++ b/pkg/sql/table_test.go @@ -188,7 +188,7 @@ func TestMakeTableDescColumns(t *testing.T) { } for i, d := range testData { s := "CREATE TABLE foo.test (a " + d.sqlType + " PRIMARY KEY, b " + d.sqlType + ")" - schema, err := CreateTestTableDescriptor(context.TODO(), 1, 100, s, sqlbase.NewDefaultPrivilegeDescriptor()) + schema, err := CreateTestTableDescriptor(context.Background(), 1, 100, s, sqlbase.NewDefaultPrivilegeDescriptor()) if err != nil { t.Fatalf("%d: %v", i, err) } @@ -302,7 +302,7 @@ func TestMakeTableDescIndexes(t *testing.T) { } for i, d := range testData { s := "CREATE TABLE foo.test (" + d.sql + ")" - schema, err := CreateTestTableDescriptor(context.TODO(), 1, 100, s, sqlbase.NewDefaultPrivilegeDescriptor()) + schema, err := CreateTestTableDescriptor(context.Background(), 1, 100, s, sqlbase.NewDefaultPrivilegeDescriptor()) if err != nil { t.Fatalf("%d (%s): %v", i, d.sql, err) } @@ -319,7 +319,7 @@ func TestMakeTableDescIndexes(t *testing.T) { func TestPrimaryKeyUnspecified(t *testing.T) { defer leaktest.AfterTest(t)() s := "CREATE TABLE foo.test (a INT, b INT, CONSTRAINT c UNIQUE (b))" - desc, err := CreateTestTableDescriptor(context.TODO(), 1, 100, s, sqlbase.NewDefaultPrivilegeDescriptor()) + desc, err := CreateTestTableDescriptor(context.Background(), 1, 100, s, sqlbase.NewDefaultPrivilegeDescriptor()) if err != nil { t.Fatal(err) } diff --git a/pkg/sql/temporary_schema_test.go b/pkg/sql/temporary_schema_test.go index 9f3d7616772d..5c1e053dcb0f 100644 --- a/pkg/sql/temporary_schema_test.go +++ b/pkg/sql/temporary_schema_test.go @@ -163,7 +163,7 @@ func TestTemporaryObjectCleaner(t *testing.T) { }, }, ) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) // Start and close two temporary schemas. for _, dbID := range []int{0, 1} { @@ -232,7 +232,7 @@ func TestTemporarySchemaDropDatabase(t *testing.T) { }, }, ) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) // Create a database to drop that has a temporary table inside. 
{ diff --git a/pkg/sql/tests/impure_builtin_test.go b/pkg/sql/tests/impure_builtin_test.go index 48b4cd3559ea..a6f1f0939869 100644 --- a/pkg/sql/tests/impure_builtin_test.go +++ b/pkg/sql/tests/impure_builtin_test.go @@ -28,7 +28,7 @@ func TestClusterID(t *testing.T) { ReplicationMode: base.ReplicationAuto, } tc := testcluster.StartTestCluster(t, 3, testClusterArgs) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) for i := 0; i < 3; i++ { db := sqlutils.MakeSQLRunner(tc.Conns[i]) diff --git a/pkg/sql/tests/inverted_index_test.go b/pkg/sql/tests/inverted_index_test.go index 5b5cdb845986..5e5374e363aa 100644 --- a/pkg/sql/tests/inverted_index_test.go +++ b/pkg/sql/tests/inverted_index_test.go @@ -33,7 +33,7 @@ func TestInvertedIndex(t *testing.T) { defer leaktest.AfterTest(t)() tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{}) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) db := sqlutils.MakeSQLRunner(tc.Conns[0]) diff --git a/pkg/sql/tests/kv_test.go b/pkg/sql/tests/kv_test.go index 7fc84e9640a2..3e81894fa3f1 100644 --- a/pkg/sql/tests/kv_test.go +++ b/pkg/sql/tests/kv_test.go @@ -54,7 +54,7 @@ func newKVNative(b *testing.B) kvInterface { return &kvNative{ db: db, doneFn: func() { - s.Stopper().Stop(context.TODO()) + s.Stopper().Stop(context.Background()) }, } } @@ -62,7 +62,7 @@ func newKVNative(b *testing.B) kvInterface { func (kv *kvNative) Insert(rows, run int) error { firstRow := rows * run lastRow := rows * (run + 1) - err := kv.db.Txn(context.TODO(), func(ctx context.Context, txn *kv2.Txn) error { + err := kv.db.Txn(context.Background(), func(ctx context.Context, txn *kv2.Txn) error { b := txn.NewBatch() for i := firstRow; i < lastRow; i++ { b.Put(fmt.Sprintf("%s%08d", kv.prefix, i), i) @@ -74,7 +74,7 @@ func (kv *kvNative) Insert(rows, run int) error { func (kv *kvNative) Update(rows, run int) error { perm := rand.Perm(rows) - err := kv.db.Txn(context.TODO(), func(ctx context.Context, txn *kv2.Txn) error { + err := kv.db.Txn(context.Background(), func(ctx context.Context, txn *kv2.Txn) error { // Read all values in a batch. 
b := txn.NewBatch() for i := 0; i < rows; i++ { @@ -97,7 +97,7 @@ func (kv *kvNative) Update(rows, run int) error { func (kv *kvNative) Delete(rows, run int) error { firstRow := rows * run lastRow := rows * (run + 1) - err := kv.db.Txn(context.TODO(), func(ctx context.Context, txn *kv2.Txn) error { + err := kv.db.Txn(context.Background(), func(ctx context.Context, txn *kv2.Txn) error { b := txn.NewBatch() for i := firstRow; i < lastRow; i++ { b.Del(fmt.Sprintf("%s%08d", kv.prefix, i)) @@ -109,7 +109,7 @@ func (kv *kvNative) Delete(rows, run int) error { func (kv *kvNative) Scan(rows, run int) error { var kvs []kv2.KeyValue - err := kv.db.Txn(context.TODO(), func(ctx context.Context, txn *kv2.Txn) error { + err := kv.db.Txn(context.Background(), func(ctx context.Context, txn *kv2.Txn) error { var err error kvs, err = txn.Scan(ctx, fmt.Sprintf("%s%08d", kv.prefix, 0), fmt.Sprintf("%s%08d", kv.prefix, rows), int64(rows)) return err @@ -126,7 +126,7 @@ func (kv *kvNative) prep(rows int, initData bool) error { if !initData { return nil } - err := kv.db.Txn(context.TODO(), func(ctx context.Context, txn *kv2.Txn) error { + err := kv.db.Txn(context.Background(), func(ctx context.Context, txn *kv2.Txn) error { b := txn.NewBatch() for i := 0; i < rows; i++ { b.Put(fmt.Sprintf("%s%08d", kv.prefix, i), i) @@ -157,7 +157,7 @@ func newKVSQL(b *testing.B) kvInterface { kv := &kvSQL{} kv.db = db kv.doneFn = func() { - s.Stopper().Stop(context.TODO()) + s.Stopper().Stop(context.Background()) } return kv } diff --git a/pkg/sql/tests/split_test.go b/pkg/sql/tests/split_test.go index ebe9bb139fe0..50cdfd96e75f 100644 --- a/pkg/sql/tests/split_test.go +++ b/pkg/sql/tests/split_test.go @@ -29,7 +29,7 @@ import ( // getRangeKeys returns the end keys of all ranges. func getRangeKeys(db *kv.DB) ([]roachpb.Key, error) { - rows, err := db.Scan(context.TODO(), keys.Meta2Prefix, keys.MetaMax, 0) + rows, err := db.Scan(context.Background(), keys.Meta2Prefix, keys.MetaMax, 0) if err != nil { return nil, err } @@ -71,7 +71,7 @@ func TestSplitOnTableBoundaries(t *testing.T) { params.ScanMinIdleTime = time.Millisecond params.ScanMaxIdleTime = time.Millisecond s, sqlDB, kvDB := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) expectedInitialRanges, err := server.ExpectedInitialRangeCount(kvDB, &s.(*server.TestServer).Cfg.DefaultZoneConfig, &s.(*server.TestServer).Cfg.DefaultSystemZoneConfig) if err != nil { diff --git a/pkg/sql/tests/system_table_test.go b/pkg/sql/tests/system_table_test.go index 9b5bd09b0630..403ae1eb4bb5 100644 --- a/pkg/sql/tests/system_table_test.go +++ b/pkg/sql/tests/system_table_test.go @@ -56,7 +56,7 @@ func TestInitialKeys(t *testing.T) { // Add an additional table. 
sqlbase.SystemAllowedPrivileges[keys.MaxReservedDescID] = privilege.List{privilege.ALL} desc, err := sql.CreateTestTableDescriptor( - context.TODO(), + context.Background(), keys.SystemDatabaseID, keys.MaxReservedDescID, "CREATE TABLE system.x (val INTEGER PRIMARY KEY)", @@ -184,7 +184,7 @@ func TestSystemTableLiterals(t *testing.T) { } { privs := *test.pkg.Privileges gen, err := sql.CreateTestTableDescriptor( - context.TODO(), + context.Background(), keys.SystemDatabaseID, test.id, test.schema, diff --git a/pkg/sql/tests/table_split_test.go b/pkg/sql/tests/table_split_test.go index 6b63cf2f1aa7..dedc133bf0b0 100644 --- a/pkg/sql/tests/table_split_test.go +++ b/pkg/sql/tests/table_split_test.go @@ -31,7 +31,7 @@ func TestSplitAtTableBoundary(t *testing.T) { ReplicationMode: base.ReplicationAuto, } tc := testcluster.StartTestCluster(t, 3, testClusterArgs) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) runner := sqlutils.MakeSQLRunner(tc.Conns[0]) runner.Exec(t, `CREATE DATABASE test`) @@ -53,7 +53,7 @@ SELECT tables.id FROM system.namespace tables t.Fatal(err) } if !desc.StartKey.Equal(tableStartKey) { - log.Infof(context.TODO(), "waiting on split results") + log.Infof(context.Background(), "waiting on split results") return errors.Errorf("expected range start key %s; got %s", tableStartKey, desc.StartKey) } return nil diff --git a/pkg/sql/trace_test.go b/pkg/sql/trace_test.go index 70577049f98b..35007a47aa70 100644 --- a/pkg/sql/trace_test.go +++ b/pkg/sql/trace_test.go @@ -250,7 +250,7 @@ func TestTrace(t *testing.T) { // Create a cluster. We'll run sub-tests using each node of this cluster. const numNodes = 3 cluster := serverutils.StartTestCluster(t, numNodes, base.TestClusterArgs{}) - defer cluster.Stopper().Stop(context.TODO()) + defer cluster.Stopper().Stop(context.Background()) clusterDB := cluster.ServerConn(0) if _, err := clusterDB.Exec(` @@ -405,7 +405,7 @@ func TestTraceFieldDecomposition(t *testing.T) { }, } s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) sqlDB.SetMaxOpenConns(1) @@ -515,7 +515,7 @@ func TestKVTraceWithCountStar(t *testing.T) { // Test that we don't crash if we try to do a KV trace // on a COUNT(*) query (#19846). 
s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) r := sqlutils.MakeSQLRunner(db) r.Exec(t, "CREATE DATABASE test") @@ -535,7 +535,7 @@ func TestKVTraceDistSQL(t *testing.T) { UseDatabase: "test", }, }) - defer cluster.Stopper().Stop(context.TODO()) + defer cluster.Stopper().Stop(context.Background()) r := sqlutils.MakeSQLRunner(cluster.ServerConn(0)) r.Exec(t, "CREATE DATABASE test") diff --git a/pkg/sql/txn_restart_test.go b/pkg/sql/txn_restart_test.go index cc0fa80322f7..93b409ce2eab 100644 --- a/pkg/sql/txn_restart_test.go +++ b/pkg/sql/txn_restart_test.go @@ -210,7 +210,7 @@ func checkRestarts(t *testing.T, magicVals *filterVals) { // params, cmdFilters := tests.CreateTestServerParams() // params.Knobs.SQLExecutor = aborter.executorKnobs() // s, sqlDB, _ := serverutils.StartServer(t, params) -// defer s.Stopper().Stop(context.TODO()) +// defer s.Stopper().Stop(context.Background()) // { // pgURL, cleanup := sqlutils.PGUrl(t, s.ServingRPCAddr(), "TestTxnAutoRetry", url.User(security.RootUser) // defer cleanup() @@ -448,7 +448,7 @@ func TestTxnAutoRetry(t *testing.T) { params, cmdFilters := tests.CreateTestServerParams() params.Knobs.SQLExecutor = aborter.executorKnobs() s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) { pgURL, cleanup := sqlutils.PGUrl(t, s.ServingSQLAddr(), "TestTxnAutoRetry", url.User(security.RootUser)) defer cleanup() @@ -625,7 +625,7 @@ func TestAbortedTxnOnlyRetriedOnce(t *testing.T) { params, _ := tests.CreateTestServerParams() params.Knobs.SQLExecutor = aborter.executorKnobs() s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) { pgURL, cleanup := sqlutils.PGUrl(t, s.ServingSQLAddr(), "TestAbortedTxnOnlyRetriedOnce", url.User(security.RootUser)) defer cleanup() @@ -780,7 +780,7 @@ func TestTxnUserRestart(t *testing.T) { params, cmdFilters := tests.CreateTestServerParams() params.Knobs.SQLExecutor = aborter.executorKnobs() s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) { pgURL, cleanup := sqlutils.PGUrl(t, s.ServingSQLAddr(), "TestTxnUserRestart", url.User(security.RootUser)) defer cleanup() @@ -858,7 +858,7 @@ func TestCommitWaitState(t *testing.T) { params, _ := tests.CreateTestServerParams() s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) if _, err := sqlDB.Exec(` CREATE DATABASE t; CREATE TABLE t.test (k INT PRIMARY KEY, v TEXT); `); err != nil { @@ -894,7 +894,7 @@ func TestErrorOnCommitFinalizesTxn(t *testing.T) { params, _ := tests.CreateTestServerParams() params.Knobs.SQLExecutor = aborter.executorKnobs() s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) { pgURL, cleanup := sqlutils.PGUrl(t, s.ServingSQLAddr(), "TestErrorOnCommitFinalizesTxn", url.User(security.RootUser)) defer cleanup() @@ -980,7 +980,7 @@ func TestRollbackInRestartWait(t *testing.T) { params, _ := tests.CreateTestServerParams() params.Knobs.SQLExecutor = aborter.executorKnobs() s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) { pgURL, cleanup := sqlutils.PGUrl(t, 
s.ServingSQLAddr(), "TestRollbackInRestartWait", url.User(security.RootUser)) defer cleanup() @@ -1039,7 +1039,7 @@ func TestUnexpectedStatementInRestartWait(t *testing.T) { params, _ := tests.CreateTestServerParams() s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) tx, err := sqlDB.Begin() if err != nil { @@ -1089,7 +1089,7 @@ func TestNonRetryableError(t *testing.T) { params, cmdFilters := tests.CreateTestServerParams() s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) testKey := []byte("test_key") hitError := false @@ -1171,7 +1171,7 @@ func TestReacquireLeaseOnRestart(t *testing.T) { params.Knobs.Store = storeTestingKnobs params.Knobs.KVClient = clientTestingKnobs s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) var restartDone int32 cleanupFilter := cmdFilters.AppendFilter( @@ -1243,7 +1243,7 @@ func TestFlushUncommitedDescriptorCacheOnRestart(t *testing.T) { params, _ := tests.CreateTestServerParams() params.Knobs.Store = testingKnobs s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) var restartDone int32 cleanupFilter := cmdFilters.AppendFilter( @@ -1332,7 +1332,7 @@ func TestDistSQLRetryableError(t *testing.T) { }, }, }) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) db := tc.ServerConn(0) sqlutils.CreateTable(t, db, "t", @@ -1432,7 +1432,7 @@ func TestRollbackToSavepointFromUnusualStates(t *testing.T) { params, _ := tests.CreateTestServerParams() s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) checkState := func(tx *gosql.Tx, ts time.Time) { t.Helper() @@ -1494,7 +1494,7 @@ func TestTxnAutoRetriesDisabledAfterResultsHaveBeenSentToClient(t *testing.T) { params, _ := tests.CreateTestServerParams() s, sqlDB, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) tests := []struct { name string diff --git a/pkg/sql/txn_state_test.go b/pkg/sql/txn_state_test.go index 2b1948e03a8e..f4975792337c 100644 --- a/pkg/sql/txn_state_test.go +++ b/pkg/sql/txn_state_test.go @@ -72,7 +72,7 @@ func makeTestContext() testContext { settings, ), tracer: ambient.Tracer, - ctx: context.TODO(), + ctx: context.Background(), settings: settings, } } @@ -206,7 +206,7 @@ func checkTxn(txn *kv.Txn, exp expKVTxn) error { func TestTransitions(t *testing.T) { defer leaktest.AfterTest(t)() - ctx := context.TODO() + ctx := context.Background() dummyRewCap := rewindCapability{rewindPos: CmdPos(12)} testCon := makeTestContext() tranCtx := transitionCtx{ diff --git a/pkg/sql/unsplit_test.go b/pkg/sql/unsplit_test.go index 13a6721551a2..a57d28dc2202 100644 --- a/pkg/sql/unsplit_test.go +++ b/pkg/sql/unsplit_test.go @@ -45,7 +45,7 @@ func TestUnsplitAt(t *testing.T) { }, } s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) r := sqlutils.MakeSQLRunner(db) diff --git a/pkg/sql/upsert_test.go b/pkg/sql/upsert_test.go index ddfc18861582..52cca6da1037 100644 --- a/pkg/sql/upsert_test.go +++ b/pkg/sql/upsert_test.go @@ -57,7 +57,7 @@ func TestUpsertFastPath(t *testing.T) { }, }}, }) - defer 
s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) sqlDB := sqlutils.MakeSQLRunner(conn) sqlDB.Exec(t, `CREATE DATABASE d`) sqlDB.Exec(t, `CREATE TABLE d.kv (k INT PRIMARY KEY, v INT)`) @@ -133,7 +133,7 @@ func TestConcurrentUpsert(t *testing.T) { defer leaktest.AfterTest(t)() s, conn, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) sqlDB := sqlutils.MakeSQLRunner(conn) sqlDB.Exec(t, `CREATE DATABASE d`) diff --git a/pkg/sql/values_test.go b/pkg/sql/values_test.go index 2d97174df4f5..7a5687881c8e 100644 --- a/pkg/sql/values_test.go +++ b/pkg/sql/values_test.go @@ -132,7 +132,7 @@ func TestValues(t *testing.T) { }, } - ctx := context.TODO() + ctx := context.Background() for i, tc := range testCases { t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { plan, err := func() (_ planNode, err error) { @@ -141,7 +141,7 @@ func TestValues(t *testing.T) { err = errors.Errorf("%v", r) } }() - return p.Values(context.TODO(), tc.stmt, nil) + return p.Values(context.Background(), tc.stmt, nil) }() if plan != nil { defer plan.Close(ctx) diff --git a/pkg/sql/zone_config_test.go b/pkg/sql/zone_config_test.go index 3fca8f77e693..e26caca8823f 100644 --- a/pkg/sql/zone_config_test.go +++ b/pkg/sql/zone_config_test.go @@ -51,7 +51,7 @@ func forceNewConfig(t testing.TB, s *server.TestServer) *config.SystemConfig { } // This needs to be done in a transaction with the system trigger set. - if err := s.DB().Txn(context.TODO(), func(ctx context.Context, txn *kv.Txn) error { + if err := s.DB().Txn(context.Background(), func(ctx context.Context, txn *kv.Txn) error { if err := txn.SetSystemConfigTrigger(); err != nil { return err } @@ -100,7 +100,7 @@ func TestGetZoneConfig(t *testing.T) { } srv, sqlDB, _ := serverutils.StartServer(t, params) - defer srv.Stopper().Stop(context.TODO()) + defer srv.Stopper().Stop(context.Background()) s := srv.(*server.TestServer) expectedCounter := uint32(keys.MinNonPredefinedUserDescID) @@ -336,7 +336,7 @@ func TestCascadingZoneConfig(t *testing.T) { } srv, sqlDB, _ := serverutils.StartServer(t, params) - defer srv.Stopper().Stop(context.TODO()) + defer srv.Stopper().Stop(context.Background()) s := srv.(*server.TestServer) expectedCounter := uint32(keys.MinNonPredefinedUserDescID) @@ -638,7 +638,7 @@ func BenchmarkGetZoneConfig(b *testing.B) { params, _ := tests.CreateTestServerParams() srv, _, _ := serverutils.StartServer(b, params) - defer srv.Stopper().Stop(context.TODO()) + defer srv.Stopper().Stop(context.Background()) s := srv.(*server.TestServer) cfg := forceNewConfig(b, s) diff --git a/pkg/sql/zone_test.go b/pkg/sql/zone_test.go index eb5795a2eff4..739a101a8647 100644 --- a/pkg/sql/zone_test.go +++ b/pkg/sql/zone_test.go @@ -32,7 +32,7 @@ func TestValidSetShowZones(t *testing.T) { params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) sqlDB := sqlutils.MakeSQLRunner(db) sqlDB.Exec(t, `CREATE DATABASE d; USE d; CREATE TABLE t ();`) @@ -234,7 +234,7 @@ func TestZoneInheritField(t *testing.T) { params, _ := tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) sqlDB := sqlutils.MakeSQLRunner(db) sqlutils.RemoveAllZoneConfigs(t, sqlDB) @@ -280,7 +280,7 @@ func TestInvalidSetShowZones(t *testing.T) { params, _ := 
tests.CreateTestServerParams() s, db, _ := serverutils.StartServer(t, params) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) for i, tc := range []struct { query string diff --git a/pkg/sqlmigrations/migrations_test.go b/pkg/sqlmigrations/migrations_test.go index 04ebd4ba0626..30dd9b42eee7 100644 --- a/pkg/sqlmigrations/migrations_test.go +++ b/pkg/sqlmigrations/migrations_test.go @@ -143,7 +143,7 @@ func TestEnsureMigrations(t *testing.T) { db: db, codec: codec, } - defer mgr.stopper.Stop(context.TODO()) + defer mgr.stopper.Stop(context.Background()) fnGotCalled := false fnGotCalledDescriptor := migrationDescriptor{ @@ -318,7 +318,7 @@ func TestDBErrors(t *testing.T) { db: db, codec: codec, } - defer mgr.stopper.Stop(context.TODO()) + defer mgr.stopper.Stop(context.Background()) migration := noopMigration1 defer func(prev []migrationDescriptor) { backwardCompatibleMigrations = prev }(backwardCompatibleMigrations) @@ -384,7 +384,7 @@ func TestLeaseErrors(t *testing.T) { db: db, codec: codec, } - defer mgr.stopper.Stop(context.TODO()) + defer mgr.stopper.Stop(context.Background()) migration := noopMigration1 defer func(prev []migrationDescriptor) { backwardCompatibleMigrations = prev }(backwardCompatibleMigrations) @@ -413,7 +413,7 @@ func TestLeaseExpiration(t *testing.T) { db: db, codec: codec, } - defer mgr.stopper.Stop(context.TODO()) + defer mgr.stopper.Stop(context.Background()) oldLeaseRefreshInterval := leaseRefreshInterval leaseRefreshInterval = time.Microsecond diff --git a/pkg/storage/batch_test.go b/pkg/storage/batch_test.go index 7b53532c8a46..74ad358c8640 100644 --- a/pkg/storage/batch_test.go +++ b/pkg/storage/batch_test.go @@ -791,7 +791,7 @@ func TestBatchBuilder(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) e := newRocksDBInMem(roachpb.Attributes{}, 1<<20) stopper.AddCloser(e) @@ -847,7 +847,7 @@ func TestBatchBuilderStress(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) e := newRocksDBInMem(roachpb.Attributes{}, 1<<20) stopper.AddCloser(e) diff --git a/pkg/storage/cloud/external_storage_test.go b/pkg/storage/cloud/external_storage_test.go index d889b9898fb9..8fa72ccf7122 100644 --- a/pkg/storage/cloud/external_storage_test.go +++ b/pkg/storage/cloud/external_storage_test.go @@ -76,7 +76,7 @@ func testExportStore(t *testing.T, storeURI string, skipSingleFile bool) { func testExportStoreWithExternalIOConfig( t *testing.T, ioConf base.ExternalIOConfig, storeURI string, skipSingleFile bool, ) { - ctx := context.TODO() + ctx := context.Background() conf, err := ExternalStorageConfFromURI(storeURI) if err != nil { @@ -221,7 +221,7 @@ func testExportStoreWithExternalIOConfig( } func testListFiles(t *testing.T, storeURI string) { - ctx := context.TODO() + ctx := context.Background() dataLetterFiles := []string{"file/letters/dataA.csv", "file/letters/dataB.csv", "file/letters/dataC.csv"} dataNumberFiles := []string{"file/numbers/data1.csv", "file/numbers/data2.csv", "file/numbers/data3.csv"} letterFiles := []string{"file/abc/A.csv", "file/abc/B.csv", "file/abc/C.csv"} @@ -433,7 +433,7 @@ func TestPutGoogleCloud(t *testing.T) { }) t.Run("implicit", func(t *testing.T) { // Only test these if they exist. 
- if _, err := google.FindDefaultCredentials(context.TODO()); err != nil { + if _, err := google.FindDefaultCredentials(context.Background()); err != nil { t.Skip(err) } testExportStore(t, diff --git a/pkg/storage/cloud/gcs_storage_test.go b/pkg/storage/cloud/gcs_storage_test.go index de7f2401b752..0acbda241b43 100644 --- a/pkg/storage/cloud/gcs_storage_test.go +++ b/pkg/storage/cloud/gcs_storage_test.go @@ -102,9 +102,9 @@ func TestAntagonisticRead(t *testing.T) { require.NoError(t, err) s, err := MakeExternalStorage( - context.TODO(), conf, base.ExternalIOConfig{}, testSettings, nil) + context.Background(), conf, base.ExternalIOConfig{}, testSettings, nil) require.NoError(t, err) - stream, err := s.ReadFile(context.TODO(), "") + stream, err := s.ReadFile(context.Background(), "") require.NoError(t, err) defer stream.Close() _, err = ioutil.ReadAll(stream) diff --git a/pkg/storage/cloud/http_storage_test.go b/pkg/storage/cloud/http_storage_test.go index 7a1255372a83..f6570e244cb0 100644 --- a/pkg/storage/cloud/http_storage_test.go +++ b/pkg/storage/cloud/http_storage_test.go @@ -140,7 +140,7 @@ func TestPutHttp(t *testing.T) { // Ensure that servers that error on HEAD are handled gracefully. t.Run("bad-head-response", func(t *testing.T) { - ctx := context.TODO() + ctx := context.Background() srv, _, cleanup := makeServer() defer cleanup() @@ -292,7 +292,7 @@ func TestCanDisableHttp(t *testing.T) { DisableHTTP: true, } s, err := MakeExternalStorage( - context.TODO(), + context.Background(), roachpb.ExternalStorage{Provider: roachpb.ExternalStorageProvider_Http}, conf, testSettings, blobs.TestEmptyBlobClientFactory) @@ -320,9 +320,9 @@ func TestExternalStorageCanUseHTTPProxy(t *testing.T) { conf, err := ExternalStorageConfFromURI("http://my-server") require.NoError(t, err) s, err := MakeExternalStorage( - context.TODO(), conf, base.ExternalIOConfig{}, testSettings, nil) + context.Background(), conf, base.ExternalIOConfig{}, testSettings, nil) require.NoError(t, err) - stream, err := s.ReadFile(context.TODO(), "file") + stream, err := s.ReadFile(context.Background(), "file") require.NoError(t, err) defer stream.Close() data, err := ioutil.ReadAll(stream) diff --git a/pkg/storage/cloud/nodelocal_storage_test.go b/pkg/storage/cloud/nodelocal_storage_test.go index 8d195071e57f..97a7cbfd95a1 100644 --- a/pkg/storage/cloud/nodelocal_storage_test.go +++ b/pkg/storage/cloud/nodelocal_storage_test.go @@ -37,7 +37,7 @@ func TestPutLocal(t *testing.T) { func TestLocalIOLimits(t *testing.T) { defer leaktest.AfterTest(t)() - ctx := context.TODO() + ctx := context.Background() const allowed = "/allowed" testSettings.ExternalIODir = allowed diff --git a/pkg/storage/cloud/s3_storage_test.go b/pkg/storage/cloud/s3_storage_test.go index 06b5d8118e39..30971790ba7c 100644 --- a/pkg/storage/cloud/s3_storage_test.go +++ b/pkg/storage/cloud/s3_storage_test.go @@ -41,7 +41,7 @@ func TestPutS3(t *testing.T) { t.Skip("AWS_S3_BUCKET env var must be set") } - ctx := context.TODO() + ctx := context.Background() t.Run("auth-empty-no-cred", func(t *testing.T) { _, err := ExternalStorageFromURI( ctx, fmt.Sprintf("s3://%s/%s", bucket, "backup-test-default"), @@ -131,7 +131,7 @@ func TestPutS3Endpoint(t *testing.T) { } func TestS3DisallowCustomEndpoints(t *testing.T) { - s3, err := makeS3Storage(context.TODO(), + s3, err := makeS3Storage(context.Background(), base.ExternalIOConfig{DisableHTTP: true}, &roachpb.ExternalStorage_S3{Endpoint: "http://do.not.go.there/"}, nil, ) @@ -141,7 +141,7 @@ func 
TestS3DisallowCustomEndpoints(t *testing.T) { func TestS3DisallowImplicitCredentials(t *testing.T) { defer leaktest.AfterTest(t)() - s3, err := makeS3Storage(context.TODO(), + s3, err := makeS3Storage(context.Background(), base.ExternalIOConfig{DisableImplicitCredentials: true}, &roachpb.ExternalStorage_S3{ Endpoint: "http://do-not-go-there", diff --git a/pkg/storage/mvcc_test.go b/pkg/storage/mvcc_test.go index 19aebf226406..f11c623bc376 100644 --- a/pkg/storage/mvcc_test.go +++ b/pkg/storage/mvcc_test.go @@ -1394,7 +1394,7 @@ func TestMVCCPutAfterBatchIterCreate(t *testing.T) { iter.Next() // key2/5 // Lay down an intent on key3, which will go at key3/0 and sort before key3/5. - err = MVCCDelete(context.TODO(), batch, nil, testKey3, txn.WriteTimestamp, txn) + err = MVCCDelete(context.Background(), batch, nil, testKey3, txn.WriteTimestamp, txn) if err != nil { t.Fatal(err) } diff --git a/pkg/storage/rocksdb_test.go b/pkg/storage/rocksdb_test.go index dfa2088ee898..8089befed41d 100644 --- a/pkg/storage/rocksdb_test.go +++ b/pkg/storage/rocksdb_test.go @@ -1655,7 +1655,7 @@ func TestRocksDBGlobalSeqnumIssue(t *testing.T) { // When this file is ingested, it'll be added to L0, since it overlaps in key // bounds (but not actual keys) with the SSTable flushed earlier. - if err := db.IngestExternalFiles(context.TODO(), []string{sstFilePath}); err != nil { + if err := db.IngestExternalFiles(context.Background(), []string{sstFilePath}); err != nil { t.Fatal(err) } snapshot.Close() diff --git a/pkg/storage/temp_dir_test.go b/pkg/storage/temp_dir_test.go index b0123969764d..00dc4c5f8a97 100644 --- a/pkg/storage/temp_dir_test.go +++ b/pkg/storage/temp_dir_test.go @@ -27,7 +27,7 @@ func TestCreateTempDir(t *testing.T) { defer leaktest.AfterTest(t)() stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) + defer stopper.Stop(context.Background()) // Temporary parent directory to test this. 
dir, err := ioutil.TempDir("", "") if err != nil { diff --git a/pkg/testutils/lint/lint_test.go b/pkg/testutils/lint/lint_test.go index 2a80d32d3c02..685a3d88d467 100644 --- a/pkg/testutils/lint/lint_test.go +++ b/pkg/testutils/lint/lint_test.go @@ -287,6 +287,38 @@ func TestLint(t *testing.T) { } }) + t.Run("TestNoContextTODOInTests", func(t *testing.T) { + t.Parallel() + cmd, stderr, filter, err := dirCmd( + pkgDir, + "git", + "grep", + "-nE", + `context.TODO\(\)`, + "--", + "*_test.go", + ) + if err != nil { + t.Fatal(err) + } + + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + + if err := stream.ForEach(filter, func(s string) { + t.Errorf("\n%s <- forbidden; use context.Background() in tests.", s) + }); err != nil { + t.Error(err) + } + + if err := cmd.Wait(); err != nil { + if out := stderr.String(); len(out) > 0 { + t.Fatalf("err=%s, stderr=%s", err, out) + } + } + }) + t.Run("TestTabsInShellScripts", func(t *testing.T) { t.Parallel() cmd, stderr, filter, err := dirCmd(pkgDir, "git", "grep", "-nE", "^ *\t", "--", "*.sh") diff --git a/pkg/testutils/sqlutils/sql_runner_test.go b/pkg/testutils/sqlutils/sql_runner_test.go index 16a8ed6fc495..c469c09cd83e 100644 --- a/pkg/testutils/sqlutils/sql_runner_test.go +++ b/pkg/testutils/sqlutils/sql_runner_test.go @@ -26,7 +26,7 @@ func TestRowsToStrMatrixError(t *testing.T) { defer leaktest.AfterTest(t)() s, db, _ := serverutils.StartServer(t, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) // We'll run a query that only fails after returning some rows, so that the // error is discovered by RowsToStrMatrix below. diff --git a/pkg/testutils/testcluster/testcluster_test.go b/pkg/testutils/testcluster/testcluster_test.go index 84efcfc5e7c3..fc36317a3268 100644 --- a/pkg/testutils/testcluster/testcluster_test.go +++ b/pkg/testutils/testcluster/testcluster_test.go @@ -38,7 +38,7 @@ func TestManualReplication(t *testing.T) { UseDatabase: "t", }, }) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) s0 := sqlutils.MakeSQLRunner(tc.Conns[0]) s1 := sqlutils.MakeSQLRunner(tc.Conns[1]) @@ -139,7 +139,7 @@ func TestBasicManualReplication(t *testing.T) { defer leaktest.AfterTest(t)() tc := StartTestCluster(t, 3, base.TestClusterArgs{ReplicationMode: base.ReplicationManual}) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) desc, err := tc.AddReplicas(keys.MinKey, tc.Target(1), tc.Target(2)) if err != nil { @@ -171,7 +171,7 @@ func TestBasicAutoReplication(t *testing.T) { defer leaktest.AfterTest(t)() tc := StartTestCluster(t, 3, base.TestClusterArgs{ReplicationMode: base.ReplicationAuto}) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) // NB: StartTestCluster will wait for full replication. } @@ -187,7 +187,7 @@ func TestStopServer(t *testing.T) { }, ReplicationMode: base.ReplicationAuto, }) - defer tc.Stopper().Stop(context.TODO()) + defer tc.Stopper().Stop(context.Background()) // Connect to server 1, ensure it is answering requests over HTTP and GRPC. 
server1 := tc.Server(1) diff --git a/pkg/ts/db_test.go b/pkg/ts/db_test.go index 1a6550c92985..72f2495f7c52 100644 --- a/pkg/ts/db_test.go +++ b/pkg/ts/db_test.go @@ -280,11 +280,11 @@ func (tm *testModelRunner) storeTimeSeriesData(r Resolution, data []tspb.TimeSer for _, d := range data { rdata = append(rdata, computeRollupsFromData(d, r.SampleDuration())) } - if err := tm.DB.storeRollup(context.TODO(), r, rdata); err != nil { + if err := tm.DB.storeRollup(context.Background(), r, rdata); err != nil { tm.t.Fatalf("error storing time series rollups: %s", err) } } else { - if err := tm.DB.StoreData(context.TODO(), r, data); err != nil { + if err := tm.DB.StoreData(context.Background(), r, data); err != nil { tm.t.Fatalf("error storing time series data: %s", err) } } @@ -303,7 +303,7 @@ func (tm *testModelRunner) storeTimeSeriesData(r Resolution, data []tspb.TimeSer func (tm *testModelRunner) prune(nowNanos int64, timeSeries ...timeSeriesResolutionInfo) { // Prune time series from the system under test. if err := tm.DB.pruneTimeSeries( - context.TODO(), + context.Background(), tm.LocalTestCluster.DB, timeSeries, hlc.Timestamp{ @@ -352,7 +352,7 @@ func (tm *testModelRunner) rollupWithMemoryContext( qmc QueryMemoryContext, nowNanos int64, timeSeries ...timeSeriesResolutionInfo, ) { if err := tm.DB.rollupTimeSeries( - context.TODO(), + context.Background(), timeSeries, hlc.Timestamp{ WallTime: nowNanos, @@ -408,7 +408,7 @@ func (tm *testModelRunner) maintain(nowNanos int64) { snap := tm.Store.Engine().NewSnapshot() defer snap.Close() if err := tm.DB.MaintainTimeSeries( - context.TODO(), + context.Background(), snap, roachpb.RKey(keys.TimeseriesPrefix), roachpb.RKey(keys.TimeseriesKeyMax), @@ -549,9 +549,9 @@ func (mq *modelQuery) queryDB() ([]tspb.TimeSeriesDatapoint, []string, error) { memContext := MakeQueryMemoryContext( mq.workerMemMonitor, mq.resultMemMonitor, mq.QueryMemoryOptions, ) - defer memContext.Close(context.TODO()) + defer memContext.Close(context.Background()) return mq.modelRunner.DB.Query( - context.TODO(), mq.Query, mq.diskResolution, mq.QueryTimespan, memContext, + context.Background(), mq.Query, mq.diskResolution, mq.QueryTimespan, memContext, ) } diff --git a/pkg/ts/metrics_test.go b/pkg/ts/metrics_test.go index a78706f6468f..ee6074aaa0f4 100644 --- a/pkg/ts/metrics_test.go +++ b/pkg/ts/metrics_test.go @@ -57,7 +57,7 @@ func TestTimeSeriesWriteMetrics(t *testing.T) { } // Introduce an error into the db. - if err := tm.DB.StoreData(context.TODO(), resolutionInvalid, []tspb.TimeSeriesData{ + if err := tm.DB.StoreData(context.Background(), resolutionInvalid, []tspb.TimeSeriesData{ { Name: "test.multimetric", Source: "source3", diff --git a/pkg/ts/query_test.go b/pkg/ts/query_test.go index d0e9fa762307..c74b0908cb6c 100644 --- a/pkg/ts/query_test.go +++ b/pkg/ts/query_test.go @@ -403,8 +403,8 @@ func TestQueryWorkerMemoryConstraint(t *testing.T) { math.MaxInt64, cluster.MakeTestingClusterSettings(), ) - adjustedMon.Start(context.TODO(), tm.workerMemMonitor, mon.BoundAccount{}) - defer adjustedMon.Stop(context.TODO()) + adjustedMon.Start(context.Background(), tm.workerMemMonitor, mon.BoundAccount{}) + defer adjustedMon.Stop(context.Background()) query := tm.makeQuery("test.metric", resolution1ns, 11, 109) query.workerMemMonitor = &adjustedMon @@ -418,8 +418,8 @@ func TestQueryWorkerMemoryConstraint(t *testing.T) { memoryUsed / 3, } { // Limit memory in use by model. Reset memory monitor to get new maximum. 
- adjustedMon.Stop(context.TODO()) - adjustedMon.Start(context.TODO(), tm.workerMemMonitor, mon.BoundAccount{}) + adjustedMon.Stop(context.Background()) + adjustedMon.Start(context.Background(), tm.workerMemMonitor, mon.BoundAccount{}) if adjustedMon.MaximumBytes() != 0 { t.Fatalf("maximum bytes was %d, wanted zero", adjustedMon.MaximumBytes()) } @@ -481,8 +481,8 @@ func TestQueryWorkerMemoryMonitor(t *testing.T) { 100, cluster.MakeTestingClusterSettings(), ) - limitedMon.Start(context.TODO(), tm.workerMemMonitor, mon.BoundAccount{}) - defer limitedMon.Stop(context.TODO()) + limitedMon.Start(context.Background(), tm.workerMemMonitor, mon.BoundAccount{}) + defer limitedMon.Stop(context.Background()) // Assert correctness with no memory pressure. query := tm.makeQuery("test.metric", resolution1ns, 0, 60) @@ -491,19 +491,19 @@ func TestQueryWorkerMemoryMonitor(t *testing.T) { // Assert failure with memory pressure. acc := limitedMon.MakeBoundAccount() - if err := acc.Grow(context.TODO(), memoryBudget-1); err != nil { + if err := acc.Grow(context.Background(), memoryBudget-1); err != nil { t.Fatal(err) } query.assertError("memory budget exceeded") // Assert success again with memory pressure released. - acc.Close(context.TODO()) + acc.Close(context.Background()) query.assertSuccess(7, 1) // Start/Stop limited monitor to reset maximum allocation. - limitedMon.Stop(context.TODO()) - limitedMon.Start(context.TODO(), tm.workerMemMonitor, mon.BoundAccount{}) + limitedMon.Stop(context.Background()) + limitedMon.Start(context.Background(), tm.workerMemMonitor, mon.BoundAccount{}) var ( memStatsBefore runtime.MemStats diff --git a/pkg/ts/rollup_test.go b/pkg/ts/rollup_test.go index f91c73f6f8c8..c63de97dcc30 100644 --- a/pkg/ts/rollup_test.go +++ b/pkg/ts/rollup_test.go @@ -205,7 +205,7 @@ func TestRollupBasic(t *testing.T) { Columnar: tm.DB.WriteColumnar(), } if err := tm.DB.rollupTimeSeries( - context.TODO(), + context.Background(), []timeSeriesResolutionInfo{ { Name: "test.othermetric", @@ -222,7 +222,7 @@ func TestRollupBasic(t *testing.T) { } if err := tm.DB.pruneTimeSeries( - context.TODO(), + context.Background(), tm.DB.db, []timeSeriesResolutionInfo{ { @@ -273,8 +273,8 @@ func TestRollupMemoryConstraint(t *testing.T) { math.MaxInt64, cluster.MakeTestingClusterSettings(), ) - adjustedMon.Start(context.TODO(), tm.workerMemMonitor, mon.BoundAccount{}) - defer adjustedMon.Stop(context.TODO()) + adjustedMon.Start(context.Background(), tm.workerMemMonitor, mon.BoundAccount{}) + defer adjustedMon.Stop(context.Background()) // Roll up time series with the new monitor to measure high-water mark // of @@ -318,8 +318,8 @@ func TestRollupMemoryConstraint(t *testing.T) { tm.assertKeyCount(51 + i /* rollups from previous iterations */ + 50) // Restart monitor to clear query memory options. - adjustedMon.Stop(context.TODO()) - adjustedMon.Start(context.TODO(), tm.workerMemMonitor, mon.BoundAccount{}) + adjustedMon.Stop(context.Background()) + adjustedMon.Start(context.Background(), tm.workerMemMonitor, mon.BoundAccount{}) qmc := MakeQueryMemoryContext(&adjustedMon, &adjustedMon, QueryMemoryOptions{ // Large budget, but not maximum to avoid overflows. 
diff --git a/pkg/ts/server_test.go b/pkg/ts/server_test.go index 639182d5c9cb..6f01e4db3222 100644 --- a/pkg/ts/server_test.go +++ b/pkg/ts/server_test.go @@ -43,12 +43,12 @@ func TestServerQuery(t *testing.T) { }, }, }) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) tsrv := s.(*server.TestServer) // Populate data directly. tsdb := tsrv.TsDB() - if err := tsdb.StoreData(context.TODO(), ts.Resolution10s, []tspb.TimeSeriesData{ + if err := tsdb.StoreData(context.Background(), ts.Resolution10s, []tspb.TimeSeriesData{ { Name: "test.metric", Source: "source1", @@ -262,7 +262,7 @@ func TestServerQueryStarvation(t *testing.T) { s, _, _ := serverutils.StartServer(t, base.TestServerArgs{ TimeSeriesQueryWorkerMax: workerCount, }) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) tsrv := s.(*server.TestServer) seriesCount := workerCount * 2 @@ -319,7 +319,7 @@ func TestServerQueryMemoryManagement(t *testing.T) { TimeSeriesQueryWorkerMax: workerCount, TimeSeriesQueryMemoryBudget: budget, }) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) tsrv := s.(*server.TestServer) if err := populateSeries(seriesCount, sourceCount, valueCount, tsrv.TsDB()); err != nil { @@ -359,7 +359,7 @@ func TestServerDump(t *testing.T) { }, }, }) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) tsrv := s.(*server.TestServer) seriesCount := 10 @@ -381,7 +381,7 @@ func TestServerDump(t *testing.T) { } client := tspb.NewTimeSeriesClient(conn) - dumpClient, err := client.Dump(context.TODO(), nil) + dumpClient, err := client.Dump(context.Background(), nil) if err != nil { t.Fatal(err) } @@ -443,7 +443,7 @@ func TestServerDump(t *testing.T) { func BenchmarkServerQuery(b *testing.B) { s, _, _ := serverutils.StartServer(b, base.TestServerArgs{}) - defer s.Stopper().Stop(context.TODO()) + defer s.Stopper().Stop(context.Background()) tsrv := s.(*server.TestServer) // Populate data for large number of time series. 
@@ -502,7 +502,7 @@ func generateTimeSeriesDatapoints(valueCount int) []tspb.TimeSeriesDatapoint { func populateSeries(seriesCount, sourceCount, valueCount int, tsdb *ts.DB) error { for series := 0; series < seriesCount; series++ { for source := 0; source < sourceCount; source++ { - if err := tsdb.StoreData(context.TODO(), ts.Resolution10s, []tspb.TimeSeriesData{ + if err := tsdb.StoreData(context.Background(), ts.Resolution10s, []tspb.TimeSeriesData{ { Name: seriesName(series), Source: sourceName(source), diff --git a/pkg/util/cloudinfo/cloudinfo_test.go b/pkg/util/cloudinfo/cloudinfo_test.go index 0a9d90599e20..39c48559b74b 100644 --- a/pkg/util/cloudinfo/cloudinfo_test.go +++ b/pkg/util/cloudinfo/cloudinfo_test.go @@ -152,7 +152,7 @@ func TestAWSInstanceMetadataParsing(t *testing.T) { cli := client{NewInstanceMetadataTestClient()} - s, p, i := cli.getAWSInstanceMetadata(context.TODO(), instanceClass) + s, p, i := cli.getAWSInstanceMetadata(context.Background(), instanceClass) if !s { t.Fatalf("expected parsing to succeed") @@ -166,7 +166,7 @@ func TestAWSInstanceMetadataParsing(t *testing.T) { t.Fatalf("expected parsing to get instanceType m5a.large") } - _, _, r := cli.getAWSInstanceMetadata(context.TODO(), region) + _, _, r := cli.getAWSInstanceMetadata(context.Background(), region) if r != "us-east-1" { t.Fatalf("expected parsing to get region us-east-1") @@ -178,7 +178,7 @@ func TestGCPInstanceMetadataParsing(t *testing.T) { cli := client{NewInstanceMetadataTestClient()} - s, p, i := cli.getGCPInstanceMetadata(context.TODO(), instanceClass) + s, p, i := cli.getGCPInstanceMetadata(context.Background(), instanceClass) if !s { t.Fatalf("expected parsing to succeed") @@ -192,7 +192,7 @@ func TestGCPInstanceMetadataParsing(t *testing.T) { t.Fatalf("expected parsing to get machineTypes g1-small") } - _, _, r := cli.getGCPInstanceMetadata(context.TODO(), region) + _, _, r := cli.getGCPInstanceMetadata(context.Background(), region) if r != "us-east4-c" { t.Fatalf("expected parsing to get region us-east4-c") @@ -204,7 +204,7 @@ func TestAzureInstanceMetadataParsing(t *testing.T) { cli := client{NewInstanceMetadataTestClient()} - s, p, i := cli.getAzureInstanceMetadata(context.TODO(), instanceClass) + s, p, i := cli.getAzureInstanceMetadata(context.Background(), instanceClass) if !s { t.Fatalf("expected parsing to succeed") @@ -218,7 +218,7 @@ func TestAzureInstanceMetadataParsing(t *testing.T) { t.Fatalf("expected parsing to get machineTypes Standard_D2s_v3") } - _, _, r := cli.getAzureInstanceMetadata(context.TODO(), region) + _, _, r := cli.getAzureInstanceMetadata(context.Background(), region) if r != "eastus" { t.Fatalf("expected parsing to get region eastus") diff --git a/pkg/util/contextutil/context_test.go b/pkg/util/contextutil/context_test.go index 464bccd88afd..c018a5c0d84f 100644 --- a/pkg/util/contextutil/context_test.go +++ b/pkg/util/contextutil/context_test.go @@ -21,7 +21,7 @@ import ( ) func TestRunWithTimeout(t *testing.T) { - ctx := context.TODO() + ctx := context.Background() err := RunWithTimeout(ctx, "foo", 1, func(ctx context.Context) error { time.Sleep(10 * time.Millisecond) return nil @@ -74,7 +74,7 @@ func TestRunWithTimeout(t *testing.T) { // returned error is still a TimeoutError. In this case however the underlying // cause should be the returned error and not context.DeadlineExceeded. 
func TestRunWithTimeoutWithoutDeadlineExceeded(t *testing.T) { - ctx := context.TODO() + ctx := context.Background() notContextDeadlineExceeded := errors.New(context.DeadlineExceeded.Error()) err := RunWithTimeout(ctx, "foo", 1, func(ctx context.Context) error { <-ctx.Done() diff --git a/pkg/util/limit/limiter_test.go b/pkg/util/limit/limiter_test.go index a5c63a9be933..ec80c573dccb 100644 --- a/pkg/util/limit/limiter_test.go +++ b/pkg/util/limit/limiter_test.go @@ -31,7 +31,7 @@ func TestConcurrentRequestLimiter(t *testing.T) { const runs = 1000000 var count int64 - ctx, cancel := context.WithCancel(context.TODO()) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() for thread := 0; thread < threads; thread++ { diff --git a/pkg/workload/csv_test.go b/pkg/workload/csv_test.go index 1d9bd24051cc..8752ff83e556 100644 --- a/pkg/workload/csv_test.go +++ b/pkg/workload/csv_test.go @@ -57,7 +57,7 @@ func TestHandleCSV(t *testing.T) { })) defer ts.Close() - res, err := httputil.Get(context.TODO(), ts.URL+`/bank/bank`+test.params) + res, err := httputil.Get(context.Background(), ts.URL+`/bank/bank`+test.params) if err != nil { t.Fatal(err) } diff --git a/pkg/workload/tpch/expected_rows.go b/pkg/workload/tpch/expected_rows.go index 859e2a97c3a6..7d880b0e3260 100644 --- a/pkg/workload/tpch/expected_rows.go +++ b/pkg/workload/tpch/expected_rows.go @@ -11,39 +11,39 @@ package tpch var ( - maxCols int - numColsByQueryName = map[string]int{} - numExpectedRowsByQueryName = map[string]int{ - `11`: 1048, - `16`: 18314, - } - queriesToCheckOnlyNumRows = map[string]bool{ - `11`: true, - `16`: true, + maxCols int + numColsByQueryNumber = map[int]int{} + // numExpectedRowsByQueryNumber is a mapping from query number to the + // number of expected rows the query should return. For all of these + // queries, only row count is checked (i.e. we won't perform row-by-row + // check). + numExpectedRowsByQueryNumber = map[int]int{ + 11: 1048, + 16: 18314, } ) func init() { - for queryName, expectedRows := range expectedRowsByQueryName { - numColsByQueryName[queryName] = len(expectedRows[0]) - numExpectedRowsByQueryName[queryName] = len(expectedRows) + for queryNumber, expectedRows := range expectedRowsByQueryNumber { + numColsByQueryNumber[queryNumber] = len(expectedRows[0]) + numExpectedRowsByQueryNumber[queryNumber] = len(expectedRows) if len(expectedRows[0]) > maxCols { maxCols = len(expectedRows[0]) } } } -// expectedRowsByQueryName maps a query name to the expected rows for that +// expectedRowsByQueryNumber maps a query number to the expected rows for that // query. Queries 11 and 16 return 1048 and 18314 rows, respectively, so we // only verify the number of rows and these both are omitted from the map. 
-var expectedRowsByQueryName = map[string][][]string{ - `1`: { +var expectedRowsByQueryNumber = map[int][][]string{ + 1: { {`A`, `F`, `3.7734107e+07`, `5.65865544007299e+10`, `5.375825713486514e+10`, `5.590906522282561e+10`, `25.522005853257337`, `38273.1297346216`, `0.04998529583825443`, `1478493`}, {`N`, `F`, `991417`, `1.4875047103799965e+09`, `1.413082168054104e+09`, `1.4696492231943603e+09`, `25.516471920522985`, `38284.467760848216`, `0.05009342667419324`, `38854`}, {`N`, `O`, `7.447604e+07`, `1.1170172969773557e+11`, `1.0611823030761223e+11`, `1.1036704387249208e+11`, `25.50222676958499`, `38249.11798890675`, `0.04999658605362673`, `2920374`}, {`R`, `F`, `3.7719753e+07`, `5.656804138090447e+10`, `5.374129268460378e+10`, `5.588961911982966e+10`, `25.50579361269077`, `38250.85462610268`, `0.050009405829983596`, `1478870`}, }, - `2`: { + 2: { {`9938.53`, `Supplier#000005359`, `UNITED KINGDOM`, `185358`, `Manufacturer#4`, `QKuHYh,vZGiwu2FWEJoLDx04`, `33-429-790-6131`, `uriously regular requests hag`}, {`9937.84`, `Supplier#000005969`, `ROMANIA`, `108438`, `Manufacturer#1`, `ANDENSOSmk,miq23Xfb5RWt6dvUcvt6Qa`, `29-520-692-3537`, `efully express instructions. regular requests against the slyly fin`}, {`9936.22`, `Supplier#000005250`, `UNITED KINGDOM`, `249`, `Manufacturer#4`, `B3rqp0xbSEim4Mpy2RH J`, `33-320-228-2957`, `etect about the furiously final accounts. slyly ironic pinto beans sleep inside the furiously`}, @@ -145,7 +145,7 @@ var expectedRowsByQueryName = map[string][][]string{ {`7850.66`, `Supplier#000001518`, `UNITED KINGDOM`, `86501`, `Manufacturer#1`, `ONda3YJiHKJOC`, `33-730-383-3892`, `ifts haggle fluffily pending pai`}, {`7843.52`, `Supplier#000006683`, `FRANCE`, `11680`, `Manufacturer#4`, `2Z0JGkiv01Y00oCFwUGfviIbhzCdy`, `16-464-517-8943`, ` express, final pinto beans x-ray slyly asymptotes. unusual, unusual`}, }, - `3`: { + 3: { {`2456423`, `406181.0111`, `1995-03-05 00:00:00 +0000 +0000`, `0`}, {`3459808`, `405838.69889999996`, `1995-03-04 00:00:00 +0000 +0000`, `0`}, {`492164`, `390324.061`, `1995-02-19 00:00:00 +0000 +0000`, `0`}, @@ -157,34 +157,34 @@ var expectedRowsByQueryName = map[string][][]string{ {`993600`, `371407.4595`, `1995-03-05 00:00:00 +0000 +0000`, `0`}, {`2300070`, `367371.1452000001`, `1995-03-13 00:00:00 +0000 +0000`, `0`}, }, - `4`: { + 4: { {`1-URGENT`, `10594`}, {`2-HIGH`, `10476`}, {`3-MEDIUM`, `10410`}, {`4-NOT SPECIFIED`, `10556`}, {`5-LOW`, `10487`}, }, - `5`: { + 5: { {`INDONESIA`, `5.5502041169699945e+07`}, {`VIETNAM`, `5.529508699669996e+07`}, {`CHINA`, `5.372449425659997e+07`}, {`INDIA`, `5.203551200020005e+07`}, {`JAPAN`, `4.5410175695400015e+07`}, }, - `6`: { + 6: { {`1.2314107822829871e+08`}, }, - `7`: { + 7: { {`FRANCE`, `GERMANY`, `1995`, `5.463973273359995e+07`}, {`FRANCE`, `GERMANY`, `1996`, `5.463308330759997e+07`}, {`GERMANY`, `FRANCE`, `1995`, `5.253174666969997e+07`}, {`GERMANY`, `FRANCE`, `1996`, `5.252054902239985e+07`}, }, - `8`: { + 8: { {`1995`, `0.03443589040665483`}, {`1996`, `0.04148552129353034`}, }, - `9`: { + 9: { {`ALGERIA`, `1998`, `2.713690018030001e+07`}, {`ALGERIA`, `1997`, `4.861183349620003e+07`}, {`ALGERIA`, `1996`, `4.828548267819995e+07`}, @@ -361,7 +361,7 @@ var expectedRowsByQueryName = map[string][][]string{ {`VIETNAM`, `1993`, `4.5352676867199965e+07`}, {`VIETNAM`, `1992`, `4.7846355648499995e+07`}, }, - `10`: { + 10: { {`57040`, `Customer#000057040`, `734235.2455000001`, `632.87`, `JAPAN`, `Eioyzjf4pp`, `22-895-641-3466`, `sits. 
slyly regular requests sleep alongside of the regular inst`}, {`143347`, `Customer#000143347`, `721002.6947999999`, `2557.47`, `EGYPT`, `1aReFYv,Kw4`, `14-742-935-3718`, `ggle carefully enticing requests. final deposits use bold, bold pinto beans. ironic, idle re`}, {`60838`, `Customer#000060838`, `679127.3077000001`, `2454.77`, `BRAZIL`, `64EaJ5vMAHWJlBOxJklpNc2RJiWE`, `12-913-494-9813`, ` need to boost against the slyly regular account`}, @@ -384,11 +384,11 @@ var expectedRowsByQueryName = map[string][][]string{ {`23431`, `Customer#000023431`, `554269.536`, `3381.86`, `ROMANIA`, `HgiV0phqhaIa9aydNoIlb`, `29-915-458-2654`, `nusual, even instructions: furiously stealthy n`}, }, // Query 11 returns 1048 rows, so we verify only the number of rows returned. - `12`: { + 12: { {`MAIL`, `6202`, `9324`}, {`SHIP`, `6200`, `9262`}, }, - `13`: { + 13: { {`0`, `50005`}, {`9`, `6641`}, {`10`, `6532`}, @@ -432,17 +432,17 @@ var expectedRowsByQueryName = map[string][][]string{ {`41`, `2`}, {`39`, `1`}, }, - `14`: { + 14: { {`16.380778626395557`}, }, - `15`: { + 15: { {`8449`, `Supplier#000008449`, `Wp34zim9qYFbVctdW`, `20-469-856-8873`, `1.7726272086999996e+06`}, }, // Query 16 returns 18314 rows, so we verify only the number of rows returned. - `17`: { + 17: { {`348406.05428571376`}, }, - `18`: { + 18: { {`Customer#000128120`, `128120`, `4722021`, `1994-04-07 00:00:00 +0000 +0000`, `544089.09`, `323`}, {`Customer#000144617`, `144617`, `3043270`, `1997-02-12 00:00:00 +0000 +0000`, `530604.44`, `317`}, {`Customer#000013940`, `13940`, `2232932`, `1997-04-13 00:00:00 +0000 +0000`, `522720.61`, `304`}, @@ -501,10 +501,10 @@ var expectedRowsByQueryName = map[string][][]string{ {`Customer#000082441`, `82441`, `857959`, `1994-02-07 00:00:00 +0000 +0000`, `382579.74`, `305`}, {`Customer#000088703`, `88703`, `2995076`, `1994-01-30 00:00:00 +0000 +0000`, `363812.12`, `302`}, }, - `19`: { + 19: { {`3.0838430578e+06`}, }, - `20`: { + 20: { {`Supplier#000000020`, `iybAE,RmTymrZVYaFZva2SH,j`}, {`Supplier#000000091`, `YV45D7TkfdQanOOZ7q9QxkyGUapU1oOWU6q3`}, {`Supplier#000000205`, `rF uV8d0JNEk`}, @@ -692,7 +692,7 @@ var expectedRowsByQueryName = map[string][][]string{ {`Supplier#000009899`, `7XdpAHrzr1t,UQFZE`}, {`Supplier#000009974`, `7wJ,J5DKcxSU4Kp1cQLpbcAvB5AsvKT`}, }, - `21`: { + 21: { {`Supplier#000002829`, `20`}, {`Supplier#000005808`, `18`}, {`Supplier#000000262`, `17`}, @@ -794,7 +794,7 @@ var expectedRowsByQueryName = map[string][][]string{ {`Supplier#000002357`, `12`}, {`Supplier#000002483`, `12`}, }, - `22`: { + 22: { {`13`, `888`, `6.737713990000005e+06`}, {`17`, `861`, `6.460573720000007e+06`}, {`18`, `964`, `7.236687400000006e+06`}, diff --git a/pkg/workload/tpch/queries.go b/pkg/workload/tpch/queries.go index 4523566a5f9d..2fb2a24e0674 100644 --- a/pkg/workload/tpch/queries.go +++ b/pkg/workload/tpch/queries.go @@ -10,30 +10,37 @@ package tpch -var queriesByName = map[string]string{ - `1`: query1, - `2`: query2, - `3`: query3, - `4`: query4, - `5`: query5, - `6`: query6, - `7`: query7, - `8`: query8, - `9`: query9, - `10`: query10, - `11`: query11, - `12`: query12, - `13`: query13, - `14`: query14, - `15`: query15, - `16`: query16, - `17`: query17, - `18`: query18, - `19`: query19, - `20`: query20, - `21`: query21, - `22`: query22, -} +var ( + // QueriesByNumber is a mapping from the number of a TPC-H query to the actual + // query. 
+ QueriesByNumber = map[int]string{ + 1: query1, + 2: query2, + 3: query3, + 4: query4, + 5: query5, + 6: query6, + 7: query7, + 8: query8, + 9: query9, + 10: query10, + 11: query11, + 12: query12, + 13: query13, + 14: query14, + 15: query15, + 16: query16, + 17: query17, + 18: query18, + 19: query19, + 20: query20, + 21: query21, + 22: query22, + } + + // NumQueries specifies the number of queries in TPC-H benchmark. + NumQueries = len(QueriesByNumber) +) const ( query1 = ` diff --git a/pkg/workload/tpch/tpch.go b/pkg/workload/tpch/tpch.go index d22c53692382..8559d0050e35 100644 --- a/pkg/workload/tpch/tpch.go +++ b/pkg/workload/tpch/tpch.go @@ -66,7 +66,7 @@ type tpch struct { verbose bool queriesRaw string - selectedQueries []string + selectedQueries []int textPool textPool localsPool *sync.Pool @@ -130,10 +130,14 @@ func (w *tpch) Hooks() workload.Hooks { w.disableChecks = true } for _, queryName := range strings.Split(w.queriesRaw, `,`) { - if _, ok := queriesByName[queryName]; !ok { + queryNum, err := strconv.Atoi(queryName) + if err != nil { + return err + } + if _, ok := QueriesByNumber[queryNum]; !ok { return errors.Errorf(`unknown query: %s`, queryName) } - w.selectedQueries = append(w.selectedQueries, queryName) + w.selectedQueries = append(w.selectedQueries, queryNum) } return nil }, @@ -325,10 +329,10 @@ type worker struct { } func (w *worker) run(ctx context.Context) error { - queryName := w.config.selectedQueries[w.ops%len(w.config.selectedQueries)] + queryNum := w.config.selectedQueries[w.ops%len(w.config.selectedQueries)] w.ops++ - query := fmt.Sprintf("SET vectorize = '%s'; %s", w.config.vectorize, queriesByName[queryName]) + query := fmt.Sprintf("SET vectorize = '%s'; %s", w.config.vectorize, QueriesByNumber[queryNum]) vals := make([]interface{}, maxCols) for i := range vals { @@ -341,7 +345,7 @@ func (w *worker) run(ctx context.Context) error { defer rows.Close() } if err != nil { - return errors.Errorf("[q%s]: %s", queryName, err) + return errors.Errorf("[q%d]: %s", queryNum, err) } var numRows int // NOTE: we should *NOT* return an error from this function right away @@ -350,12 +354,12 @@ func (w *worker) run(ctx context.Context) error { checkExpectedOutput := func() error { for rows.Next() { if !w.config.disableChecks { - if !queriesToCheckOnlyNumRows[queryName] { - if err = rows.Scan(vals[:numColsByQueryName[queryName]]...); err != nil { - return errors.Errorf("[q%s]: %s", queryName, err) + if _, checkOnlyRowCount := numExpectedRowsByQueryNumber[queryNum]; !checkOnlyRowCount { + if err = rows.Scan(vals[:numColsByQueryNumber[queryNum]]...); err != nil { + return errors.Errorf("[q%d]: %s", queryNum, err) } - expectedRow := expectedRowsByQueryName[queryName][numRows] + expectedRow := expectedRowsByQueryNumber[queryNum][numRows] for i, expectedValue := range expectedRow { if val := *vals[i].(*interface{}); val != nil { var actualValue string @@ -372,15 +376,15 @@ func (w *worker) run(ctx context.Context) error { var expectedFloatRounded, actualFloatRounded float64 expectedFloat, err = strconv.ParseFloat(expectedValue, 64) if err != nil { - return errors.Errorf("[q%s] failed parsing expected value as float64 with %s\n"+ + return errors.Errorf("[q%d] failed parsing expected value as float64 with %s\n"+ "wrong result in row %d in column %d: got %q, expected %q", - queryName, err, numRows, i, actualValue, expectedValue) + queryNum, err, numRows, i, actualValue, expectedValue) } actualFloat, err = strconv.ParseFloat(actualValue, 64) if err != nil { - return 
errors.Errorf("[q%s] failed parsing actual value as float64 with %s\n"+ + return errors.Errorf("[q%d] failed parsing actual value as float64 with %s\n"+ "wrong result in row %d in column %d: got %q, expected %q", - queryName, err, numRows, i, actualValue, expectedValue) + queryNum, err, numRows, i, actualValue, expectedValue) } // TPC-H spec requires 0.01 precision for DECIMALs, so we will // first round the values to use in the comparison. Note that we @@ -392,15 +396,15 @@ func (w *worker) run(ctx context.Context) error { // 0.01). expectedFloatRounded, err = strconv.ParseFloat(fmt.Sprintf("%.3f", expectedFloat), 64) if err != nil { - return errors.Errorf("[q%s] failed parsing rounded expected value as float64 with %s\n"+ + return errors.Errorf("[q%d] failed parsing rounded expected value as float64 with %s\n"+ "wrong result in row %d in column %d: got %q, expected %q", - queryName, err, numRows, i, actualValue, expectedValue) + queryNum, err, numRows, i, actualValue, expectedValue) } actualFloatRounded, err = strconv.ParseFloat(fmt.Sprintf("%.3f", actualFloat), 64) if err != nil { - return errors.Errorf("[q%s] failed parsing rounded actual value as float64 with %s\n"+ + return errors.Errorf("[q%d] failed parsing rounded actual value as float64 with %s\n"+ "wrong result in row %d in column %d: got %q, expected %q", - queryName, err, numRows, i, actualValue, expectedValue) + queryNum, err, numRows, i, actualValue, expectedValue) } if math.Abs(expectedFloatRounded-actualFloatRounded) > 0.02 { // We only fail the check if the difference is more than 0.02 @@ -412,9 +416,9 @@ func (w *worker) run(ctx context.Context) error { // "ideal" - expected < 0.01 && actual - "ideal" < 0.01 // so in the worst case, actual and expected might differ by // 0.02 and still be considered correct. - return errors.Errorf("[q%s] %f and %f differ by more than 0.02\n"+ + return errors.Errorf("[q%d] %f and %f differ by more than 0.02\n"+ "wrong result in row %d in column %d: got %q, expected %q", - queryName, actualFloatRounded, expectedFloatRounded, + queryNum, actualFloatRounded, expectedFloatRounded, numRows, i, actualValue, expectedValue) } } @@ -437,33 +441,34 @@ func (w *worker) run(ctx context.Context) error { // We first check whether there is any error that came from the server (for // example, an out of memory error). If there is, we return it. if err := rows.Err(); err != nil { - return errors.Errorf("[q%s]: %s", queryName, err) + return errors.Errorf("[q%d]: %s", queryNum, err) } // Now we check whether there was an error while consuming the rows. if expectedOutputError != nil { return wrongOutputError{error: expectedOutputError} } if !w.config.disableChecks { - if numRows != numExpectedRowsByQueryName[queryName] { + numRowsExpected, checkOnlyRowCount := numExpectedRowsByQueryNumber[queryNum] + if checkOnlyRowCount && numRows != numRowsExpected { return wrongOutputError{ error: errors.Errorf( - "[q%s] returned wrong number of rows: got %d, expected %d", - queryName, numRows, numExpectedRowsByQueryName[queryName], + "[q%d] returned wrong number of rows: got %d, expected %d", + queryNum, numRows, numRowsExpected, )} } } elapsed := timeutil.Since(start) if w.config.verbose { - w.hists.Get(queryName).Record(elapsed) + w.hists.Get(fmt.Sprintf("%d", queryNum)).Record(elapsed) // Note: if you are changing the output format here, please change the // regex in roachtest/tpchvec.go accordingly. 
- log.Infof(ctx, "[q%s] returned %d rows after %4.2f seconds:\n%s", - queryName, numRows, elapsed.Seconds(), query) + log.Infof(ctx, "[q%d] returned %d rows after %4.2f seconds:\n%s", + queryNum, numRows, elapsed.Seconds(), query) } else { // Note: if you are changing the output format here, please change the // regex in roachtest/tpchvec.go accordingly. - log.Infof(ctx, "[q%s] returned %d rows after %4.2f seconds", - queryName, numRows, elapsed.Seconds()) + log.Infof(ctx, "[q%d] returned %d rows after %4.2f seconds", + queryNum, numRows, elapsed.Seconds()) } return nil }