diff --git a/pkg/kv/kvserver/consistency_queue_test.go b/pkg/kv/kvserver/consistency_queue_test.go
index ca3a6e74753e..35c63dc1cac0 100644
--- a/pkg/kv/kvserver/consistency_queue_test.go
+++ b/pkg/kv/kvserver/consistency_queue_test.go
@@ -254,15 +254,11 @@ func TestCheckConsistencyInconsistent(t *testing.T) {
 	testKnobs.ConsistencyTestingKnobs.BadChecksumReportDiff =
 		func(s roachpb.StoreIdent, diff kvserver.ReplicaSnapshotDiffSlice) {
 			rangeDesc := tc.LookupRangeOrFatal(t, diffKey)
-			repl, pErr := tc.FindRangeLeaseHolder(rangeDesc, nil)
-			if pErr != nil {
-				t.Fatal(pErr)
-			}
+			repl, err := tc.FindRangeLeaseHolder(rangeDesc, nil)
+			require.NoError(t, err)
 			// Servers start at 0, but NodeID starts at 1.
-			store, pErr := tc.Servers[repl.NodeID-1].Stores().GetStore(repl.StoreID)
-			if pErr != nil {
-				t.Fatal(pErr)
-			}
+			store, err := tc.Servers[repl.NodeID-1].Stores().GetStore(repl.StoreID)
+			require.NoError(t, err)
 			if s != *store.Ident {
 				t.Errorf("BadChecksumReportDiff called from follower (StoreIdent = %v)", s)
 				return
@@ -347,18 +343,14 @@ func TestCheckConsistencyInconsistent(t *testing.T) {
 			Mode: roachpb.ChecksumMode_CHECK_VIA_QUEUE,
 		}
 		resp, err := kv.SendWrapped(context.Background(), store.DB().NonTransactionalSender(), &checkArgs)
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err.GoError())
 		return resp.(*roachpb.CheckConsistencyResponse)
 	}
 
 	onDiskCheckpointPaths := func(nodeIdx int) []string {
-		fs, pErr := stickyEngineRegistry.GetUnderlyingFS(
+		fs, err := stickyEngineRegistry.GetUnderlyingFS(
 			base.StoreSpec{StickyInMemoryEngineID: strconv.FormatInt(int64(nodeIdx), 10)})
-		if pErr != nil {
-			t.Fatal(pErr)
-		}
+		require.NoError(t, err)
 		store := tc.GetFirstStoreFromServer(t, nodeIdx)
 		checkpointPath := filepath.Join(store.Engine().GetAuxiliaryDir(), "checkpoints")
 		checkpoints, _ := fs.List(checkpointPath)
@@ -391,11 +383,8 @@ func TestCheckConsistencyInconsistent(t *testing.T) {
 	var val roachpb.Value
 	val.SetInt(42)
 	diffTimestamp = tc.Server(0).Clock().Now()
-	if err := storage.MVCCPut(
-		context.Background(), store1.Engine(), nil, diffKey, diffTimestamp, hlc.ClockTimestamp{}, val, nil,
-	); err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, storage.MVCCPut(context.Background(), store1.Engine(), nil,
+		diffKey, diffTimestamp, hlc.ClockTimestamp{}, val, nil))
 
 	// Run consistency check again, this time it should find something.
 	resp := runConsistencyCheck()
@@ -518,9 +507,7 @@ func testConsistencyQueueRecomputeStatsImpl(t *testing.T, hadEstimates bool) {
 			RequestHeader: roachpb.RequestHeader{Key: key},
 			DryRun:        true,
 		})
-		if err := db.Run(ctx, &b); err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, db.Run(ctx, &b))
 		resp := b.RawResponse().Responses[0].GetInner().(*roachpb.RecomputeStatsResponse)
 		delta := enginepb.MVCCStats(resp.AddedDelta)
 		delta.AgeTo(0)
@@ -536,9 +523,7 @@ func testConsistencyQueueRecomputeStatsImpl(t *testing.T, hadEstimates bool) {
 		// Split off a range so that we get away from the timeseries writes, which
 		// pollute the stats with ContainsEstimates=true. Note that the split clears
 		// the right hand side (which is what we operate on) from that flag.
-		if err := db0.AdminSplit(ctx, key, hlc.MaxTimestamp /* expirationTime */); err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, db0.AdminSplit(ctx, key, hlc.MaxTimestamp /* expirationTime */))
 
 		delta := computeDelta(db0)
 
@@ -547,9 +532,7 @@ func testConsistencyQueueRecomputeStatsImpl(t *testing.T, hadEstimates bool) {
 		}
 
 		rangeDesc, err := tc.LookupRange(key)
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 		return rangeDesc.RangeID
 	}()
 
@@ -561,16 +544,12 @@ func testConsistencyQueueRecomputeStatsImpl(t *testing.T, hadEstimates bool) {
 			storage.Filesystem(path),
 			storage.CacheSize(1<<20 /* 1 MiB */),
 			storage.MustExist)
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 		defer eng.Close()
 
 		rsl := stateloader.Make(rangeID)
 		ms, err := rsl.LoadMVCCStats(ctx, eng)
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 
 		// Put some garbage in the stats that we're hoping the consistency queue will
 		// trigger a removal of via RecomputeStats. SysCount was chosen because it is
@@ -585,9 +564,7 @@ func testConsistencyQueueRecomputeStatsImpl(t *testing.T, hadEstimates bool) {
 		// Overwrite with the new stats; remember that this range hasn't upreplicated,
 		// so the consistency checker won't see any replica divergence when it runs,
 		// but it should definitely see that its recomputed stats mismatch.
-		if err := rsl.SetMVCCStats(ctx, eng, &ms); err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, rsl.SetMVCCStats(ctx, eng, &ms))
 	}()
 
 	// Now that we've tampered with the stats, restart the cluster and extend it
@@ -618,10 +595,7 @@ func testConsistencyQueueRecomputeStatsImpl(t *testing.T, hadEstimates bool) {
 		// can starve the actual work to be done.
 		done := time.After(5 * time.Second)
 		for {
-			if err := db0.Put(ctx, fmt.Sprintf("%s%d", key, rand.Int63()), "ballast"); err != nil {
-				t.Error(err)
-			}
-
+			require.NoError(t, db0.Put(ctx, fmt.Sprintf("%s%d", key, rand.Int63()), "ballast"))
 			select {
 			case <-ctx.Done():
 				return
@@ -644,16 +618,12 @@ func testConsistencyQueueRecomputeStatsImpl(t *testing.T, hadEstimates bool) {
 
 	// Force a run of the consistency queue, otherwise it might take a while.
 	store := tc.GetFirstStoreFromServer(t, 0)
-	if err := store.ForceConsistencyQueueProcess(); err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, store.ForceConsistencyQueueProcess())
 
 	// The stats should magically repair themselves. We'll first do a quick check
 	// and then a full recomputation.
 	repl, _, err := tc.Servers[0].Stores().GetReplicaForRangeID(ctx, rangeID)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	ms := repl.GetMVCCStats()
 	if ms.SysCount >= sysCountGarbage {
 		t.Fatalf("still have a SysCount of %d", ms.SysCount)
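The one replacement above that is not purely mechanical is require.NoError(t, err.GoError()) at the kv.SendWrapped call site: SendWrapped returns a structured *roachpb.Error rather than a plain error, and handing a nil *roachpb.Error directly to require.NoError would wrap a typed nil in the error interface and fail even though nothing went wrong. The sketch below illustrates that gotcha; structuredError and main are hypothetical stand-ins, with only the nil-receiver conversion mirroring what (*roachpb.Error).GoError() does.

// Illustrative sketch, not part of the patch above.
package main

import "fmt"

// structuredError is a hypothetical stand-in for a concrete error type such
// as roachpb.Error.
type structuredError struct{ msg string }

func (e *structuredError) Error() string { return e.msg }

// GoError maps a nil receiver to a true nil error, mirroring the conversion
// performed by (*roachpb.Error).GoError().
func (e *structuredError) GoError() error {
	if e == nil {
		return nil
	}
	return e
}

func main() {
	var pErr *structuredError // nil concrete pointer: no error occurred

	var asInterface error = pErr
	fmt.Println(asInterface == nil)    // false: typed nil wrapped in an interface
	fmt.Println(pErr.GoError() == nil) // true: safe to pass to require.NoError
}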