kvserver: use require package in tests
Release note: None
pav-kv committed Oct 3, 2022
1 parent 37e05b0 commit 339f165
Showing 1 changed file with 18 additions and 48 deletions.
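
The mechanical pattern applied throughout the diff below is the replacement of hand-rolled "if err != nil { t.Fatal(err) }" blocks with testify's require helpers. A minimal standalone sketch of the before/after, using strconv.Atoi as a stand-in for the kvserver calls (hypothetical example, not taken from the changed file):

package example

import (
	"strconv"
	"testing"

	"github.com/stretchr/testify/require"
)

// Before: every fallible call needs a three-line check.
func TestAtoiManualCheck(t *testing.T) {
	n, err := strconv.Atoi("42")
	if err != nil {
		t.Fatal(err)
	}
	if n != 42 {
		t.Fatalf("expected 42, got %d", n)
	}
}

// After: require.NoError fails the test immediately on a non-nil error,
// and require.Equal replaces the manual comparison.
func TestAtoiRequire(t *testing.T) {
	n, err := strconv.Atoi("42")
	require.NoError(t, err)
	require.Equal(t, 42, n)
}
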
66 changes: 18 additions & 48 deletions pkg/kv/kvserver/consistency_queue_test.go
@@ -254,15 +254,11 @@ func TestCheckConsistencyInconsistent(t *testing.T) {
 	testKnobs.ConsistencyTestingKnobs.BadChecksumReportDiff =
 		func(s roachpb.StoreIdent, diff kvserver.ReplicaSnapshotDiffSlice) {
 			rangeDesc := tc.LookupRangeOrFatal(t, diffKey)
-			repl, pErr := tc.FindRangeLeaseHolder(rangeDesc, nil)
-			if pErr != nil {
-				t.Fatal(pErr)
-			}
+			repl, err := tc.FindRangeLeaseHolder(rangeDesc, nil)
+			require.NoError(t, err)
 			// Servers start at 0, but NodeID starts at 1.
-			store, pErr := tc.Servers[repl.NodeID-1].Stores().GetStore(repl.StoreID)
-			if pErr != nil {
-				t.Fatal(pErr)
-			}
+			store, err := tc.Servers[repl.NodeID-1].Stores().GetStore(repl.StoreID)
+			require.NoError(t, err)
 			if s != *store.Ident {
 				t.Errorf("BadChecksumReportDiff called from follower (StoreIdent = %v)", s)
 				return
@@ -347,18 +343,14 @@ func TestCheckConsistencyInconsistent(t *testing.T) {
 			Mode: roachpb.ChecksumMode_CHECK_VIA_QUEUE,
 		}
 		resp, err := kv.SendWrapped(context.Background(), store.DB().NonTransactionalSender(), &checkArgs)
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err.GoError())
 		return resp.(*roachpb.CheckConsistencyResponse)
 	}
 
 	onDiskCheckpointPaths := func(nodeIdx int) []string {
-		fs, pErr := stickyEngineRegistry.GetUnderlyingFS(
+		fs, err := stickyEngineRegistry.GetUnderlyingFS(
 			base.StoreSpec{StickyInMemoryEngineID: strconv.FormatInt(int64(nodeIdx), 10)})
-		if pErr != nil {
-			t.Fatal(pErr)
-		}
+		require.NoError(t, err)
 		store := tc.GetFirstStoreFromServer(t, nodeIdx)
 		checkpointPath := filepath.Join(store.Engine().GetAuxiliaryDir(), "checkpoints")
 		checkpoints, _ := fs.List(checkpointPath)
@@ -391,11 +383,8 @@ func TestCheckConsistencyInconsistent(t *testing.T) {
 	var val roachpb.Value
 	val.SetInt(42)
 	diffTimestamp = tc.Server(0).Clock().Now()
-	if err := storage.MVCCPut(
-		context.Background(), store1.Engine(), nil, diffKey, diffTimestamp, hlc.ClockTimestamp{}, val, nil,
-	); err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, storage.MVCCPut(context.Background(), store1.Engine(), nil,
+		diffKey, diffTimestamp, hlc.ClockTimestamp{}, val, nil))
 
 	// Run consistency check again, this time it should find something.
 	resp := runConsistencyCheck()
@@ -518,9 +507,7 @@ func testConsistencyQueueRecomputeStatsImpl(t *testing.T, hadEstimates bool) {
 			RequestHeader: roachpb.RequestHeader{Key: key},
 			DryRun: true,
 		})
-		if err := db.Run(ctx, &b); err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, db.Run(ctx, &b))
 		resp := b.RawResponse().Responses[0].GetInner().(*roachpb.RecomputeStatsResponse)
 		delta := enginepb.MVCCStats(resp.AddedDelta)
 		delta.AgeTo(0)
@@ -536,9 +523,7 @@ func testConsistencyQueueRecomputeStatsImpl(t *testing.T, hadEstimates bool) {
 		// Split off a range so that we get away from the timeseries writes, which
 		// pollute the stats with ContainsEstimates=true. Note that the split clears
 		// the right hand side (which is what we operate on) from that flag.
-		if err := db0.AdminSplit(ctx, key, hlc.MaxTimestamp /* expirationTime */); err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, db0.AdminSplit(ctx, key, hlc.MaxTimestamp /* expirationTime */))
 
 		delta := computeDelta(db0)
 
@@ -547,9 +532,7 @@ func testConsistencyQueueRecomputeStatsImpl(t *testing.T, hadEstimates bool) {
 		}
 
 		rangeDesc, err := tc.LookupRange(key)
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 
 		return rangeDesc.RangeID
 	}()
@@ -561,16 +544,12 @@ func testConsistencyQueueRecomputeStatsImpl(t *testing.T, hadEstimates bool) {
 			storage.Filesystem(path),
 			storage.CacheSize(1<<20 /* 1 MiB */),
 			storage.MustExist)
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 		defer eng.Close()
 
 		rsl := stateloader.Make(rangeID)
 		ms, err := rsl.LoadMVCCStats(ctx, eng)
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, err)
 
 		// Put some garbage in the stats that we're hoping the consistency queue will
 		// trigger a removal of via RecomputeStats. SysCount was chosen because it is
@@ -585,9 +564,7 @@ func testConsistencyQueueRecomputeStatsImpl(t *testing.T, hadEstimates bool) {
 		// Overwrite with the new stats; remember that this range hasn't upreplicated,
 		// so the consistency checker won't see any replica divergence when it runs,
 		// but it should definitely see that its recomputed stats mismatch.
-		if err := rsl.SetMVCCStats(ctx, eng, &ms); err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(t, rsl.SetMVCCStats(ctx, eng, &ms))
 	}()
 
 	// Now that we've tampered with the stats, restart the cluster and extend it
@@ -618,10 +595,7 @@ func testConsistencyQueueRecomputeStatsImpl(t *testing.T, hadEstimates bool) {
 		// can starve the actual work to be done.
 		done := time.After(5 * time.Second)
 		for {
-			if err := db0.Put(ctx, fmt.Sprintf("%s%d", key, rand.Int63()), "ballast"); err != nil {
-				t.Error(err)
-			}
-
+			require.NoError(t, db0.Put(ctx, fmt.Sprintf("%s%d", key, rand.Int63()), "ballast"))
 			select {
 			case <-ctx.Done():
 				return
@@ -644,16 +618,12 @@ func testConsistencyQueueRecomputeStatsImpl(t *testing.T, hadEstimates bool) {
 
 	// Force a run of the consistency queue, otherwise it might take a while.
 	store := tc.GetFirstStoreFromServer(t, 0)
-	if err := store.ForceConsistencyQueueProcess(); err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, store.ForceConsistencyQueueProcess())
 
 	// The stats should magically repair themselves. We'll first do a quick check
 	// and then a full recomputation.
 	repl, _, err := tc.Servers[0].Stores().GetReplicaForRangeID(ctx, rangeID)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	ms := repl.GetMVCCStats()
 	if ms.SysCount >= sysCountGarbage {
 		t.Fatalf("still have a SysCount of %d", ms.SysCount)
