diff --git a/pkg/storage/client_merge_test.go b/pkg/storage/client_merge_test.go index 321126112467..b8a0156d6237 100644 --- a/pkg/storage/client_merge_test.go +++ b/pkg/storage/client_merge_test.go @@ -413,18 +413,18 @@ func TestStoreRangeMergeStats(t *testing.T) { if _, err := client.SendWrapped(context.Background(), rg1(store), &args); err != nil { t.Fatal(err) } - rngMerged := store.LookupReplica(aDesc.StartKey, nil) + replMerged := store.LookupReplica(aDesc.StartKey, nil) // Get the range stats for the merged range and verify. snap = store.Engine().NewSnapshot() defer snap.Close() - msMerged, err := engine.MVCCGetRangeStats(context.Background(), snap, rngMerged.RangeID) + msMerged, err := engine.MVCCGetRangeStats(context.Background(), snap, replMerged.RangeID) if err != nil { t.Fatal(err) } // Merged stats should agree with recomputation. - if err := verifyRecomputedStats(snap, rngMerged.Desc(), msMerged, manual.UnixNano()); err != nil { + if err := verifyRecomputedStats(snap, replMerged.Desc(), msMerged, manual.UnixNano()); err != nil { t.Errorf("failed to verify range's stats after merge: %v", err) } } diff --git a/pkg/storage/client_raft_test.go b/pkg/storage/client_raft_test.go index 69373170e888..329564d3743b 100644 --- a/pkg/storage/client_raft_test.go +++ b/pkg/storage/client_raft_test.go @@ -231,24 +231,24 @@ func TestReplicateRange(t *testing.T) { t.Fatal(err) } - rng, err := mtc.stores[0].GetReplica(1) + repl, err := mtc.stores[0].GetReplica(1) if err != nil { t.Fatal(err) } - if err := rng.ChangeReplicas( + if err := repl.ChangeReplicas( context.Background(), roachpb.ADD_REPLICA, roachpb.ReplicaDescriptor{ NodeID: mtc.stores[1].Ident.NodeID, StoreID: mtc.stores[1].Ident.StoreID, }, - rng.Desc(), + repl.Desc(), ); err != nil { t.Fatal(err) } // Verify no intent remains on range descriptor key. - key := keys.RangeDescriptorKey(rng.Desc().StartKey) + key := keys.RangeDescriptorKey(repl.Desc().StartKey) desc := roachpb.RangeDescriptor{} if ok, err := engine.MVCCGetProto(context.Background(), mtc.stores[0].Engine(), key, mtc.stores[0].Clock().Now(), true, nil, &desc); err != nil { t.Fatal(err) @@ -377,11 +377,11 @@ func TestRestoreReplicas(t *testing.T) { // Both replicas have a complete list in Desc.Replicas for i, store := range mtc.stores { - rng, err := store.GetReplica(1) + repl, err := store.GetReplica(1) if err != nil { t.Fatal(err) } - desc := rng.Desc() + desc := repl.Desc() if len(desc.Replicas) != 2 { t.Fatalf("store %d: expected 2 replicas, found %d", i, len(desc.Replicas)) } @@ -416,26 +416,26 @@ func TestFailedReplicaChange(t *testing.T) { mtc.Start(t, 2) defer mtc.Stop() - rng, err := mtc.stores[0].GetReplica(1) + repl, err := mtc.stores[0].GetReplica(1) if err != nil { t.Fatal(err) } - if err := rng.ChangeReplicas( + if err := repl.ChangeReplicas( context.Background(), roachpb.ADD_REPLICA, roachpb.ReplicaDescriptor{ NodeID: mtc.stores[1].Ident.NodeID, StoreID: mtc.stores[1].Ident.StoreID, }, - rng.Desc(), + repl.Desc(), ); !testutils.IsError(err, "boom") { t.Fatalf("did not get expected error: %v", err) } // After the aborted transaction, r.Desc was not updated. // TODO(bdarnell): expose and inspect raft's internal state. - if replicas := rng.Desc().Replicas; len(replicas) != 1 { + if replicas := repl.Desc().Replicas; len(replicas) != 1 { t.Fatalf("expected 1 replica, found %v", replicas) } @@ -447,14 +447,14 @@ func TestFailedReplicaChange(t *testing.T) { // are pushable by making the transaction abandoned. 
mtc.manualClock.Increment(10 * base.DefaultHeartbeatInterval.Nanoseconds()) - if err := rng.ChangeReplicas( + if err := repl.ChangeReplicas( context.Background(), roachpb.ADD_REPLICA, roachpb.ReplicaDescriptor{ NodeID: mtc.stores[1].Ident.NodeID, StoreID: mtc.stores[1].Ident.StoreID, }, - rng.Desc(), + repl.Desc(), ); err != nil { t.Fatal(err) } @@ -481,7 +481,7 @@ func TestReplicateAfterTruncation(t *testing.T) { mtc := startMultiTestContext(t, 2) defer mtc.Stop() - rng, err := mtc.stores[0].GetReplica(1) + repl, err := mtc.stores[0].GetReplica(1) if err != nil { t.Fatal(err) } @@ -493,7 +493,7 @@ func TestReplicateAfterTruncation(t *testing.T) { } // Get that command's log index. - index, err := rng.GetLastIndex() + index, err := repl.GetLastIndex() if err != nil { t.Fatal(err) } @@ -512,14 +512,14 @@ func TestReplicateAfterTruncation(t *testing.T) { } // Now add the second replica. - if err := rng.ChangeReplicas( + if err := repl.ChangeReplicas( context.Background(), roachpb.ADD_REPLICA, roachpb.ReplicaDescriptor{ NodeID: mtc.stores[1].Ident.NodeID, StoreID: mtc.stores[1].Ident.StoreID, }, - rng.Desc(), + repl.Desc(), ); err != nil { t.Fatal(err) } @@ -537,13 +537,13 @@ func TestReplicateAfterTruncation(t *testing.T) { return nil }) - rng2, err := mtc.stores[1].GetReplica(1) + repl2, err := mtc.stores[1].GetReplica(1) if err != nil { t.Fatal(err) } util.SucceedsSoon(t, func() error { - if mvcc, mvcc2 := rng.GetMVCCStats(), rng2.GetMVCCStats(); mvcc2 != mvcc { + if mvcc, mvcc2 := repl.GetMVCCStats(), repl2.GetMVCCStats(); mvcc2 != mvcc { return errors.Errorf("expected stats on new range:\n%+v\not equal old:\n%+v", mvcc2, mvcc) } return nil @@ -576,7 +576,7 @@ func TestSnapshotAfterTruncation(t *testing.T) { defer leaktest.AfterTest(t)() mtc := startMultiTestContext(t, 3) defer mtc.Stop() - rng, err := mtc.stores[0].GetReplica(1) + repl, err := mtc.stores[0].GetReplica(1) if err != nil { t.Fatal(err) } @@ -609,7 +609,7 @@ func TestSnapshotAfterTruncation(t *testing.T) { mtc.waitForValues(key, []int64{incAB, incA, incAB}) - index, err := rng.GetLastIndex() + index, err := repl.GetLastIndex() if err != nil { t.Fatal(err) } @@ -681,7 +681,7 @@ func TestConcurrentRaftSnapshots(t *testing.T) { defer leaktest.AfterTest(t)() mtc := startMultiTestContext(t, 5) defer mtc.Stop() - rng, err := mtc.stores[0].GetReplica(1) + repl, err := mtc.stores[0].GetReplica(1) if err != nil { t.Fatal(err) } @@ -715,7 +715,7 @@ func TestConcurrentRaftSnapshots(t *testing.T) { mtc.waitForValues(key, []int64{incAB, incA, incA, incAB, incAB}) - index, err := rng.GetLastIndex() + index, err := repl.GetLastIndex() if err != nil { t.Fatal(err) } @@ -857,11 +857,11 @@ func TestRefreshPendingCommands(t *testing.T) { } // Get the last increment's log index. - rng, err := mtc.stores[0].GetReplica(1) + repl, err := mtc.stores[0].GetReplica(1) if err != nil { t.Fatal(err) } - index, err := rng.GetLastIndex() + index, err := repl.GetLastIndex() if err != nil { t.Fatal(err) } @@ -1364,11 +1364,11 @@ func runReplicateRestartAfterTruncation(t *testing.T, removeBeforeTruncateAndReA // Truncate the logs. { // Get the last increment's log index. - rng, err := mtc.stores[0].GetReplica(rangeID) + repl, err := mtc.stores[0].GetReplica(rangeID) if err != nil { t.Fatal(err) } - index, err := rng.GetLastIndex() + index, err := repl.GetLastIndex() if err != nil { t.Fatal(err) } @@ -1812,14 +1812,14 @@ func TestRangeDescriptorSnapshotRace(t *testing.T) { defer stopper.Stop() // Call Snapshot() in a loop and ensure it never fails. 
work := func(key roachpb.RKey) error { - rng := mtc.stores[0].LookupReplica(key, nil) - if rng == nil { + repl := mtc.stores[0].LookupReplica(key, nil) + if repl == nil { return errors.Errorf("failed to look up replica for %s", key) } - if _, err := rng.GetSnapshot(context.Background()); err != nil { - return errors.Wrapf(err, "failed to snapshot range %s: %s", rng, key) + if _, err := repl.GetSnapshot(context.Background()); err != nil { + return errors.Wrapf(err, "failed to snapshot range %s: %s", repl, key) } - rng.CloseOutSnap() + repl.CloseOutSnap() return nil } @@ -1844,26 +1844,26 @@ func TestRangeDescriptorSnapshotRace(t *testing.T) { // initial range. The bug that this test was designed to find // usually occurred within the first 5 iterations. for i := 20; i > 0; i-- { - rng := mtc.stores[0].LookupReplica(roachpb.RKeyMin, nil) - if rng == nil { + repl := mtc.stores[0].LookupReplica(roachpb.RKeyMin, nil) + if repl == nil { t.Fatal("failed to look up min range") } - desc := rng.Desc() + desc := repl.Desc() args := adminSplitArgs(roachpb.KeyMin, []byte(fmt.Sprintf("A%03d", i))) - if _, err := rng.AdminSplit(context.Background(), args, desc); err != nil { + if _, err := repl.AdminSplit(context.Background(), args, desc); err != nil { t.Fatal(err) } } // Split again, carving chunks off the beginning of the final range. for i := 0; i < 20; i++ { - rng := mtc.stores[0].LookupReplica(roachpb.RKey("Z"), nil) - if rng == nil { + repl := mtc.stores[0].LookupReplica(roachpb.RKey("Z"), nil) + if repl == nil { t.Fatal("failed to look up max range") } - desc := rng.Desc() + desc := repl.Desc() args := adminSplitArgs(roachpb.KeyMin, []byte(fmt.Sprintf("B%03d", i))) - if _, err := rng.AdminSplit(context.Background(), args, desc); err != nil { + if _, err := repl.AdminSplit(context.Background(), args, desc); err != nil { t.Fatal(err) } } @@ -2715,18 +2715,18 @@ func TestTransferRaftLeadership(t *testing.T) { } } - rng := store0.LookupReplica(keys.MustAddr(key), nil) - if rng == nil { + repl := store0.LookupReplica(keys.MustAddr(key), nil) + if repl == nil { t.Fatalf("no replica found for key '%s'", key) } - mtc.replicateRange(rng.RangeID, 1, 2) + mtc.replicateRange(repl.RangeID, 1, 2) getArgs := getArgs([]byte("a")) - if _, pErr := client.SendWrappedWith(context.Background(), store0, roachpb.Header{RangeID: rng.RangeID}, &getArgs); pErr != nil { + if _, pErr := client.SendWrappedWith(context.Background(), store0, roachpb.Header{RangeID: repl.RangeID}, &getArgs); pErr != nil { t.Fatalf("expect get nil, actual get %v ", pErr) } - status := rng.RaftStatus() + status := repl.RaftStatus() if status != nil && status.Lead != 1 { t.Fatalf("raft leader should be 1, but got status %+v", status) } @@ -2739,7 +2739,7 @@ func TestTransferRaftLeadership(t *testing.T) { if _, pErr := client.SendWrappedWith( context.Background(), store1, - roachpb.Header{RangeID: rng.RangeID}, + roachpb.Header{RangeID: repl.RangeID}, &getArgs, ); pErr == nil { break @@ -2753,7 +2753,7 @@ func TestTransferRaftLeadership(t *testing.T) { } // Wait for raft leadership transferring to be finished. 
util.SucceedsSoon(t, func() error { - status = rng.RaftStatus() + status = repl.RaftStatus() if status.Lead != 2 { return errors.Errorf("expected raft leader be 2; got %d", status.Lead) } diff --git a/pkg/storage/client_split_test.go b/pkg/storage/client_split_test.go index b98354920fad..04a6bd745e5e 100644 --- a/pkg/storage/client_split_test.go +++ b/pkg/storage/client_split_test.go @@ -174,21 +174,21 @@ func TestStoreRangeSplitInsideRow(t *testing.T) { t.Fatalf("%s: split unexpected error: %s", col1Key, err) } - rng1 := store.LookupReplica(col1Key, nil) - rng2 := store.LookupReplica(col2Key, nil) + repl1 := store.LookupReplica(col1Key, nil) + repl2 := store.LookupReplica(col2Key, nil) // Verify the two columns are still on the same range. - if !reflect.DeepEqual(rng1, rng2) { - t.Fatalf("%s: ranges differ: %+v vs %+v", roachpb.Key(col1Key), rng1, rng2) + if !reflect.DeepEqual(repl1, repl2) { + t.Fatalf("%s: ranges differ: %+v vs %+v", roachpb.Key(col1Key), repl1, repl2) } // Verify we split on a row key. - if startKey := rng1.Desc().StartKey; !startKey.Equal(rowKey) { + if startKey := repl1.Desc().StartKey; !startKey.Equal(rowKey) { t.Fatalf("%s: expected split on %s, but found %s", roachpb.Key(col1Key), roachpb.Key(rowKey), startKey) } // Verify the previous range was split on a row key. - rng3 := store.LookupReplica(tableKey, nil) - if endKey := rng3.Desc().EndKey; !endKey.Equal(rowKey) { + repl3 := store.LookupReplica(tableKey, nil) + if endKey := repl3.Desc().EndKey; !endKey.Equal(rowKey) { t.Fatalf("%s: expected split on %s, but found %s", roachpb.Key(col1Key), roachpb.Key(rowKey), endKey) } @@ -399,8 +399,8 @@ func TestStoreRangeSplitIdempotency(t *testing.T) { } } - rng := store.LookupReplica(roachpb.RKeyMin, nil) - rngDesc := rng.Desc() + repl := store.LookupReplica(roachpb.RKeyMin, nil) + rngDesc := repl.Desc() newRng := store.LookupReplica([]byte("m"), nil) newRngDesc := newRng.Desc() if !bytes.Equal(newRngDesc.StartKey, splitKey) || !bytes.Equal(splitKey, rngDesc.EndKey) { @@ -497,28 +497,28 @@ func TestStoreRangeSplitStats(t *testing.T) { t.Fatal(pErr) } // Verify empty range has empty stats. - rng := store.LookupReplica(keyPrefix, nil) + repl := store.LookupReplica(keyPrefix, nil) // NOTE that this value is expected to change over time, depending on what // we store in the sys-local keyspace. Update it accordingly for this test. empty := enginepb.MVCCStats{LastUpdateNanos: manual.UnixNano()} - if err := verifyRangeStats(store.Engine(), rng.RangeID, empty); err != nil { + if err := verifyRangeStats(store.Engine(), repl.RangeID, empty); err != nil { t.Fatal(err) } // Write random data. - midKey := writeRandomDataToRange(t, store, rng.RangeID, keyPrefix) + midKey := writeRandomDataToRange(t, store, repl.RangeID, keyPrefix) // Get the range stats now that we have data. 
snap := store.Engine().NewSnapshot() defer snap.Close() - ms, err := engine.MVCCGetRangeStats(context.Background(), snap, rng.RangeID) + ms, err := engine.MVCCGetRangeStats(context.Background(), snap, repl.RangeID) if err != nil { t.Fatal(err) } - if err := verifyRecomputedStats(snap, rng.Desc(), ms, manual.UnixNano()); err != nil { + if err := verifyRecomputedStats(snap, repl.Desc(), ms, manual.UnixNano()); err != nil { t.Fatalf("failed to verify range's stats before split: %v", err) } - if inMemMS := rng.GetMVCCStats(); inMemMS != ms { + if inMemMS := repl.GetMVCCStats(); inMemMS != ms { t.Fatalf("in-memory and on-disk diverged:\n%+v\n!=\n%+v", inMemMS, ms) } @@ -527,19 +527,19 @@ func TestStoreRangeSplitStats(t *testing.T) { // Split the range at approximate halfway point. args = adminSplitArgs(keyPrefix, midKey) if _, pErr := client.SendWrappedWith(context.Background(), rg1(store), roachpb.Header{ - RangeID: rng.RangeID, + RangeID: repl.RangeID, }, &args); pErr != nil { t.Fatal(pErr) } snap = store.Engine().NewSnapshot() defer snap.Close() - msLeft, err := engine.MVCCGetRangeStats(context.Background(), snap, rng.RangeID) + msLeft, err := engine.MVCCGetRangeStats(context.Background(), snap, repl.RangeID) if err != nil { t.Fatal(err) } - rngRight := store.LookupReplica(midKey, nil) - msRight, err := engine.MVCCGetRangeStats(context.Background(), snap, rngRight.RangeID) + replRight := store.LookupReplica(midKey, nil) + msRight, err := engine.MVCCGetRangeStats(context.Background(), snap, replRight.RangeID) if err != nil { t.Fatal(err) } @@ -571,10 +571,10 @@ func TestStoreRangeSplitStats(t *testing.T) { } // Stats should agree with recomputation. - if err := verifyRecomputedStats(snap, rng.Desc(), msLeft, now); err != nil { + if err := verifyRecomputedStats(snap, repl.Desc(), msLeft, now); err != nil { t.Fatalf("failed to verify left range's stats after split: %v", err) } - if err := verifyRecomputedStats(snap, rngRight.Desc(), msRight, now); err != nil { + if err := verifyRecomputedStats(snap, replRight.Desc(), msRight, now); err != nil { t.Fatalf("failed to verify right range's stats after split: %v", err) } } @@ -606,34 +606,34 @@ func TestStoreRangeSplitStatsWithMerges(t *testing.T) { t.Fatal(pErr) } // Verify empty range has empty stats. - rng := store.LookupReplica(keyPrefix, nil) + repl := store.LookupReplica(keyPrefix, nil) // NOTE that this value is expected to change over time, depending on what // we store in the sys-local keyspace. Update it accordingly for this test. empty := enginepb.MVCCStats{LastUpdateNanos: manual.UnixNano()} - if err := verifyRangeStats(store.Engine(), rng.RangeID, empty); err != nil { + if err := verifyRangeStats(store.Engine(), repl.RangeID, empty); err != nil { t.Fatal(err) } // Write random TimeSeries data. - midKey := writeRandomTimeSeriesDataToRange(t, store, rng.RangeID, keyPrefix) + midKey := writeRandomTimeSeriesDataToRange(t, store, repl.RangeID, keyPrefix) manual.Increment(100) // Split the range at approximate halfway point. 
args = adminSplitArgs(keyPrefix, midKey) if _, pErr := client.SendWrappedWith(context.Background(), rg1(store), roachpb.Header{ - RangeID: rng.RangeID, + RangeID: repl.RangeID, }, &args); pErr != nil { t.Fatal(pErr) } snap := store.Engine().NewSnapshot() defer snap.Close() - msLeft, err := engine.MVCCGetRangeStats(context.Background(), snap, rng.RangeID) + msLeft, err := engine.MVCCGetRangeStats(context.Background(), snap, repl.RangeID) if err != nil { t.Fatal(err) } - rngRight := store.LookupReplica(midKey, nil) - msRight, err := engine.MVCCGetRangeStats(context.Background(), snap, rngRight.RangeID) + replRight := store.LookupReplica(midKey, nil) + msRight, err := engine.MVCCGetRangeStats(context.Background(), snap, replRight.RangeID) if err != nil { t.Fatal(err) } @@ -648,10 +648,10 @@ func TestStoreRangeSplitStatsWithMerges(t *testing.T) { } // Stats should agree with recomputation. - if err := verifyRecomputedStats(snap, rng.Desc(), msLeft, now); err != nil { + if err := verifyRecomputedStats(snap, repl.Desc(), msLeft, now); err != nil { t.Fatalf("failed to verify left range's stats after split: %v", err) } - if err := verifyRecomputedStats(snap, rngRight.Desc(), msRight, now); err != nil { + if err := verifyRecomputedStats(snap, replRight.Desc(), msRight, now); err != nil { t.Fatalf("failed to verify right range's stats after split: %v", err) } } @@ -712,34 +712,34 @@ func TestStoreZoneUpdateAndRangeSplit(t *testing.T) { tableBoundary := keys.MakeTablePrefix(descID) { - var rng *storage.Replica + var repl *storage.Replica // Wait for the range to be split along table boundaries. expectedRSpan := roachpb.RSpan{Key: roachpb.RKey(tableBoundary), EndKey: roachpb.RKeyMax} util.SucceedsSoon(t, func() error { - rng = store.LookupReplica(tableBoundary, nil) - if actualRSpan := rng.Desc().RSpan(); !actualRSpan.Equal(expectedRSpan) { - return errors.Errorf("expected range %s to span %s", rng, expectedRSpan) + repl = store.LookupReplica(tableBoundary, nil) + if actualRSpan := repl.Desc().RSpan(); !actualRSpan.Equal(expectedRSpan) { + return errors.Errorf("expected range %s to span %s", repl, expectedRSpan) } return nil }) // Check range's max bytes settings. - if actualMaxBytes := rng.GetMaxBytes(); actualMaxBytes != maxBytes { - t.Fatalf("range %s max bytes mismatch, got: %d, expected: %d", rng, actualMaxBytes, maxBytes) + if actualMaxBytes := repl.GetMaxBytes(); actualMaxBytes != maxBytes { + t.Fatalf("range %s max bytes mismatch, got: %d, expected: %d", repl, actualMaxBytes, maxBytes) } // Look in the range after prefix we're writing to. - fillRange(store, rng.RangeID, tableBoundary, maxBytes, t) + fillRange(store, repl.RangeID, tableBoundary, maxBytes, t) } // Verify that the range is in fact split. 
util.SucceedsSoon(t, func() error { - rng := store.LookupReplica(keys.MakeTablePrefix(descID+1), nil) - rngDesc := rng.Desc() + repl := store.LookupReplica(keys.MakeTablePrefix(descID+1), nil) + rngDesc := repl.Desc() rngStart, rngEnd := rngDesc.StartKey, rngDesc.EndKey if rngStart.Equal(tableBoundary) || !rngEnd.Equal(roachpb.RKeyMax) { - return errors.Errorf("range %s has not yet split", rng) + return errors.Errorf("range %s has not yet split", repl) } return nil }) diff --git a/pkg/storage/gc_queue_test.go b/pkg/storage/gc_queue_test.go index f67913f0ff84..025fa12fbc25 100644 --- a/pkg/storage/gc_queue_test.go +++ b/pkg/storage/gc_queue_test.go @@ -61,11 +61,11 @@ func TestGCQueueShouldQueue(t *testing.T) { if !ok { t.Fatal("config not set") } - desc := tc.rng.Desc() + desc := tc.repl.Desc() zone, err := cfg.GetZoneConfigForKey(desc.StartKey) if err != nil { log.Errorf(context.Background(), "could not find GC policy for range %s: %s, got zone %+v", - tc.rng, err, zone) + tc.repl, err, zone) return } policy := zone.GC @@ -141,14 +141,14 @@ func TestGCQueueShouldQueue(t *testing.T) { func() { // Hold lock throughout to reduce chance of random commands // leading to inconsistent state. - tc.rng.mu.Lock() - defer tc.rng.mu.Unlock() - if err := setMVCCStats(context.Background(), tc.rng.store.Engine(), tc.rng.RangeID, ms); err != nil { + tc.repl.mu.Lock() + defer tc.repl.mu.Unlock() + if err := setMVCCStats(context.Background(), tc.repl.store.Engine(), tc.repl.RangeID, ms); err != nil { t.Fatal(err) } - tc.rng.mu.state.Stats = ms + tc.repl.mu.state.Stats = ms }() - shouldQ, priority := gcQ.shouldQueue(context.TODO(), now, tc.rng, cfg) + shouldQ, priority := gcQ.shouldQueue(context.TODO(), now, tc.repl, cfg) if shouldQ != test.shouldQ { t.Errorf("%d: should queue expected %t; got %t", i, test.shouldQ, shouldQ) } @@ -273,7 +273,7 @@ func TestGCQueueProcess(t *testing.T) { // Process through a scan queue. 
gcQ := newGCQueue(tc.store, tc.gossip) - if err := gcQ.process(context.Background(), tc.Clock().Now(), tc.rng, cfg); err != nil { + if err := gcQ.process(context.Background(), tc.Clock().Now(), tc.repl, cfg); err != nil { t.Fatal(err) } @@ -456,7 +456,7 @@ func TestGCQueueTransactionTable(t *testing.T) { tc.StartWithStoreConfig(t, tsc) defer tc.Stop() - outsideKey := tc.rng.Desc().EndKey.Next().AsRawKey() + outsideKey := tc.repl.Desc().EndKey.Next().AsRawKey() testIntents := []roachpb.Span{{Key: roachpb.Key("intent")}} txns := map[string]roachpb.Transaction{} @@ -480,7 +480,7 @@ func TestGCQueueTransactionTable(t *testing.T) { } } entry := roachpb.AbortCacheEntry{Key: txn.Key, Timestamp: txn.LastActive()} - if err := tc.rng.abortCache.Put(context.Background(), tc.engine, nil, txn.ID, &entry); err != nil { + if err := tc.repl.abortCache.Put(context.Background(), tc.engine, nil, txn.ID, &entry); err != nil { t.Fatal(err) } } @@ -492,7 +492,7 @@ func TestGCQueueTransactionTable(t *testing.T) { t.Fatal("config not set") } - if err := gcQ.process(context.Background(), tc.Clock().Now(), tc.rng, cfg); err != nil { + if err := gcQ.process(context.Background(), tc.Clock().Now(), tc.repl, cfg); err != nil { t.Fatal(err) } @@ -520,7 +520,7 @@ func TestGCQueueTransactionTable(t *testing.T) { strKey, expIntents, resolved[strKey]) } entry := &roachpb.AbortCacheEntry{} - abortExists, err := tc.rng.abortCache.Get(context.Background(), tc.store.Engine(), txns[strKey].ID, entry) + abortExists, err := tc.repl.abortCache.Get(context.Background(), tc.store.Engine(), txns[strKey].ID, entry) if err != nil { t.Fatal(err) } @@ -548,11 +548,11 @@ func TestGCQueueTransactionTable(t *testing.T) { batch := tc.engine.NewSnapshot() defer batch.Close() - tc.rng.assertState(batch) // check that in-mem and on-disk state were updated + tc.repl.assertState(batch) // check that in-mem and on-disk state were updated - tc.rng.mu.Lock() - txnSpanThreshold := tc.rng.mu.state.TxnSpanGCThreshold - tc.rng.mu.Unlock() + tc.repl.mu.Lock() + txnSpanThreshold := tc.repl.mu.state.TxnSpanGCThreshold + tc.repl.mu.Unlock() // Verify that the new TxnSpanGCThreshold has reached the Replica. if expWT := int64(gcTxnAndAC); txnSpanThreshold.WallTime != expWT { @@ -604,7 +604,7 @@ func TestGCQueueIntentResolution(t *testing.T) { // Process through a scan queue. gcQ := newGCQueue(tc.store, tc.gossip) - if err := gcQ.process(context.Background(), tc.Clock().Now(), tc.rng, cfg); err != nil { + if err := gcQ.process(context.Background(), tc.Clock().Now(), tc.repl, cfg); err != nil { t.Fatal(err) } diff --git a/pkg/storage/helpers_test.go b/pkg/storage/helpers_test.go index 99c65d7413c6..3ed7c272fb7b 100644 --- a/pkg/storage/helpers_test.go +++ b/pkg/storage/helpers_test.go @@ -35,10 +35,10 @@ import ( // AddReplica adds the replica to the store's replica map and to the sorted // replicasByKey slice. To be used only by unittests. -func (s *Store) AddReplica(rng *Replica) error { +func (s *Store) AddReplica(repl *Replica) error { s.mu.Lock() defer s.mu.Unlock() - if err := s.addReplicaInternalLocked(rng); err != nil { + if err := s.addReplicaInternalLocked(repl); err != nil { return err } s.metrics.ReplicaCount.Inc(1) diff --git a/pkg/storage/queue_test.go b/pkg/storage/queue_test.go index f80642bcc39d..4e58e22ad35b 100644 --- a/pkg/storage/queue_test.go +++ b/pkg/storage/queue_test.go @@ -135,11 +135,11 @@ func TestBaseQueueAddUpdateAndRemove(t *testing.T) { defer tc.Stop() // Remove replica for range 1 since it encompasses the entire keyspace. 
- rng1, err := tc.store.GetReplica(1) + repl1, err := tc.store.GetReplica(1) if err != nil { t.Error(err) } - if err := tc.store.RemoveReplica(rng1, *rng1.Desc(), true); err != nil { + if err := tc.store.RemoveReplica(repl1, *repl1.Desc(), true); err != nil { t.Error(err) } @@ -287,11 +287,11 @@ func TestBaseQueueProcess(t *testing.T) { defer tc.Stop() // Remove replica for range 1 since it encompasses the entire keyspace. - rng1, err := tc.store.GetReplica(1) + repl1, err := tc.store.GetReplica(1) if err != nil { t.Error(err) } - if err := tc.store.RemoveReplica(rng1, *rng1.Desc(), true); err != nil { + if err := tc.store.RemoveReplica(repl1, *repl1.Desc(), true); err != nil { t.Error(err) } @@ -415,11 +415,11 @@ func TestAcceptsUnsplitRanges(t *testing.T) { } // Remove replica for range 1 since it encompasses the entire keyspace. - rng1, err := s.GetReplica(1) + repl1, err := s.GetReplica(1) if err != nil { t.Error(err) } - if err := s.RemoveReplica(rng1, *rng1.Desc(), true); err != nil { + if err := s.RemoveReplica(repl1, *repl1.Desc(), true); err != nil { t.Error(err) } @@ -547,11 +547,11 @@ func TestBaseQueuePurgatory(t *testing.T) { } // Remove replica for range 1 since it encompasses the entire keyspace. - rng1, err := tc.store.GetReplica(1) + repl1, err := tc.store.GetReplica(1) if err != nil { t.Error(err) } - if err := tc.store.RemoveReplica(rng1, *rng1.Desc(), true); err != nil { + if err := tc.store.RemoveReplica(repl1, *repl1.Desc(), true); err != nil { t.Error(err) } diff --git a/pkg/storage/replica_data_iter_test.go b/pkg/storage/replica_data_iter_test.go index cfb1754c086e..b3ce4a89aa4f 100644 --- a/pkg/storage/replica_data_iter_test.go +++ b/pkg/storage/replica_data_iter_test.go @@ -121,10 +121,10 @@ func TestReplicaDataIteratorEmptyRange(t *testing.T) { // Adjust the range descriptor to avoid existing data such as meta // records and config entries during the iteration. This is a rather // nasty little hack, but since it's test code, meh. - newDesc := *tc.rng.Desc() + newDesc := *tc.repl.Desc() newDesc.RangeID = 125125125 - iter := NewReplicaDataIterator(&newDesc, tc.rng.store.Engine(), false /* !replicatedOnly */) + iter := NewReplicaDataIterator(&newDesc, tc.repl.store.Engine(), false /* !replicatedOnly */) defer iter.Close() for ; iter.Valid(); iter.Next() { t.Errorf("unexpected: %s", iter.Key()) @@ -150,10 +150,10 @@ func TestReplicaDataIterator(t *testing.T) { defer tc.Stop() // See notes in EmptyRange test method for adjustment to descriptor. - newDesc := *tc.rng.Desc() + newDesc := *tc.repl.Desc() newDesc.StartKey = roachpb.RKey("b") newDesc.EndKey = roachpb.RKey("c") - if err := tc.rng.setDesc(&newDesc); err != nil { + if err := tc.repl.setDesc(&newDesc); err != nil { t.Fatal(err) } // Create two more ranges, one before the test range and one after. @@ -168,11 +168,11 @@ func TestReplicaDataIterator(t *testing.T) { // Create range data for all three ranges. preKeys := createRangeData(t, preRng) - curKeys := createRangeData(t, tc.rng) + curKeys := createRangeData(t, tc.repl) postKeys := createRangeData(t, postRng) // Verify the contents of the "b"-"c" range. - iter := NewReplicaDataIterator(tc.rng.Desc(), tc.rng.store.Engine(), false /* !replicatedOnly */) + iter := NewReplicaDataIterator(tc.repl.Desc(), tc.repl.store.Engine(), false /* !replicatedOnly */) defer iter.Close() i := 0 for ; iter.Valid(); iter.Next() { @@ -194,8 +194,8 @@ func TestReplicaDataIterator(t *testing.T) { } // Verify that the replicated-only iterator ignores unreplicated keys. 
- unreplicatedPrefix := keys.MakeRangeIDUnreplicatedPrefix(tc.rng.RangeID) - iter = NewReplicaDataIterator(tc.rng.Desc(), tc.rng.store.Engine(), true /* replicatedOnly */) + unreplicatedPrefix := keys.MakeRangeIDUnreplicatedPrefix(tc.repl.RangeID) + iter = NewReplicaDataIterator(tc.repl.Desc(), tc.repl.store.Engine(), true /* replicatedOnly */) defer iter.Close() for ; iter.Valid(); iter.Next() { if err := iter.Error(); err != nil { @@ -207,15 +207,15 @@ func TestReplicaDataIterator(t *testing.T) { } // Destroy range and verify that its data has been completely cleared. - if err := tc.store.removeReplicaImpl(tc.rng, *tc.rng.Desc(), true); err != nil { + if err := tc.store.removeReplicaImpl(tc.repl, *tc.repl.Desc(), true); err != nil { t.Fatal(err) } - iter = NewReplicaDataIterator(tc.rng.Desc(), tc.rng.store.Engine(), false /* !replicatedOnly */) + iter = NewReplicaDataIterator(tc.repl.Desc(), tc.repl.store.Engine(), false /* !replicatedOnly */) defer iter.Close() if iter.Valid() { // If the range is destroyed, only a tombstone key should be there. k1 := iter.Key().Key - if tombstoneKey := keys.RaftTombstoneKey(tc.rng.RangeID); !bytes.Equal(k1, tombstoneKey) { + if tombstoneKey := keys.RaftTombstoneKey(tc.repl.RangeID); !bytes.Equal(k1, tombstoneKey) { t.Errorf("expected a tombstone key %q, but found %q", tombstoneKey, k1) } diff --git a/pkg/storage/replica_gc_queue.go b/pkg/storage/replica_gc_queue.go index 25541943b3e3..5d177dccaebc 100644 --- a/pkg/storage/replica_gc_queue.go +++ b/pkg/storage/replica_gc_queue.go @@ -96,17 +96,17 @@ func newReplicaGCQueue(store *Store, db *client.DB, gossip *gossip.Gossip) *repl // check must have occurred more than ReplicaGCQueueInactivityThreshold // in the past. func (q *replicaGCQueue) shouldQueue( - ctx context.Context, now hlc.Timestamp, rng *Replica, _ config.SystemConfig, + ctx context.Context, now hlc.Timestamp, repl *Replica, _ config.SystemConfig, ) (bool, float64) { - lastCheck, err := rng.getLastReplicaGCTimestamp(ctx) + lastCheck, err := repl.getLastReplicaGCTimestamp(ctx) if err != nil { log.Errorf(ctx, "could not read last replica GC timestamp: %s", err) return false, 0 } - lastActivity := hlc.ZeroTimestamp.Add(rng.store.startedAt, 0) + lastActivity := hlc.ZeroTimestamp.Add(repl.store.startedAt, 0) - lease, nextLease := rng.getLease() + lease, nextLease := repl.getLease() if lease != nil { lastActivity.Forward(lease.Expiration) } @@ -115,7 +115,7 @@ func (q *replicaGCQueue) shouldQueue( } var isCandidate bool - if raftStatus := rng.RaftStatus(); raftStatus != nil { + if raftStatus := repl.RaftStatus(); raftStatus != nil { isCandidate = (raftStatus.SoftState.RaftState == raft.StateCandidate) } return replicaGCShouldQueueImpl(now, lastCheck, lastActivity, isCandidate) @@ -154,12 +154,12 @@ func replicaGCShouldQueueImpl( // process performs a consistent lookup on the range descriptor to see if we are // still a member of the range. func (q *replicaGCQueue) process( - ctx context.Context, now hlc.Timestamp, rng *Replica, _ config.SystemConfig, + ctx context.Context, now hlc.Timestamp, repl *Replica, _ config.SystemConfig, ) error { // Note that the Replicas field of desc is probably out of date, so // we should only use `desc` for its static fields like RangeID and // StartKey (and avoid rng.GetReplica() for the same reason). - desc := rng.Desc() + desc := repl.Desc() // Calls to RangeLookup typically use inconsistent reads, but we // want to do a consistent read here. 
This is important when we are @@ -183,10 +183,10 @@ func (q *replicaGCQueue) process( } replyDesc := reply.Ranges[0] - if _, currentMember := replyDesc.GetReplicaDescriptor(rng.store.StoreID()); !currentMember { + if _, currentMember := replyDesc.GetReplicaDescriptor(repl.store.StoreID()); !currentMember { // We are no longer a member of this range; clean up our local data. log.VEventf(ctx, 1, "destroying local data") - if err := rng.store.RemoveReplica(rng, replyDesc, true); err != nil { + if err := repl.store.RemoveReplica(repl, replyDesc, true); err != nil { return err } } else if desc.RangeID != replyDesc.RangeID { @@ -195,7 +195,7 @@ func (q *replicaGCQueue) process( // subsuming range. Shut down raft processing for the former range // and delete any remaining metadata, but do not delete the data. log.VEventf(ctx, 1, "removing merged range") - if err := rng.store.RemoveReplica(rng, replyDesc, false); err != nil { + if err := repl.store.RemoveReplica(repl, replyDesc, false); err != nil { return err } @@ -210,7 +210,7 @@ func (q *replicaGCQueue) process( // Replica (see #8111) when inactive ones can be starved by // event-driven additions. log.Event(ctx, "not gc'able") - if err := rng.setLastReplicaGCTimestamp(ctx, now); err != nil { + if err := repl.setLastReplicaGCTimestamp(ctx, now); err != nil { return err } } diff --git a/pkg/storage/replica_test.go b/pkg/storage/replica_test.go index 1855def0280d..78dcaa56f0fa 100644 --- a/pkg/storage/replica_test.go +++ b/pkg/storage/replica_test.go @@ -102,8 +102,8 @@ const ( // leaseExpiry returns a duration in nanos after which any range lease the // Replica may hold is expired. It is more precise than LeaseExpiration // in that it returns the minimal duration necessary. -func leaseExpiry(rng *Replica) int64 { - if l, _ := rng.getLease(); l != nil { +func leaseExpiry(repl *Replica) int64 { + if l, _ := repl.getLease(); l != nil { return l.Expiration.WallTime + 1 } return 0 @@ -117,7 +117,7 @@ type testContext struct { testing.TB transport *RaftTransport store *Store - rng *Replica + repl *Replica rangeID roachpb.RangeID gossip *gossip.Gossip engine engine.Engine @@ -181,7 +181,7 @@ func (tc *testContext) StartWithStoreConfig(t testing.TB, cfg StoreConfig) { // We created the store without a real KV client, so it can't perform splits. 
tc.store.splitQueue.SetDisabled(true) - if tc.rng == nil && tc.bootstrapMode == bootstrapRangeWithMetadata { + if tc.repl == nil && tc.bootstrapMode == bootstrapRangeWithMetadata { if err := tc.store.BootstrapRange(nil); err != nil { t.Fatal(err) } @@ -192,7 +192,7 @@ func (tc *testContext) StartWithStoreConfig(t testing.TB, cfg StoreConfig) { tc.store.WaitForInit() } - realRange := tc.rng == nil + realRange := tc.repl == nil if realRange { if tc.bootstrapMode == bootstrapRangeOnly { @@ -207,20 +207,20 @@ func (tc *testContext) StartWithStoreConfig(t testing.TB, cfg StoreConfig) { ); err != nil { t.Fatal(err) } - rng, err := NewReplica(testDesc, tc.store, 0) + repl, err := NewReplica(testDesc, tc.store, 0) if err != nil { t.Fatal(err) } - if err := tc.store.AddReplica(rng); err != nil { + if err := tc.store.AddReplica(repl); err != nil { t.Fatal(err) } } var err error - tc.rng, err = tc.store.GetReplica(1) + tc.repl, err = tc.store.GetReplica(1) if err != nil { t.Fatal(err) } - tc.rangeID = tc.rng.RangeID + tc.rangeID = tc.repl.RangeID } if err := tc.initConfigs(realRange, t); err != nil { @@ -229,7 +229,7 @@ func (tc *testContext) StartWithStoreConfig(t testing.TB, cfg StoreConfig) { } func (tc *testContext) Sender() client.Sender { - return client.Wrap(tc.rng, func(ba roachpb.BatchRequest) roachpb.BatchRequest { + return client.Wrap(tc.repl, func(ba roachpb.BatchRequest) roachpb.BatchRequest { if ba.RangeID != 0 { ba.RangeID = 1 } @@ -419,9 +419,9 @@ func TestReplicaReadConsistency(t *testing.T) { StoreID: 2, ReplicaID: 2, } - rngDesc := *tc.rng.Desc() + rngDesc := *tc.repl.Desc() rngDesc.Replicas = append(rngDesc.Replicas, secondReplica) - tc.rng.setDescWithoutProcessUpdate(&rngDesc) + tc.repl.setDescWithoutProcessUpdate(&rngDesc) gArgs := getArgs(roachpb.Key("a")) @@ -451,9 +451,9 @@ func TestReplicaReadConsistency(t *testing.T) { // Lose the lease and verify CONSISTENT reads receive NotLeaseHolderError // and INCONSISTENT reads work as expected. - tc.manualClock.Set(leaseExpiry(tc.rng)) + tc.manualClock.Set(leaseExpiry(tc.repl)) start := tc.Clock().Now() - if err := sendLeaseRequest(tc.rng, &roachpb.Lease{ + if err := sendLeaseRequest(tc.repl, &roachpb.Lease{ Start: start, StartStasis: start.Add(10, 0), Expiration: start.Add(10, 0), @@ -498,16 +498,16 @@ func TestApplyCmdLeaseError(t *testing.T) { StoreID: 2, ReplicaID: 2, } - rngDesc := *tc.rng.Desc() + rngDesc := *tc.repl.Desc() rngDesc.Replicas = append(rngDesc.Replicas, secondReplica) - tc.rng.setDescWithoutProcessUpdate(&rngDesc) + tc.repl.setDescWithoutProcessUpdate(&rngDesc) pArgs := putArgs(roachpb.Key("a"), []byte("asd")) // Lose the lease. - tc.manualClock.Set(leaseExpiry(tc.rng)) + tc.manualClock.Set(leaseExpiry(tc.repl)) start := tc.Clock().Now() - if err := sendLeaseRequest(tc.rng, &roachpb.Lease{ + if err := sendLeaseRequest(tc.repl, &roachpb.Lease{ Start: start, StartStasis: start.Add(10, 0), Expiration: start.Add(10, 0), @@ -558,9 +558,9 @@ func TestReplicaRangeBoundsChecking(t *testing.T) { // hasLease returns whether the most recent range lease was held by the given // range replica and whether it's expired for the given timestamp. 
-func hasLease(rng *Replica, timestamp hlc.Timestamp) (owned bool, expired bool) { - l, _ := rng.getLease() - return l.OwnedBy(rng.store.StoreID()), !l.Covers(timestamp) +func hasLease(repl *Replica, timestamp hlc.Timestamp) (owned bool, expired bool) { + l, _ := repl.getLease() + return l.OwnedBy(repl.store.StoreID()), !l.Covers(timestamp) } func TestReplicaLease(t *testing.T) { @@ -577,9 +577,9 @@ func TestReplicaLease(t *testing.T) { StoreID: 2, ReplicaID: 2, } - rngDesc := *tc.rng.Desc() + rngDesc := *tc.repl.Desc() rngDesc.Replicas = append(rngDesc.Replicas, secondReplica) - tc.rng.setDescWithoutProcessUpdate(&rngDesc) + tc.repl.setDescWithoutProcessUpdate(&rngDesc) // Test that leases with invalid times are rejected. // Start leases at a point that avoids overlapping with the existing lease. @@ -588,7 +588,7 @@ func TestReplicaLease(t *testing.T) { {Start: one, StartStasis: one}, {Start: one, StartStasis: one.Next(), Expiration: one}, } { - if _, _, err := tc.rng.RequestLease(context.Background(), tc.store.Engine(), nil, + if _, _, err := tc.repl.RequestLease(context.Background(), tc.store.Engine(), nil, roachpb.Header{}, roachpb.RequestLeaseRequest{ Lease: lease, }); !testutils.IsError(err, "illegal lease interval") { @@ -596,12 +596,12 @@ func TestReplicaLease(t *testing.T) { } } - if held, _ := hasLease(tc.rng, tc.Clock().Now()); !held { + if held, _ := hasLease(tc.repl, tc.Clock().Now()); !held { t.Errorf("expected lease on range start") } - tc.manualClock.Set(leaseExpiry(tc.rng)) + tc.manualClock.Set(leaseExpiry(tc.repl)) now := tc.Clock().Now() - if err := sendLeaseRequest(tc.rng, &roachpb.Lease{ + if err := sendLeaseRequest(tc.repl, &roachpb.Lease{ Start: now.Add(10, 0), StartStasis: now.Add(20, 0), Expiration: now.Add(20, 0), @@ -609,12 +609,12 @@ func TestReplicaLease(t *testing.T) { }); err != nil { t.Fatal(err) } - if held, expired := hasLease(tc.rng, tc.Clock().Now().Add(15, 0)); held || expired { + if held, expired := hasLease(tc.repl, tc.Clock().Now().Add(15, 0)); held || expired { t.Errorf("expected second replica to have range lease") } { - pErr := tc.rng.redirectOnOrAcquireLease(context.Background()) + pErr := tc.repl.redirectOnOrAcquireLease(context.Background()) if lErr, ok := pErr.GetDetail().(*roachpb.NotLeaseHolderError); !ok || lErr == nil { t.Fatalf("wanted NotLeaseHolderError, got %s", pErr) } @@ -622,26 +622,26 @@ func TestReplicaLease(t *testing.T) { // Advance clock past expiration and verify that another has // range lease will not be true. tc.manualClock.Increment(21) // 21ns have passed - if held, expired := hasLease(tc.rng, tc.Clock().Now()); held || !expired { + if held, expired := hasLease(tc.repl, tc.Clock().Now()); held || !expired { t.Errorf("expected another replica to have expired lease") } // Verify that command returns NotLeaseHolderError when lease is rejected. 
- rng, err := NewReplica(testRangeDescriptor(), tc.store, 0) + repl, err := NewReplica(testRangeDescriptor(), tc.store, 0) if err != nil { t.Fatal(err) } - rng.mu.Lock() - rng.mu.submitProposalFn = func(*ProposalData) error { + repl.mu.Lock() + repl.mu.submitProposalFn = func(*ProposalData) error { return &roachpb.LeaseRejectedError{ Message: "replica not found", } } - rng.mu.Unlock() + repl.mu.Unlock() { - if _, ok := rng.redirectOnOrAcquireLease(context.Background()).GetDetail().(*roachpb.NotLeaseHolderError); !ok { + if _, ok := repl.redirectOnOrAcquireLease(context.Background()).GetDetail().(*roachpb.NotLeaseHolderError); !ok { t.Fatalf("expected %T, got %s", &roachpb.NotLeaseHolderError{}, err) } } @@ -662,13 +662,13 @@ func TestReplicaNotLeaseHolderError(t *testing.T) { StoreID: 2, ReplicaID: 2, } - rngDesc := *tc.rng.Desc() + rngDesc := *tc.repl.Desc() rngDesc.Replicas = append(rngDesc.Replicas, secondReplica) - tc.rng.setDescWithoutProcessUpdate(&rngDesc) + tc.repl.setDescWithoutProcessUpdate(&rngDesc) - tc.manualClock.Set(leaseExpiry(tc.rng)) + tc.manualClock.Set(leaseExpiry(tc.repl)) now := tc.Clock().Now() - if err := sendLeaseRequest(tc.rng, &roachpb.Lease{ + if err := sendLeaseRequest(tc.repl, &roachpb.Lease{ Start: now, StartStasis: now.Add(10, 0), Expiration: now.Add(10, 0), @@ -727,7 +727,7 @@ func TestReplicaLeaseCounters(t *testing.T) { } return nil } - metrics := tc.rng.store.metrics + metrics := tc.repl.store.metrics if err := assert(metrics.LeaseRequestSuccessCount.Count(), 1, 1000); err != nil { t.Fatal(err) } @@ -736,7 +736,7 @@ func TestReplicaLeaseCounters(t *testing.T) { } now := tc.Clock().Now() - if err := sendLeaseRequest(tc.rng, &roachpb.Lease{ + if err := sendLeaseRequest(tc.repl, &roachpb.Lease{ Start: now, StartStasis: now.Add(10, 0), Expiration: now.Add(10, 0), @@ -756,7 +756,7 @@ func TestReplicaLeaseCounters(t *testing.T) { } // Make lease request fail by requesting overlapping lease from bogus Replica. - if err := sendLeaseRequest(tc.rng, &roachpb.Lease{ + if err := sendLeaseRequest(tc.repl, &roachpb.Lease{ Start: now, StartStasis: now.Add(10, 0), Expiration: now.Add(10, 0), @@ -793,9 +793,9 @@ func TestReplicaGossipConfigsOnLease(t *testing.T) { StoreID: 2, ReplicaID: 2, } - rngDesc := *tc.rng.Desc() + rngDesc := *tc.repl.Desc() rngDesc.Replicas = append(rngDesc.Replicas, secondReplica) - tc.rng.setDescWithoutProcessUpdate(&rngDesc) + tc.repl.setDescWithoutProcessUpdate(&rngDesc) // Write some arbitrary data in the system config span. key := keys.MakeTablePrefix(keys.MaxSystemConfigDescID) @@ -815,11 +815,11 @@ func TestReplicaGossipConfigsOnLease(t *testing.T) { // Expire our own lease which we automagically acquired due to being // first range and config holder. - tc.manualClock.Set(leaseExpiry(tc.rng)) + tc.manualClock.Set(leaseExpiry(tc.repl)) now := tc.Clock().Now() // Give lease to someone else. - if err := sendLeaseRequest(tc.rng, &roachpb.Lease{ + if err := sendLeaseRequest(tc.repl, &roachpb.Lease{ Start: now, StartStasis: now.Add(10, 0), Expiration: now.Add(10, 0), @@ -837,7 +837,7 @@ func TestReplicaGossipConfigsOnLease(t *testing.T) { now = tc.Clock().Now() // Give lease to this range. 
- if err := sendLeaseRequest(tc.rng, &roachpb.Lease{ + if err := sendLeaseRequest(tc.repl, &roachpb.Lease{ Start: now.Add(11, 0), StartStasis: now.Add(20, 0), Expiration: now.Add(20, 0), @@ -889,11 +889,11 @@ func TestReplicaTSCacheLowWaterOnLease(t *testing.T) { StoreID: 2, ReplicaID: 2, } - rngDesc := *tc.rng.Desc() + rngDesc := *tc.repl.Desc() rngDesc.Replicas = append(rngDesc.Replicas, secondReplica) - tc.rng.setDescWithoutProcessUpdate(&rngDesc) + tc.repl.setDescWithoutProcessUpdate(&rngDesc) - tc.manualClock.Set(leaseExpiry(tc.rng)) + tc.manualClock.Set(leaseExpiry(tc.repl)) now := tc.Clock().Now() testCases := []struct { @@ -932,7 +932,7 @@ func TestReplicaTSCacheLowWaterOnLease(t *testing.T) { } for i, test := range testCases { - if err := sendLeaseRequest(tc.rng, &roachpb.Lease{ + if err := sendLeaseRequest(tc.repl, &roachpb.Lease{ Start: test.start, StartStasis: test.expiration.Add(-1, 0), // smaller than durations used Expiration: test.expiration, @@ -945,10 +945,10 @@ func TestReplicaTSCacheLowWaterOnLease(t *testing.T) { t.Fatalf("%d: unexpected error %v", i, err) } // Verify expected low water mark. - tc.rng.mu.Lock() - rTS, _, _ := tc.rng.mu.tsCache.GetMaxRead(roachpb.Key("a"), nil) - wTS, _, _ := tc.rng.mu.tsCache.GetMaxWrite(roachpb.Key("a"), nil) - tc.rng.mu.Unlock() + tc.repl.mu.Lock() + rTS, _, _ := tc.repl.mu.tsCache.GetMaxRead(roachpb.Key("a"), nil) + wTS, _, _ := tc.repl.mu.tsCache.GetMaxWrite(roachpb.Key("a"), nil) + tc.repl.mu.Unlock() if test.expLowWater == 0 { continue @@ -969,7 +969,7 @@ func TestReplicaLeaseRejectUnknownRaftNodeID(t *testing.T) { tc.Start(t) defer tc.Stop() - tc.manualClock.Set(leaseExpiry(tc.rng)) + tc.manualClock.Set(leaseExpiry(tc.repl)) now := tc.Clock().Now() lease := &roachpb.Lease{ Start: now, @@ -982,9 +982,9 @@ func TestReplicaLeaseRejectUnknownRaftNodeID(t *testing.T) { }, } ba := roachpb.BatchRequest{} - ba.Timestamp = tc.rng.store.Clock().Now() + ba.Timestamp = tc.repl.store.Clock().Now() ba.Add(&roachpb.RequestLeaseRequest{Lease: *lease}) - ch, _, err := tc.rng.propose(context.Background(), ba) + ch, _, err := tc.repl.propose(context.Background(), ba) if err == nil { // Next if the command was committed, wait for the range to apply it. // TODO(bdarnell): refactor to a more conventional error-handling pattern. @@ -1005,7 +1005,7 @@ func TestReplicaDrainLease(t *testing.T) { defer tc.Stop() // Acquire initial lease. - if pErr := tc.rng.redirectOnOrAcquireLease(context.Background()); pErr != nil { + if pErr := tc.repl.redirectOnOrAcquireLease(context.Background()); pErr != nil { t.Fatal(pErr) } var slept atomic.Value @@ -1018,7 +1018,7 @@ func TestReplicaDrainLease(t *testing.T) { slept.Store(true) // Expire the lease (and any others that may race in before we drain). 
for { - tc.manualClock.Increment(leaseExpiry(tc.rng)) + tc.manualClock.Increment(leaseExpiry(tc.repl)) select { case <-time.After(10 * time.Millisecond): // real code would use Ticker case <-tc.stopper.ShouldQuiesce(): @@ -1035,9 +1035,9 @@ func TestReplicaDrainLease(t *testing.T) { if !slept.Load().(bool) { t.Fatal("DrainLeases returned with active lease") } - tc.rng.mu.Lock() - pErr := <-tc.rng.requestLeaseLocked(tc.Clock().Now()) - tc.rng.mu.Unlock() + tc.repl.mu.Lock() + pErr := <-tc.repl.requestLeaseLocked(tc.Clock().Now()) + tc.repl.mu.Unlock() _, ok := pErr.GetDetail().(*roachpb.NotLeaseHolderError) if !ok { t.Fatalf("expected NotLeaseHolderError, not %v", pErr) @@ -1046,7 +1046,7 @@ func TestReplicaDrainLease(t *testing.T) { t.Fatal(err) } // Newly unfrozen, leases work again. - if pErr := tc.rng.redirectOnOrAcquireLease(context.Background()); pErr != nil { + if pErr := tc.repl.redirectOnOrAcquireLease(context.Background()); pErr != nil { t.Fatal(pErr) } } @@ -1191,14 +1191,14 @@ func TestReplicaNoGossipFromNonLeader(t *testing.T) { } // Increment the clock's timestamp to expire the range lease. - tc.manualClock.Set(leaseExpiry(tc.rng)) - if lease, _ := tc.rng.getLease(); lease.Covers(tc.Clock().Now()) { + tc.manualClock.Set(leaseExpiry(tc.repl)) + if lease, _ := tc.repl.getLease(); lease.Covers(tc.Clock().Now()) { t.Fatal("range lease should have been expired") } // Make sure the information for db1 is not gossiped. Since obtaining // a lease updates the gossiped information, we do that. - if pErr := tc.rng.redirectOnOrAcquireLease(context.Background()); pErr != nil { + if pErr := tc.repl.redirectOnOrAcquireLease(context.Background()); pErr != nil { t.Fatal(pErr) } // Fetch the raw gossip info. GetSystemConfig is based on callbacks at @@ -1575,18 +1575,18 @@ func TestAcquireLease(t *testing.T) { // the start of a lease as far as possible, and since there is an auto- // matic lease for us at the beginning, we'll basically create a lease from // then on. - lease, _ := tc.rng.getLease() + lease, _ := tc.repl.getLease() expStart := lease.Start - tc.manualClock.Set(leaseExpiry(tc.rng)) + tc.manualClock.Set(leaseExpiry(tc.repl)) ts := tc.Clock().Now().Next() if _, pErr := tc.SendWrappedWith(roachpb.Header{Timestamp: ts}, test); pErr != nil { t.Error(pErr) } - if held, expired := hasLease(tc.rng, ts); !held || expired { + if held, expired := hasLease(tc.repl, ts); !held || expired { t.Errorf("%d: expected lease acquisition", i) } - lease, _ = tc.rng.getLease() + lease, _ = tc.repl.getLease() if !lease.Start.Equal(expStart) { t.Errorf("%d: unexpected lease start: %s; expected %s", i, lease.Start, expStart) } @@ -1603,7 +1603,7 @@ func TestAcquireLease(t *testing.T) { // Since the command we sent above does not get blocked on the lease // extension, we need to wait for it to go through. util.SucceedsSoon(t, func() error { - newLease, _ := tc.rng.getLease() + newLease, _ := tc.repl.getLease() if !lease.StartStasis.Less(newLease.StartStasis) { return errors.Errorf("%d: lease did not get extended: %+v to %+v", i, lease, newLease) } @@ -1644,12 +1644,12 @@ func TestLeaseConcurrent(t *testing.T) { active.Store(false) var seen int32 - tc.rng.mu.Lock() - tc.rng.mu.submitProposalFn = func(cmd *ProposalData) error { + tc.repl.mu.Lock() + tc.repl.mu.submitProposalFn = func(cmd *ProposalData) error { ll, ok := cmd.Cmd.Requests[0]. 
GetInner().(*roachpb.RequestLeaseRequest) if !ok || !active.Load().(bool) { - return defaultSubmitProposalLocked(tc.rng, cmd) + return defaultSubmitProposalLocked(tc.repl, cmd) } if c := atomic.AddInt32(&seen, 1); c > 1 { // Morally speaking, this is an error, but reproposals can @@ -1659,35 +1659,35 @@ func TestLeaseConcurrent(t *testing.T) { } go func() { wg.Wait() - tc.rng.mu.Lock() - defer tc.rng.mu.Unlock() + tc.repl.mu.Lock() + defer tc.repl.mu.Unlock() if withError { // When we complete the command, we have to remove it from the map; // otherwise its context (and tracing span) may be used after the // client cleaned up. - delete(tc.rng.mu.proposals, cmd.idKey) + delete(tc.repl.mu.proposals, cmd.idKey) cmd.done <- proposalResult{ Err: roachpb.NewErrorf(origMsg), } return } - if err := defaultSubmitProposalLocked(tc.rng, cmd); err != nil { + if err := defaultSubmitProposalLocked(tc.repl, cmd); err != nil { panic(err) // unlikely, so punt on proper handling } }() return nil } - tc.rng.mu.Unlock() + tc.repl.mu.Unlock() active.Store(true) - tc.manualClock.Increment(leaseExpiry(tc.rng)) + tc.manualClock.Increment(leaseExpiry(tc.repl)) ts := tc.Clock().Now() pErrCh := make(chan *roachpb.Error, num) for i := 0; i < num; i++ { if err := tc.stopper.RunAsyncTask(context.Background(), func(_ context.Context) { - tc.rng.mu.Lock() - leaseCh := tc.rng.requestLeaseLocked(ts) - tc.rng.mu.Unlock() + tc.repl.mu.Lock() + leaseCh := tc.repl.requestLeaseLocked(ts) + tc.repl.mu.Unlock() wg.Done() pErr := <-leaseCh // Mutate the errors as we receive them to expose races. @@ -1754,28 +1754,28 @@ func TestReplicaUpdateTSCache(t *testing.T) { t.Error(pErr) } // Verify the timestamp cache has rTS=1s and wTS=0s for "a". - tc.rng.mu.Lock() - defer tc.rng.mu.Unlock() - _, _, rOK := tc.rng.mu.tsCache.GetMaxRead(roachpb.Key("a"), nil) - _, _, wOK := tc.rng.mu.tsCache.GetMaxWrite(roachpb.Key("a"), nil) + tc.repl.mu.Lock() + defer tc.repl.mu.Unlock() + _, _, rOK := tc.repl.mu.tsCache.GetMaxRead(roachpb.Key("a"), nil) + _, _, wOK := tc.repl.mu.tsCache.GetMaxWrite(roachpb.Key("a"), nil) if rOK || wOK { t.Errorf("expected rOK=false and wOK=false; rOK=%t, wOK=%t", rOK, wOK) } - tc.rng.mu.tsCache.ExpandRequests(hlc.ZeroTimestamp) - rTS, _, rOK := tc.rng.mu.tsCache.GetMaxRead(roachpb.Key("a"), nil) - wTS, _, wOK := tc.rng.mu.tsCache.GetMaxWrite(roachpb.Key("a"), nil) + tc.repl.mu.tsCache.ExpandRequests(hlc.ZeroTimestamp) + rTS, _, rOK := tc.repl.mu.tsCache.GetMaxRead(roachpb.Key("a"), nil) + wTS, _, wOK := tc.repl.mu.tsCache.GetMaxWrite(roachpb.Key("a"), nil) if rTS.WallTime != t0.Nanoseconds() || wTS.WallTime != startNanos || !rOK || wOK { t.Errorf("expected rTS=1s and wTS=0s, but got %s, %s; rOK=%t, wOK=%t", rTS, wTS, rOK, wOK) } // Verify the timestamp cache has rTS=0s and wTS=2s for "b". - rTS, _, rOK = tc.rng.mu.tsCache.GetMaxRead(roachpb.Key("b"), nil) - wTS, _, wOK = tc.rng.mu.tsCache.GetMaxWrite(roachpb.Key("b"), nil) + rTS, _, rOK = tc.repl.mu.tsCache.GetMaxRead(roachpb.Key("b"), nil) + wTS, _, wOK = tc.repl.mu.tsCache.GetMaxWrite(roachpb.Key("b"), nil) if rTS.WallTime != startNanos || wTS.WallTime != t1.Nanoseconds() || rOK || !wOK { t.Errorf("expected rTS=0s and wTS=2s, but got %s, %s; rOK=%t, wOK=%t", rTS, wTS, rOK, wOK) } // Verify another key ("c") has 0sec in timestamp cache. 
- rTS, _, rOK = tc.rng.mu.tsCache.GetMaxRead(roachpb.Key("c"), nil) - wTS, _, wOK = tc.rng.mu.tsCache.GetMaxWrite(roachpb.Key("c"), nil) + rTS, _, rOK = tc.repl.mu.tsCache.GetMaxRead(roachpb.Key("c"), nil) + wTS, _, wOK = tc.repl.mu.tsCache.GetMaxWrite(roachpb.Key("c"), nil) if rTS.WallTime != startNanos || wTS.WallTime != startNanos || rOK || wOK { t.Errorf("expected rTS=0s and wTS=0s, but got %s %s; rOK=%t, wOK=%t", rTS, wTS, rOK, wOK) } @@ -2046,9 +2046,9 @@ func TestReplicaCommandQueueCancellation(t *testing.T) { // Wait until both commands are in the command queue. util.SucceedsSoon(t, func() error { - tc.rng.cmdQMu.Lock() - chans := tc.rng.cmdQMu.global.getWait(false, roachpb.Span{Key: key1}, roachpb.Span{Key: key2}) - tc.rng.cmdQMu.Unlock() + tc.repl.cmdQMu.Lock() + chans := tc.repl.cmdQMu.global.getWait(false, roachpb.Span{Key: key1}, roachpb.Span{Key: key2}) + tc.repl.cmdQMu.Unlock() if a, e := len(chans), 2; a < e { return errors.Errorf("%d of %d commands in the command queue", a, e) } @@ -2266,7 +2266,7 @@ func TestReplicaAbortCacheReadError(t *testing.T) { } // Overwrite Abort cache entry with garbage for the last op. - key := keys.AbortCacheKey(tc.rng.RangeID, txn.ID) + key := keys.AbortCacheKey(tc.repl.RangeID, txn.ID) err := engine.MVCCPut(context.Background(), tc.engine, nil, key, hlc.ZeroTimestamp, roachpb.MakeValueFromString("never read in this test"), nil) if err != nil { t.Fatal(err) @@ -2298,7 +2298,7 @@ func TestReplicaAbortCacheStoredTxnRetryError(t *testing.T) { Timestamp: txn.Timestamp, Priority: 0, } - if err := tc.rng.abortCache.Put(context.Background(), tc.engine, nil, txn.ID, &entry); err != nil { + if err := tc.repl.abortCache.Put(context.Background(), tc.engine, nil, txn.ID, &entry); err != nil { t.Fatal(err) } @@ -2407,7 +2407,7 @@ func TestReplicaAbortCacheOnlyWithIntent(t *testing.T) { Timestamp: txn.Timestamp, Priority: 0, } - if err := tc.rng.abortCache.Put(context.Background(), tc.engine, nil, txn.ID, &entry); err != nil { + if err := tc.repl.abortCache.Put(context.Background(), tc.engine, nil, txn.ID, &entry); err != nil { t.Fatal(err) } @@ -2501,7 +2501,7 @@ func TestEndTransactionTxnSpanGCThreshold(t *testing.T) { defer tc.Stop() key := roachpb.Key("a") - desc := tc.rng.Desc() + desc := tc.repl.Desc() // This test avoids a zero-timestamp regression (see LastActive() below), // so avoid zero timestamps. tc.manualClock.Increment(123) @@ -2895,7 +2895,7 @@ func TestEndTransactionWithErrors(t *testing.T) { txnKey := keys.TransactionKey(test.key, txn.ID) if test.existStatus != doesNotExist { - if err := engine.MVCCPutProto(context.Background(), tc.rng.store.Engine(), nil, txnKey, hlc.ZeroTimestamp, + if err := engine.MVCCPutProto(context.Background(), tc.repl.store.Engine(), nil, txnKey, hlc.ZeroTimestamp, nil, &existTxn); err != nil { t.Fatal(err) } @@ -3111,7 +3111,7 @@ func TestRaftReplayProtectionInTxn(t *testing.T) { // Reach in and manually send to raft (to simulate Raft replay) and // also avoid updating the timestamp cache; verify WriteTooOldError. 
ba.Timestamp = txn.OrigTimestamp - ch, _, err := tc.rng.propose(context.Background(), ba) + ch, _, err := tc.repl.propose(context.Background(), ba) if err != nil { t.Fatalf("%d: unexpected error: %s", i, err) } @@ -3133,7 +3133,7 @@ func TestReplicaLaziness(t *testing.T) { tc.Start(t) defer tc.Stop() - if status := tc.rng.RaftStatus(); status != nil { + if status := tc.repl.RaftStatus(); status != nil { t.Fatalf("expected raft group to not be initialized, got RaftStatus() of %v", status) } var ba roachpb.BatchRequest @@ -3143,7 +3143,7 @@ func TestReplicaLaziness(t *testing.T) { t.Fatalf("unexpected error: %s", pErr) } - if tc.rng.RaftStatus() == nil { + if tc.repl.RaftStatus() == nil { t.Fatalf("expected raft group to be initialized") } } @@ -3215,7 +3215,7 @@ func TestReplayProtection(t *testing.T) { // Verify txn record is cleaned. var readTxn roachpb.Transaction txnKey := keys.TransactionKey(txn.Key, txn.ID) - ok, err := engine.MVCCGetProto(context.Background(), tc.rng.store.Engine(), txnKey, hlc.ZeroTimestamp, true /* consistent */, nil /* txn */, &readTxn) + ok, err := engine.MVCCGetProto(context.Background(), tc.repl.store.Engine(), txnKey, hlc.ZeroTimestamp, true /* consistent */, nil /* txn */, &readTxn) if err != nil || ok { t.Errorf("%d: expected transaction record to be cleared (%t): %s", i, ok, err) } @@ -3332,7 +3332,7 @@ func TestEndTransactionLocalGC(t *testing.T) { } var readTxn roachpb.Transaction txnKey := keys.TransactionKey(txn.Key, txn.ID) - ok, err := engine.MVCCGetProto(context.Background(), tc.rng.store.Engine(), txnKey, hlc.ZeroTimestamp, + ok, err := engine.MVCCGetProto(context.Background(), tc.repl.store.Engine(), txnKey, hlc.ZeroTimestamp, true /* consistent */, nil /* txn */, &readTxn) if err != nil { t.Fatal(err) @@ -3459,7 +3459,7 @@ func TestEndTransactionDirectGC(t *testing.T) { rightRng, txn := setupResolutionTest(t, tc, testKey, splitKey, false /* generate abort cache entry */) util.SucceedsSoon(t, func() error { - if gr, _, err := tc.rng.Get( + if gr, _, err := tc.repl.Get( ctx, tc.engine, roachpb.Header{}, roachpb.GetRequest{Span: roachpb.Span{ Key: keys.TransactionKey(txn.Key, txn.ID), @@ -3471,7 +3471,7 @@ func TestEndTransactionDirectGC(t *testing.T) { } var entry roachpb.AbortCacheEntry - if aborted, err := tc.rng.abortCache.Get(ctx, tc.engine, txn.ID, &entry); err != nil { + if aborted, err := tc.repl.abortCache.Get(ctx, tc.engine, txn.ID, &entry); err != nil { t.Fatal(err) } else if aborted { return errors.Errorf("%d: abort cache still populated: %v", i, entry) @@ -3558,7 +3558,7 @@ func TestEndTransactionDirectGC_1PC(t *testing.T) { } var entry roachpb.AbortCacheEntry - if aborted, err := tc.rng.abortCache.Get(context.Background(), tc.engine, txn.ID, &entry); err != nil { + if aborted, err := tc.repl.abortCache.Get(context.Background(), tc.engine, txn.ID, &entry); err != nil { t.Fatal(err) } else if aborted { t.Fatalf("commit=%t: abort cache still populated: %v", commit, entry) @@ -3742,11 +3742,11 @@ func TestAbortCacheError(t *testing.T) { Timestamp: ts, Priority: priority, } - if err := tc.rng.abortCache.Put(context.Background(), tc.engine, nil, txn.ID, &entry); err != nil { + if err := tc.repl.abortCache.Put(context.Background(), tc.engine, nil, txn.ID, &entry); err != nil { t.Fatal(err) } - pErr := tc.rng.checkIfTxnAborted(context.Background(), tc.engine, txn) + pErr := tc.repl.checkIfTxnAborted(context.Background(), tc.engine, txn) if _, ok := pErr.GetDetail().(*roachpb.TransactionAbortedError); ok { expected := txn.Clone() 
expected.Timestamp = txn.Timestamp @@ -3946,7 +3946,7 @@ func TestPushTxnHeartbeatTimeout(t *testing.T) { pushee.LastHeartbeat = &test.heartbeat } _, btH := beginTxnArgs(key, pushee) - btH.Timestamp = tc.rng.store.Clock().Now() + btH.Timestamp = tc.repl.store.Clock().Now() put := putArgs(key, key) if _, pErr := maybeWrapWithBeginTransaction(context.Background(), tc.Sender(), btH, &put); pErr != nil { t.Fatalf("%d: %s", i, pErr) @@ -3996,19 +3996,19 @@ func TestResolveIntentPushTxnReplyTxn(t *testing.T) { ctx := context.Background() // Should not be able to push or resolve in a transaction. - if _, err := tc.rng.PushTxn(ctx, b, &ms, roachpb.Header{Txn: txn}, pa); !testutils.IsError(err, errTransactionUnsupported.Error()) { + if _, err := tc.repl.PushTxn(ctx, b, &ms, roachpb.Header{Txn: txn}, pa); !testutils.IsError(err, errTransactionUnsupported.Error()) { t.Fatalf("transactional PushTxn returned unexpected error: %v", err) } - if _, err := tc.rng.ResolveIntent(ctx, b, &ms, roachpb.Header{Txn: txn}, ra); !testutils.IsError(err, errTransactionUnsupported.Error()) { + if _, err := tc.repl.ResolveIntent(ctx, b, &ms, roachpb.Header{Txn: txn}, ra); !testutils.IsError(err, errTransactionUnsupported.Error()) { t.Fatalf("transactional ResolveIntent returned unexpected error: %v", err) } - if _, err := tc.rng.ResolveIntentRange(ctx, b, &ms, roachpb.Header{Txn: txn}, rra); !testutils.IsError(err, errTransactionUnsupported.Error()) { + if _, err := tc.repl.ResolveIntentRange(ctx, b, &ms, roachpb.Header{Txn: txn}, rra); !testutils.IsError(err, errTransactionUnsupported.Error()) { t.Fatalf("transactional ResolveIntentRange returned unexpected error: %v", err) } // Should not get a transaction back from PushTxn. It used to erroneously // return args.PusherTxn. - if reply, err := tc.rng.PushTxn(ctx, b, &ms, roachpb.Header{}, pa); err != nil { + if reply, err := tc.repl.PushTxn(ctx, b, &ms, roachpb.Header{}, pa); err != nil { t.Fatal(err) } else if reply.Txn != nil { t.Fatalf("expected nil response txn, but got %s", reply.Txn) @@ -4233,7 +4233,7 @@ func TestPushTxnSerializableRestart(t *testing.T) { pushee.Restart(1, 1, pusher.Timestamp) // Next push pushee to advance timestamp of txn record. 
- pusher.Timestamp = tc.rng.store.Clock().Now() + pusher.Timestamp = tc.repl.store.Clock().Now() args := pushTxnArgs(pusher, &pusheeCopy, roachpb.PUSH_TIMESTAMP) if _, pErr := tc.SendWrapped(&args); pErr != nil { t.Fatal(pErr) @@ -4353,7 +4353,7 @@ func TestReplicaStatsComputation(t *testing.T) { ValCount: 1, }) - if err := verifyRangeStats(tc.engine, tc.rng.RangeID, expMS); err != nil { + if err := verifyRangeStats(tc.engine, tc.repl.RangeID, expMS); err != nil { t.Fatal(err) } @@ -4385,7 +4385,7 @@ func TestReplicaStatsComputation(t *testing.T) { ValCount: 2, IntentCount: 1, }) - if err := verifyRangeStats(tc.engine, tc.rng.RangeID, expMS); err != nil { + if err := verifyRangeStats(tc.engine, tc.repl.RangeID, expMS); err != nil { t.Fatal(err) } @@ -4410,7 +4410,7 @@ func TestReplicaStatsComputation(t *testing.T) { KeyCount: 2, ValCount: 2, }) - if err := verifyRangeStats(tc.engine, tc.rng.RangeID, expMS); err != nil { + if err := verifyRangeStats(tc.engine, tc.repl.RangeID, expMS); err != nil { t.Fatal(err) } @@ -4429,7 +4429,7 @@ func TestReplicaStatsComputation(t *testing.T) { KeyCount: 2, ValCount: 3, }) - if err := verifyRangeStats(tc.engine, tc.rng.RangeID, expMS); err != nil { + if err := verifyRangeStats(tc.engine, tc.repl.RangeID, expMS); err != nil { t.Fatal(err) } } @@ -4504,7 +4504,7 @@ func TestTruncateLog(t *testing.T) { tc := testContext{} tc.Start(t) defer tc.Stop() - tc.rng.store.SetRaftLogQueueActive(false) + tc.repl.store.SetRaftLogQueueActive(false) // Populate the log with 10 entries. Save the LastIndex after each write. var indexes []uint64 @@ -4514,14 +4514,14 @@ func TestTruncateLog(t *testing.T) { if _, pErr := tc.SendWrapped(&args); pErr != nil { t.Fatal(pErr) } - idx, err := tc.rng.GetLastIndex() + idx, err := tc.repl.GetLastIndex() if err != nil { t.Fatal(err) } indexes = append(indexes, idx) } - rangeID := tc.rng.RangeID + rangeID := tc.repl.RangeID // Discard the first half of the log. truncateArgs := truncateLogArgs(indexes[5], rangeID) @@ -4530,7 +4530,7 @@ func TestTruncateLog(t *testing.T) { } // FirstIndex has changed. - firstIndex, err := tc.rng.GetFirstIndex() + firstIndex, err := tc.repl.GetFirstIndex() if err != nil { t.Fatal(err) } @@ -4539,9 +4539,9 @@ func TestTruncateLog(t *testing.T) { } // We can still get what remains of the log. - tc.rng.mu.Lock() - entries, err := tc.rng.Entries(indexes[5], indexes[9], math.MaxUint64) - tc.rng.mu.Unlock() + tc.repl.mu.Lock() + entries, err := tc.repl.Entries(indexes[5], indexes[9], math.MaxUint64) + tc.repl.mu.Unlock() if err != nil { t.Fatal(err) } @@ -4550,17 +4550,17 @@ func TestTruncateLog(t *testing.T) { } // But any range that includes the truncated entries returns an error. - tc.rng.mu.Lock() - _, err = tc.rng.Entries(indexes[4], indexes[9], math.MaxUint64) - tc.rng.mu.Unlock() + tc.repl.mu.Lock() + _, err = tc.repl.Entries(indexes[4], indexes[9], math.MaxUint64) + tc.repl.mu.Unlock() if err != raft.ErrCompacted { t.Errorf("expected ErrCompacted, got %s", err) } // The term of the last truncated entry is still available. - tc.rng.mu.Lock() - term, err := tc.rng.Term(indexes[4]) - tc.rng.mu.Unlock() + tc.repl.mu.Lock() + term, err := tc.repl.Term(indexes[4]) + tc.repl.mu.Unlock() if err != nil { t.Fatal(err) } @@ -4569,9 +4569,9 @@ func TestTruncateLog(t *testing.T) { } // The terms of older entries are gone. 
- tc.rng.mu.Lock() - _, err = tc.rng.Term(indexes[3]) - tc.rng.mu.Unlock() + tc.repl.mu.Lock() + _, err = tc.repl.Term(indexes[3]) + tc.repl.mu.Unlock() if err != raft.ErrCompacted { t.Errorf("expected ErrCompacted, got %s", err) } @@ -4590,10 +4590,10 @@ func TestTruncateLog(t *testing.T) { t.Fatal(pErr) } - tc.rng.mu.Lock() + tc.repl.mu.Lock() // The term of the last truncated entry is still available. - term, err = tc.rng.Term(indexes[4]) - tc.rng.mu.Unlock() + term, err = tc.repl.Term(indexes[4]) + tc.repl.mu.Unlock() if err != nil { t.Fatal(err) } @@ -4690,9 +4690,9 @@ func TestAppliedIndex(t *testing.T) { t.Errorf("expected %d, got %d", sum, reply.NewValue) } - tc.rng.mu.Lock() - newAppliedIndex := tc.rng.mu.state.RaftAppliedIndex - tc.rng.mu.Unlock() + tc.repl.mu.Lock() + newAppliedIndex := tc.repl.mu.state.RaftAppliedIndex + tc.repl.mu.Unlock() if newAppliedIndex <= appliedIndex { t.Errorf("appliedIndex did not advance. Was %d, now %d", appliedIndex, newAppliedIndex) } @@ -4765,14 +4765,14 @@ func TestChangeReplicasDuplicateError(t *testing.T) { tc.Start(t) defer tc.Stop() - if err := tc.rng.ChangeReplicas( + if err := tc.repl.ChangeReplicas( context.Background(), roachpb.ADD_REPLICA, roachpb.ReplicaDescriptor{ NodeID: tc.store.Ident.NodeID, StoreID: 9999, }, - tc.rng.Desc(), + tc.repl.Desc(), ); err == nil || !strings.Contains(err.Error(), "already present") { t.Fatalf("must not be able to add second replica to same node (err=%s)", err) } @@ -5025,7 +5025,7 @@ func TestRangeLookup(t *testing.T) { tc.Start(t) defer tc.Stop() - expected := []roachpb.RangeDescriptor{*tc.rng.Desc()} + expected := []roachpb.RangeDescriptor{*tc.repl.Desc()} testCases := []struct { key roachpb.RKey reverse bool @@ -5079,15 +5079,15 @@ func TestRequestLeaderEncounterGroupDeleteError(t *testing.T) { return &roachpb.RaftGroupDeletedError{} } - rng := tc.rng + repl := tc.repl - rng.mu.Lock() - rng.mu.submitProposalFn = submitProposalFn - rng.mu.Unlock() + repl.mu.Lock() + repl.mu.submitProposalFn = submitProposalFn + repl.mu.Unlock() gArgs := getArgs(roachpb.Key("a")) // Force the read command request a new lease. - tc.manualClock.Set(leaseExpiry(rng)) + tc.manualClock.Set(leaseExpiry(repl)) _, pErr := client.SendWrappedWith(context.Background(), tc.store, roachpb.Header{ Timestamp: tc.Clock().Now(), RangeID: 1, @@ -5207,15 +5207,15 @@ func TestReplicaLoadSystemConfigSpanIntent(t *testing.T) { if err != nil { t.Fatal(err) } - rng := tc.store.LookupReplica(scStartSddr, nil) - if rng == nil { + repl := tc.store.LookupReplica(scStartSddr, nil) + if repl == nil { t.Fatalf("no replica contains the SystemConfig span") } // Create a transaction and write an intent to the system // config span. key := keys.SystemConfigSpan.Key - _, btH := beginTxnArgs(key, newTransaction("test", key, 1, enginepb.SERIALIZABLE, rng.store.Clock())) + _, btH := beginTxnArgs(key, newTransaction("test", key, 1, enginepb.SERIALIZABLE, repl.store.Clock())) btH.Txn.Priority = 1 // low so it can be pushed put := putArgs(key, []byte("foo")) if _, pErr := maybeWrapWithBeginTransaction(context.Background(), tc.Sender(), btH, &put); pErr != nil { @@ -5225,7 +5225,7 @@ func TestReplicaLoadSystemConfigSpanIntent(t *testing.T) { // Abort the transaction so that the async intent resolution caused // by loading the system config span doesn't waste any time in // clearing the intent. 
- pusher := newTransaction("test", key, 1, enginepb.SERIALIZABLE, rng.store.Clock()) + pusher := newTransaction("test", key, 1, enginepb.SERIALIZABLE, repl.store.Clock()) pusher.Priority = 2 // will push successfully pushArgs := pushTxnArgs(pusher, btH.Txn, roachpb.PUSH_ABORT) if _, pErr := tc.SendWrapped(&pushArgs); pErr != nil { @@ -5233,7 +5233,7 @@ func TestReplicaLoadSystemConfigSpanIntent(t *testing.T) { } // Verify that the intent trips up loading the SystemConfig data. - if _, _, err := rng.loadSystemConfigSpan(); err != errSystemConfigIntent { + if _, _, err := repl.loadSystemConfigSpan(); err != errSystemConfigIntent { t.Fatal(err) } @@ -5241,12 +5241,12 @@ func TestReplicaLoadSystemConfigSpanIntent(t *testing.T) { // there and verify that we can now load the data as expected. v := roachpb.MakeValueFromString("foo") util.SucceedsSoon(t, func() error { - if err := engine.MVCCPut(context.Background(), rng.store.Engine(), &enginepb.MVCCStats{}, - keys.SystemConfigSpan.Key, rng.store.Clock().Now(), v, nil); err != nil { + if err := engine.MVCCPut(context.Background(), repl.store.Engine(), &enginepb.MVCCStats{}, + keys.SystemConfigSpan.Key, repl.store.Clock().Now(), v, nil); err != nil { return err } - kvs, _, err := rng.loadSystemConfigSpan() + kvs, _, err := repl.loadSystemConfigSpan() if err != nil { return err } @@ -5264,13 +5264,13 @@ func TestReplicaDestroy(t *testing.T) { tc.Start(t) defer tc.Stop() - rep, err := tc.store.GetReplica(1) + repl, err := tc.store.GetReplica(1) if err != nil { t.Fatal(err) } // First try and fail with a stale descriptor. - origDesc := rep.Desc() + origDesc := repl.Desc() newDesc := protoutil.Clone(origDesc).(*roachpb.RangeDescriptor) for i := range newDesc.Replicas { if newDesc.Replicas[i].StoreID == tc.store.StoreID() { @@ -5280,15 +5280,15 @@ func TestReplicaDestroy(t *testing.T) { } } - if err := rep.setDesc(newDesc); err != nil { + if err := repl.setDesc(newDesc); err != nil { t.Fatal(err) } - if err := tc.store.removeReplicaImpl(tc.rng, *origDesc, true); !testutils.IsError(err, "replica ID has changed") { + if err := tc.store.removeReplicaImpl(tc.repl, *origDesc, true); !testutils.IsError(err, "replica ID has changed") { t.Fatalf("expected error 'replica ID has changed' but got %v", err) } // Now try a fresh descriptor and succeed. - if err := tc.store.removeReplicaImpl(tc.rng, *rep.Desc(), true); err != nil { + if err := tc.store.removeReplicaImpl(tc.repl, *repl.Desc(), true); err != nil { t.Fatal(err) } } @@ -5298,10 +5298,10 @@ func TestEntries(t *testing.T) { tc := testContext{} tc.Start(t) defer tc.Stop() - tc.rng.store.SetRaftLogQueueActive(false) + tc.repl.store.SetRaftLogQueueActive(false) - rng := tc.rng - rangeID := rng.RangeID + repl := tc.repl + rangeID := repl.RangeID var indexes []uint64 populateLogs := func(from, to int) []uint64 { @@ -5311,7 +5311,7 @@ func TestEntries(t *testing.T) { if _, pErr := tc.SendWrapped(&args); pErr != nil { t.Fatal(pErr) } - idx, err := rng.GetLastIndex() + idx, err := repl.GetLastIndex() if err != nil { t.Fatal(err) } @@ -5390,7 +5390,7 @@ func TestEntries(t *testing.T) { // Case 19: lo and hi are available, but entry cache evicted. {lo: indexes[5], hi: indexes[9], expResultCount: 4, expCacheCount: 0, setup: func() { // Manually evict cache for the first 10 log entries. - rng.store.raftEntryCache.delEntries(rangeID, indexes[0], indexes[9]+1) + repl.store.raftEntryCache.delEntries(rangeID, indexes[0], indexes[9]+1) indexes = append(indexes, populateLogs(10, 40)...) 
}}, // Case 20: lo and hi are available, entry cache evicted and hi available in cache. @@ -5403,13 +5403,13 @@ func TestEntries(t *testing.T) { if tc.setup != nil { tc.setup() } - cacheEntries, _, _ := rng.store.raftEntryCache.getEntries(rangeID, tc.lo, tc.hi, tc.maxBytes) + cacheEntries, _, _ := repl.store.raftEntryCache.getEntries(rangeID, tc.lo, tc.hi, tc.maxBytes) if len(cacheEntries) != tc.expCacheCount { t.Errorf("%d: expected cache count %d, got %d", i, tc.expCacheCount, len(cacheEntries)) } - rng.mu.Lock() - ents, err := rng.Entries(tc.lo, tc.hi, tc.maxBytes) - rng.mu.Unlock() + repl.mu.Lock() + ents, err := repl.Entries(tc.lo, tc.hi, tc.maxBytes) + repl.mu.Unlock() if tc.expError == nil && err != nil { t.Errorf("%d: expected no error, got %s", i, err) continue @@ -5423,26 +5423,26 @@ func TestEntries(t *testing.T) { } // Case 23: Lo must be less than or equal to hi. - rng.mu.Lock() - if _, err := rng.Entries(indexes[9], indexes[5], 0); err == nil { + repl.mu.Lock() + if _, err := repl.Entries(indexes[9], indexes[5], 0); err == nil { t.Errorf("23: error expected, got none") } - rng.mu.Unlock() + repl.mu.Unlock() // Case 24: add a gap to the indexes. if err := engine.MVCCDelete(context.Background(), tc.store.Engine(), nil, keys.RaftLogKey(rangeID, indexes[6]), hlc.ZeroTimestamp, nil); err != nil { t.Fatal(err) } - rng.store.raftEntryCache.delEntries(rangeID, indexes[6], indexes[6]+1) + repl.store.raftEntryCache.delEntries(rangeID, indexes[6], indexes[6]+1) - rng.mu.Lock() - defer rng.mu.Unlock() - if _, err := rng.Entries(indexes[5], indexes[9], 0); err == nil { + repl.mu.Lock() + defer repl.mu.Unlock() + if _, err := repl.Entries(indexes[5], indexes[9], 0); err == nil { t.Errorf("24: error expected, got none") } // Case 25: don't hit the gap due to maxBytes. - ents, err := rng.Entries(indexes[5], indexes[9], 1) + ents, err := repl.Entries(indexes[5], indexes[9], 1) if err != nil { t.Errorf("25: expected no error, got %s", err) } @@ -5451,7 +5451,7 @@ func TestEntries(t *testing.T) { } // Case 26: don't hit the gap due to truncation. - if _, err := rng.Entries(indexes[4], indexes[9], 0); err != raft.ErrCompacted { + if _, err := repl.Entries(indexes[4], indexes[9], 0); err != raft.ErrCompacted { t.Errorf("26: expected error %s , got %s", raft.ErrCompacted, err) } } @@ -5461,10 +5461,10 @@ func TestTerm(t *testing.T) { tc := testContext{} tc.Start(t) defer tc.Stop() - tc.rng.store.SetRaftLogQueueActive(false) + tc.repl.store.SetRaftLogQueueActive(false) - rng := tc.rng - rangeID := rng.RangeID + repl := tc.repl + rangeID := repl.RangeID // Populate the log with 10 entries. Save the LastIndex after each write. var indexes []uint64 @@ -5474,7 +5474,7 @@ func TestTerm(t *testing.T) { if _, pErr := tc.SendWrapped(&args); pErr != nil { t.Fatal(pErr) } - idx, err := tc.rng.GetLastIndex() + idx, err := tc.repl.GetLastIndex() if err != nil { t.Fatal(err) } @@ -5487,10 +5487,10 @@ func TestTerm(t *testing.T) { t.Fatal(pErr) } - rng.mu.Lock() - defer rng.mu.Unlock() + repl.mu.Lock() + defer repl.mu.Unlock() - firstIndex, err := rng.FirstIndex() + firstIndex, err := repl.FirstIndex() if err != nil { t.Fatal(err) } @@ -5499,20 +5499,20 @@ func TestTerm(t *testing.T) { } // Truncated logs should return an ErrCompacted error. 
- if _, err := tc.rng.Term(indexes[1]); err != raft.ErrCompacted { + if _, err := tc.repl.Term(indexes[1]); err != raft.ErrCompacted { t.Errorf("expected ErrCompacted, got %s", err) } - if _, err := tc.rng.Term(indexes[3]); err != raft.ErrCompacted { + if _, err := tc.repl.Term(indexes[3]); err != raft.ErrCompacted { t.Errorf("expected ErrCompacted, got %s", err) } // FirstIndex-1 should return the term of firstIndex. - firstIndexTerm, err := tc.rng.Term(firstIndex) + firstIndexTerm, err := tc.repl.Term(firstIndex) if err != nil { t.Errorf("expect no error, got %s", err) } - term, err := tc.rng.Term(indexes[4]) + term, err := tc.repl.Term(indexes[4]) if err != nil { t.Errorf("expect no error, got %s", err) } @@ -5520,21 +5520,21 @@ func TestTerm(t *testing.T) { t.Errorf("expected firstIndex-1's term:%d to equal that of firstIndex:%d", term, firstIndexTerm) } - lastIndex, err := rng.LastIndex() + lastIndex, err := repl.LastIndex() if err != nil { t.Fatal(err) } // Last index should return correctly. - if _, err := tc.rng.Term(lastIndex); err != nil { + if _, err := tc.repl.Term(lastIndex); err != nil { t.Errorf("expected no error, got %s", err) } // Terms for after the last index should return ErrUnavailable. - if _, err := tc.rng.Term(lastIndex + 1); err != raft.ErrUnavailable { + if _, err := tc.repl.Term(lastIndex + 1); err != raft.ErrUnavailable { t.Errorf("expected ErrUnavailable, got %s", err) } - if _, err := tc.rng.Term(indexes[9] + 1000); err != raft.ErrUnavailable { + if _, err := tc.repl.Term(indexes[9] + 1000); err != raft.ErrUnavailable { t.Errorf("expected ErrUnavailable, got %s", err) } } @@ -5547,8 +5547,8 @@ func TestGCIncorrectRange(t *testing.T) { // Split range into two ranges. splitKey := roachpb.RKey("c") - rng1 := tc.rng - rng2 := splitTestRange(tc.store, splitKey, splitKey, t) + repl1 := tc.repl + repl2 := splitTestRange(tc.store, splitKey, splitKey, t) // Write a key to range 2 at two different timestamps so we can // GC the earlier timestamp without needing to delete it. @@ -5560,10 +5560,10 @@ func TestGCIncorrectRange(t *testing.T) { ts2 := now.Add(2, 0) ts1Header := roachpb.Header{Timestamp: ts1} ts2Header := roachpb.Header{Timestamp: ts2} - if _, pErr := client.SendWrappedWith(context.Background(), rng2, ts1Header, &putReq); pErr != nil { + if _, pErr := client.SendWrappedWith(context.Background(), repl2, ts1Header, &putReq); pErr != nil { t.Errorf("unexpected pError on put key request: %s", pErr) } - if _, pErr := client.SendWrappedWith(context.Background(), rng2, ts2Header, &putReq); pErr != nil { + if _, pErr := client.SendWrappedWith(context.Background(), repl2, ts2Header, &putReq); pErr != nil { t.Errorf("unexpected pError on put key request: %s", pErr) } @@ -5571,27 +5571,27 @@ func TestGCIncorrectRange(t *testing.T) { // should succeed even though it doesn't contain the key, because // the request for the incorrect key will be silently dropped. gKey := gcKey(key, ts1) - gcReq := gcArgs(rng1.Desc().StartKey, rng1.Desc().EndKey, gKey) - if _, pErr := client.SendWrappedWith(context.Background(), rng1, roachpb.Header{Timestamp: tc.Clock().Now()}, &gcReq); pErr != nil { + gcReq := gcArgs(repl1.Desc().StartKey, repl1.Desc().EndKey, gKey) + if _, pErr := client.SendWrappedWith(context.Background(), repl1, roachpb.Header{Timestamp: tc.Clock().Now()}, &gcReq); pErr != nil { t.Errorf("unexpected pError on garbage collection request to incorrect range: %s", pErr) } // Make sure the key still exists on range 2. 
getReq := getArgs(key) - if res, pErr := client.SendWrappedWith(context.Background(), rng2, ts1Header, &getReq); pErr != nil { + if res, pErr := client.SendWrappedWith(context.Background(), repl2, ts1Header, &getReq); pErr != nil { t.Errorf("unexpected pError on get request to correct range: %s", pErr) } else if resVal := res.(*roachpb.GetResponse).Value; resVal == nil { t.Errorf("expected value %s to exists after GC to incorrect range but before GC to correct range, found %v", val, resVal) } // Send GC request to range 2 for the same key. - gcReq = gcArgs(rng2.Desc().StartKey, rng2.Desc().EndKey, gKey) - if _, pErr := client.SendWrappedWith(context.Background(), rng2, roachpb.Header{Timestamp: tc.Clock().Now()}, &gcReq); pErr != nil { + gcReq = gcArgs(repl2.Desc().StartKey, repl2.Desc().EndKey, gKey) + if _, pErr := client.SendWrappedWith(context.Background(), repl2, roachpb.Header{Timestamp: tc.Clock().Now()}, &gcReq); pErr != nil { t.Errorf("unexpected pError on garbage collection request to correct range: %s", pErr) } // Make sure the key no longer exists on range 2. - if res, pErr := client.SendWrappedWith(context.Background(), rng2, ts1Header, &getReq); pErr != nil { + if res, pErr := client.SendWrappedWith(context.Background(), repl2, ts1Header, &getReq); pErr != nil { t.Errorf("unexpected pError on get request to correct range: %s", pErr) } else if resVal := res.(*roachpb.GetResponse).Value; resVal != nil { t.Errorf("expected value at key %s to no longer exist after GC to correct range, found value %v", key, resVal) @@ -5618,7 +5618,7 @@ func TestReplicaCancelRaft(t *testing.T) { // test panic as we still need the engine open (at least with proposer- // evaluated KV). tc.stopper.Quiesce() - _, pErr := tc.rng.addWriteCmd(context.Background(), ba) + _, pErr := tc.repl.addWriteCmd(context.Background(), ba) if _, ok := pErr.GetDetail().(*roachpb.AmbiguousResultError); !ok { t.Fatalf("expected an ambiguous result error; got %v", pErr) } @@ -5631,15 +5631,15 @@ func TestComputeChecksumVersioning(t *testing.T) { tc := testContext{} tc.Start(t) defer tc.Stop() - rng := tc.rng + repl := tc.repl - if _, pct, _ := rng.ComputeChecksum(context.TODO(), nil, nil, roachpb.Header{}, + if _, pct, _ := repl.ComputeChecksum(context.TODO(), nil, nil, roachpb.Header{}, roachpb.ComputeChecksumRequest{ChecksumID: uuid.MakeV4(), Version: replicaChecksumVersion}, ); pct.ComputeChecksum == nil { t.Error("right checksum version: expected post-commit trigger") } - if _, pct, _ := rng.ComputeChecksum(context.TODO(), nil, nil, roachpb.Header{}, + if _, pct, _ := repl.ComputeChecksum(context.TODO(), nil, nil, roachpb.Header{}, roachpb.ComputeChecksumRequest{ChecksumID: uuid.MakeV4(), Version: replicaChecksumVersion + 1}, ); pct.ComputeChecksum != nil { t.Errorf("wrong checksum version: expected no post-commit trigger: %s", pct.ComputeChecksum) @@ -5795,10 +5795,10 @@ func TestSyncSnapshot(t *testing.T) { // With enough time in BlockingSnapshotDuration, we succeed on the // first try. - tc.rng.mu.Lock() - snap, err := tc.rng.Snapshot() - tc.rng.mu.Unlock() - tc.rng.CloseOutSnap() + tc.repl.mu.Lock() + snap, err := tc.repl.Snapshot() + tc.repl.mu.Unlock() + tc.repl.CloseOutSnap() if err != nil { t.Fatal(err) @@ -5820,12 +5820,12 @@ func TestReplicaIDChangePending(t *testing.T) { cfg.RaftTickInterval = math.MaxInt32 tc.StartWithStoreConfig(t, cfg) defer tc.Stop() - rng := tc.rng + repl := tc.repl // Stop the command from being proposed to the raft group and being removed. 
- rng.mu.Lock() - rng.mu.submitProposalFn = func(p *ProposalData) error { return nil } - rng.mu.Unlock() + repl.mu.Lock() + repl.mu.submitProposalFn = func(p *ProposalData) error { return nil } + repl.mu.Unlock() // Add a command to the pending list. magicTS := tc.Clock().Now() @@ -5836,7 +5836,7 @@ func TestReplicaIDChangePending(t *testing.T) { Key: roachpb.Key("a"), }, }) - _, _, err := rng.propose(context.Background(), ba) + _, _, err := repl.propose(context.Background(), ba) if err != nil { t.Fatal(err) } @@ -5844,17 +5844,17 @@ func TestReplicaIDChangePending(t *testing.T) { // Set the raft command handler so we can tell if the command has been // re-proposed. commandProposed := make(chan struct{}, 1) - rng.mu.Lock() - rng.mu.submitProposalFn = func(p *ProposalData) error { + repl.mu.Lock() + repl.mu.submitProposalFn = func(p *ProposalData) error { if p.Cmd.Timestamp.Equal(magicTS) { commandProposed <- struct{}{} } return nil } - rng.mu.Unlock() + repl.mu.Unlock() // Set the ReplicaID on the replica. - if err := rng.setReplicaID(2); err != nil { + if err := repl.setReplicaID(2); err != nil { t.Fatal(err) } @@ -5874,16 +5874,16 @@ func TestReplicaRetryRaftProposal(t *testing.T) { var c int32 // updated atomically var wrongLeaseIndex uint64 // populated below - tc.rng.mu.Lock() - tc.rng.mu.submitProposalFn = func(cmd *ProposalData) error { + tc.repl.mu.Lock() + tc.repl.mu.submitProposalFn = func(cmd *ProposalData) error { if v := cmd.ctx.Value(magicKey{}); v != nil { if curAttempt := atomic.AddInt32(&c, 1); curAttempt == 1 { cmd.MaxLeaseIndex = wrongLeaseIndex } } - return defaultSubmitProposalLocked(tc.rng, cmd) + return defaultSubmitProposalLocked(tc.repl, cmd) } - tc.rng.mu.Unlock() + tc.repl.mu.Unlock() pArg := putArgs(roachpb.Key("a"), []byte("asd")) { @@ -5895,9 +5895,9 @@ func TestReplicaRetryRaftProposal(t *testing.T) { } } - tc.rng.mu.Lock() - ai := tc.rng.mu.state.LeaseAppliedIndex - tc.rng.mu.Unlock() + tc.repl.mu.Lock() + ai := tc.repl.mu.state.LeaseAppliedIndex + tc.repl.mu.Unlock() if ai < 1 { t.Fatal("committed a batch, but still at lease index zero") @@ -5913,7 +5913,7 @@ func TestReplicaRetryRaftProposal(t *testing.T) { iArg := incrementArgs(roachpb.Key("b"), expInc) ba.Add(&iArg) { - br, pErr, shouldRetry := tc.rng.tryAddWriteCmd( + br, pErr, shouldRetry := tc.repl.tryAddWriteCmd( context.WithValue(ctx, magicKey{}, "foo"), ba, ) @@ -5927,7 +5927,7 @@ func TestReplicaRetryRaftProposal(t *testing.T) { atomic.StoreInt32(&c, 0) { - br, pErr := tc.rng.addWriteCmd( + br, pErr := tc.repl.addWriteCmd( context.WithValue(ctx, magicKey{}, "foo"), ba, ) @@ -5952,15 +5952,15 @@ func TestReplicaCancelRaftCommandProgress(t *testing.T) { var tc testContext tc.Start(t) defer tc.Stop() - rng := tc.rng - repDesc, err := rng.GetReplicaDescriptor() + repl := tc.repl + repDesc, err := repl.GetReplicaDescriptor() if err != nil { t.Fatal(err) } const num = 10 - var chs []chan proposalResult // protected by rng.mu + var chs []chan proposalResult // protected by repl.mu func() { for i := 0; i < num; i++ { @@ -5968,27 +5968,27 @@ func TestReplicaCancelRaftCommandProgress(t *testing.T) { ba.Timestamp = tc.Clock().Now() ba.Add(&roachpb.PutRequest{Span: roachpb.Span{ Key: roachpb.Key(fmt.Sprintf("k%d", i))}}) - cmd, pErr := rng.evaluateProposal( + cmd, pErr := repl.evaluateProposal( context.Background(), propEvalKV, makeIDKey(), repDesc, ba, ) if pErr != nil { t.Fatal(pErr) } - rng.mu.Lock() - rng.insertProposalLocked(cmd) + repl.mu.Lock() + repl.insertProposalLocked(cmd) // We actually propose 
the command only if we don't // cancel it to simulate the case in which Raft loses // the command and it isn't reproposed due to the // client abandoning it. if rand.Intn(2) == 0 { log.Infof(context.Background(), "abandoning command %d", i) - delete(rng.mu.proposals, cmd.idKey) - } else if err := rng.submitProposalLocked(cmd); err != nil { + delete(repl.mu.proposals, cmd.idKey) + } else if err := repl.submitProposalLocked(cmd); err != nil { t.Error(err) } else { chs = append(chs, cmd.done) } - rng.mu.Unlock() + repl.mu.Unlock() } }() @@ -6011,7 +6011,7 @@ func TestReplicaBurstPendingCommandsAndRepropose(t *testing.T) { defer tc.Stop() const num = 10 - repDesc, err := tc.rng.GetReplicaDescriptor() + repDesc, err := tc.repl.GetReplicaDescriptor() if err != nil { t.Fatal(err) } @@ -6019,23 +6019,23 @@ func TestReplicaBurstPendingCommandsAndRepropose(t *testing.T) { type magicKey struct{} var seenCmds []int - tc.rng.mu.Lock() - tc.rng.mu.submitProposalFn = func(cmd *ProposalData) error { + tc.repl.mu.Lock() + tc.repl.mu.submitProposalFn = func(cmd *ProposalData) error { if v := cmd.ctx.Value(magicKey{}); v != nil { seenCmds = append(seenCmds, int(cmd.MaxLeaseIndex)) } - return defaultSubmitProposalLocked(tc.rng, cmd) + return defaultSubmitProposalLocked(tc.repl, cmd) } - tc.rng.mu.Unlock() + tc.repl.mu.Unlock() - if pErr := tc.rng.redirectOnOrAcquireLease(context.Background()); pErr != nil { + if pErr := tc.repl.redirectOnOrAcquireLease(context.Background()); pErr != nil { t.Fatal(pErr) } expIndexes := make([]int, 0, num) chs := func() []chan proposalResult { - tc.rng.mu.Lock() - defer tc.rng.mu.Unlock() + tc.repl.mu.Lock() + defer tc.repl.mu.Unlock() chs := make([]chan proposalResult, 0, num) origIndexes := make([]int, 0, num) @@ -6046,24 +6046,24 @@ func TestReplicaBurstPendingCommandsAndRepropose(t *testing.T) { ba.Timestamp = tc.Clock().Now() ba.Add(&roachpb.PutRequest{Span: roachpb.Span{ Key: roachpb.Key(fmt.Sprintf("k%d", i))}}) - cmd, pErr := tc.rng.evaluateProposal(ctx, propEvalKV, makeIDKey(), repDesc, ba) + cmd, pErr := tc.repl.evaluateProposal(ctx, propEvalKV, makeIDKey(), repDesc, ba) if pErr != nil { t.Fatal(pErr) } - tc.rng.mu.Lock() - tc.rng.insertProposalLocked(cmd) + tc.repl.mu.Lock() + tc.repl.insertProposalLocked(cmd) chs = append(chs, cmd.done) - tc.rng.mu.Unlock() + tc.repl.mu.Unlock() } - tc.rng.mu.Lock() - for _, p := range tc.rng.mu.proposals { + tc.repl.mu.Lock() + for _, p := range tc.repl.mu.proposals { if v := p.ctx.Value(magicKey{}); v != nil { origIndexes = append(origIndexes, int(p.MaxLeaseIndex)) } } - tc.rng.mu.Unlock() + tc.repl.mu.Unlock() sort.Ints(origIndexes) @@ -6071,7 +6071,7 @@ func TestReplicaBurstPendingCommandsAndRepropose(t *testing.T) { t.Fatalf("wanted required indexes %v, got %v", expIndexes, origIndexes) } - tc.rng.refreshProposalsLocked(0, reasonTicks) + tc.repl.refreshProposalsLocked(0, reasonTicks) return chs }() for _, ch := range chs { @@ -6084,15 +6084,15 @@ func TestReplicaBurstPendingCommandsAndRepropose(t *testing.T) { t.Fatalf("expected indexes %v, got %v", expIndexes, seenCmds) } - tc.rng.mu.Lock() - defer tc.rng.mu.Unlock() - nonePending := len(tc.rng.mu.proposals) == 0 - c := int(tc.rng.mu.lastAssignedLeaseIndex) - int(tc.rng.mu.state.LeaseAppliedIndex) + tc.repl.mu.Lock() + defer tc.repl.mu.Unlock() + nonePending := len(tc.repl.mu.proposals) == 0 + c := int(tc.repl.mu.lastAssignedLeaseIndex) - int(tc.repl.mu.state.LeaseAppliedIndex) if nonePending && c > 0 { t.Errorf("no pending cmds, but have required index offset %d", c) } if 
!nonePending { - t.Fatalf("still pending commands: %+v", tc.rng.mu.proposals) + t.Fatalf("still pending commands: %+v", tc.repl.mu.proposals) } } @@ -6108,7 +6108,7 @@ func TestReplicaRefreshPendingCommandsTicks(t *testing.T) { // Grab Replica.raftMu in order to block normal raft replica processing. This // test is ticking the replica manually and doesn't want the store to be // doing so concurrently. - r := tc.rng + r := tc.repl repDesc, err := r.GetReplicaDescriptor() if err != nil { @@ -6320,7 +6320,7 @@ func TestReplicaTimestampCacheBumpNotLost(t *testing.T) { minNewTS := func() hlc.Timestamp { var ba roachpb.BatchRequest - scan := scanArgs(key, tc.rng.Desc().EndKey.AsRawKey()) + scan := scanArgs(key, tc.repl.Desc().EndKey.AsRawKey()) ba.Add(&scan) resp, pErr := tc.Sender().Send(ctx, ba) @@ -6366,7 +6366,7 @@ func TestReplicaEvaluationNotTxnMutation(t *testing.T) { tc.Start(t) defer tc.Stop() - ctx := tc.rng.AnnotateCtx(context.TODO()) + ctx := tc.repl.AnnotateCtx(context.TODO()) key := keys.LocalMax txn := newTransaction("test", key, 1, enginepb.SERIALIZABLE, tc.Clock()) @@ -6383,7 +6383,7 @@ func TestReplicaEvaluationNotTxnMutation(t *testing.T) { ba.Add(&txnPut) ba.Add(&txnPut) - batch, _, _, _, pErr := tc.rng.executeWriteBatch(ctx, makeIDKey(), ba) + batch, _, _, _, pErr := tc.repl.executeWriteBatch(ctx, makeIDKey(), ba) defer batch.Close() if pErr != nil { t.Fatal(pErr) diff --git a/pkg/storage/scanner_test.go b/pkg/storage/scanner_test.go index 9d82774f4e89..2293314514e5 100644 --- a/pkg/storage/scanner_test.go +++ b/pkg/storage/scanner_test.go @@ -54,23 +54,23 @@ func newTestRangeSet(count int, t *testing.T) *testRangeSet { EndKey: roachpb.RKey(fmt.Sprintf("%03d", i+1)), } // Initialize the range stat so the scanner can use it. - rng := &Replica{ + repl := &Replica{ RangeID: desc.RangeID, } - rng.mu.TimedMutex = syncutil.MakeTimedMutex(defaultMuLogger) - rng.cmdQMu.TimedMutex = syncutil.MakeTimedMutex(defaultMuLogger) - rng.mu.state.Stats = enginepb.MVCCStats{ + repl.mu.TimedMutex = syncutil.MakeTimedMutex(defaultMuLogger) + repl.cmdQMu.TimedMutex = syncutil.MakeTimedMutex(defaultMuLogger) + repl.mu.state.Stats = enginepb.MVCCStats{ KeyBytes: 1, ValBytes: 2, KeyCount: 1, LiveCount: 1, } - if err := rng.setDesc(desc); err != nil { + if err := repl.setDesc(desc); err != nil { t.Fatal(err) } - if exRngItem := rs.replicasByKey.ReplaceOrInsert(rng); exRngItem != nil { - t.Fatalf("failed to insert range %s", rng) + if exRngItem := rs.replicasByKey.ReplaceOrInsert(repl); exRngItem != nil { + t.Fatalf("failed to insert range %s", repl) } } return rs @@ -103,11 +103,11 @@ func (rs *testRangeSet) remove(index int, t *testing.T) *Replica { endKey := roachpb.Key(fmt.Sprintf("%03d", index+1)) rs.Lock() defer rs.Unlock() - rng := rs.replicasByKey.Delete((rangeBTreeKey)(endKey)) - if rng == nil { + repl := rs.replicasByKey.Delete((rangeBTreeKey)(endKey)) + if repl == nil { t.Fatalf("failed to delete range of end key %s", endKey) } - return rng.(*Replica) + return repl.(*Replica) } // Test implementation of a range queue which adds range to an @@ -148,11 +148,11 @@ func (tq *testQueue) Start(clock *hlc.Clock, stopper *stop.Stopper) { }) } -func (tq *testQueue) MaybeAdd(rng *Replica, now hlc.Timestamp) { +func (tq *testQueue) MaybeAdd(repl *Replica, now hlc.Timestamp) { tq.Lock() defer tq.Unlock() - if index := tq.indexOf(rng.RangeID); index == -1 { - tq.ranges = append(tq.ranges, rng) + if index := tq.indexOf(repl.RangeID); index == -1 { + tq.ranges = append(tq.ranges, repl) } } diff --git 
a/pkg/storage/split_queue.go b/pkg/storage/split_queue.go index ab994d1d7320..2c00f74e1d21 100644 --- a/pkg/storage/split_queue.go +++ b/pkg/storage/split_queue.go @@ -68,9 +68,9 @@ func newSplitQueue(store *Store, db *client.DB, gossip *gossip.Gossip) *splitQue // splitting. This is true if the range is intersected by a zone config // prefix or if the range's size in bytes exceeds the limit for the zone. func (sq *splitQueue) shouldQueue( - ctx context.Context, now hlc.Timestamp, rng *Replica, sysCfg config.SystemConfig, + ctx context.Context, now hlc.Timestamp, repl *Replica, sysCfg config.SystemConfig, ) (shouldQ bool, priority float64) { - desc := rng.Desc() + desc := repl.Desc() if len(sysCfg.ComputeSplitKeys(desc.StartKey, desc.EndKey)) > 0 { // Set priority to 1 in the event the range is split by zone configs. priority = 1 @@ -85,7 +85,7 @@ func (sq *splitQueue) shouldQueue( return } - if ratio := float64(rng.GetMVCCStats().Total()) / float64(zone.RangeMaxBytes); ratio > 1 { + if ratio := float64(repl.GetMVCCStats().Total()) / float64(zone.RangeMaxBytes); ratio > 1 { priority += ratio shouldQ = true } diff --git a/pkg/storage/split_queue_test.go b/pkg/storage/split_queue_test.go index 97f33ac620d0..9aecd2158e2d 100644 --- a/pkg/storage/split_queue_test.go +++ b/pkg/storage/split_queue_test.go @@ -85,22 +85,22 @@ func TestSplitQueueShouldQueue(t *testing.T) { func() { // Hold lock throughout to reduce chance of random commands leading // to inconsistent state. - tc.rng.mu.Lock() - defer tc.rng.mu.Unlock() + tc.repl.mu.Lock() + defer tc.repl.mu.Unlock() ms := enginepb.MVCCStats{KeyBytes: test.bytes} - if err := setMVCCStats(context.Background(), tc.rng.store.Engine(), tc.rng.RangeID, ms); err != nil { + if err := setMVCCStats(context.Background(), tc.repl.store.Engine(), tc.repl.RangeID, ms); err != nil { t.Fatal(err) } - tc.rng.mu.state.Stats = ms + tc.repl.mu.state.Stats = ms }() - copy := *tc.rng.Desc() + copy := *tc.repl.Desc() copy.StartKey = test.start copy.EndKey = test.end - if err := tc.rng.setDesc(©); err != nil { + if err := tc.repl.setDesc(©); err != nil { t.Fatal(err) } - shouldQ, priority := splitQ.shouldQueue(context.TODO(), hlc.ZeroTimestamp, tc.rng, cfg) + shouldQ, priority := splitQ.shouldQueue(context.TODO(), hlc.ZeroTimestamp, tc.repl, cfg) if shouldQ != test.shouldQ { t.Errorf("%d: should queue expected %t; got %t", i, test.shouldQ, shouldQ) } diff --git a/pkg/storage/stats_test.go b/pkg/storage/stats_test.go index 80abdbb0fed8..a3a64e67ac20 100644 --- a/pkg/storage/stats_test.go +++ b/pkg/storage/stats_test.go @@ -44,7 +44,7 @@ func TestRangeStatsEmpty(t *testing.T) { tc.Start(t) defer tc.Stop() - ms := tc.rng.GetMVCCStats() + ms := tc.repl.GetMVCCStats() if exp := initialStats(); !reflect.DeepEqual(ms, exp) { t.Errorf("expected stats %+v; got %+v", exp, ms) } @@ -71,7 +71,7 @@ func TestRangeStatsInit(t *testing.T) { if err := engine.MVCCSetRangeStats(context.Background(), tc.engine, 1, &ms); err != nil { t.Fatal(err) } - loadMS, err := engine.MVCCGetRangeStats(context.Background(), tc.engine, tc.rng.RangeID) + loadMS, err := engine.MVCCGetRangeStats(context.Background(), tc.engine, tc.repl.RangeID) if err != nil { t.Fatal(err) } diff --git a/pkg/storage/store.go b/pkg/storage/store.go index c53572d29729..9d078d992773 100644 --- a/pkg/storage/store.go +++ b/pkg/storage/store.go @@ -1251,9 +1251,9 @@ func (s *Store) maybeGossipFirstRange(ctx context.Context) error { retryOptions := base.DefaultRetryOptions() retryOptions.Closer = s.stopper.ShouldStop() for loop := 
retry.Start(retryOptions); loop.Next(); { - rng := s.LookupReplica(roachpb.RKeyMin, nil) - if rng != nil { - pErr := rng.maybeGossipFirstRange(ctx) + repl := s.LookupReplica(roachpb.RKeyMin, nil) + if repl != nil { + pErr := repl.maybeGossipFirstRange(ctx) if nlErr, ok := pErr.GetDetail().(*roachpb.NotLeaseHolderError); !ok || nlErr.LeaseHolder != nil { return pErr.GoError() } @@ -1269,8 +1269,8 @@ func (s *Store) maybeGossipFirstRange(ctx context.Context) error { // Replica.maybeGossipSystemConfig and Replica.maybeGossipNodeLiveness. func (s *Store) maybeGossipSystemData(ctx context.Context) error { for _, span := range keys.GossipedSystemSpans { - rng := s.LookupReplica(roachpb.RKey(span.Key), nil) - if rng == nil { + repl := s.LookupReplica(roachpb.RKey(span.Key), nil) + if repl == nil { // This store has no range with this configuration. continue } @@ -1278,7 +1278,7 @@ func (s *Store) maybeGossipSystemData(ctx context.Context) error { // gossip. If an unexpected error occurs (i.e. nobody else seems to // have an active lease but we still failed to obtain it), return // that error. - if _, pErr := rng.getLeaseForGossip(ctx); pErr != nil { + if _, pErr := repl.getLeaseForGossip(ctx); pErr != nil { return pErr.GoError() } } @@ -1441,8 +1441,8 @@ func (s *Store) GetReplica(rangeID roachpb.RangeID) (*Replica, error) { // getReplicaLocked fetches a replica by RangeID. The store's lock must be held. func (s *Store) getReplicaLocked(rangeID roachpb.RangeID) (*Replica, error) { - if rng, ok := s.mu.replicas[rangeID]; ok { - return rng, nil + if repl, ok := s.mu.replicas[rangeID]; ok { + return repl, nil } return nil, roachpb.NewRangeNotFoundError(rangeID) } @@ -1456,15 +1456,15 @@ func (s *Store) LookupReplica(start, end roachpb.RKey) *Replica { s.mu.Lock() defer s.mu.Unlock() - var rng *Replica - s.visitReplicasLocked(start, roachpb.RKeyMax, func(repl *Replica) bool { - rng = repl + var repl *Replica + s.visitReplicasLocked(start, roachpb.RKeyMax, func(replIter *Replica) bool { + repl = replIter return false }) - if rng == nil || !rng.Desc().ContainsKeyRange(start, end) { + if repl == nil || !repl.Desc().ContainsKeyRange(start, end) { return nil } - return rng + return repl } // getOverlappingKeyRangeLocked returns a KeyRange from the Store overlapping the given @@ -1861,22 +1861,22 @@ func (s *Store) maybeMergeTimestampCaches( // replicasByKey btree. Returns an error if a replica with // the same Range ID or a KeyRange that overlaps has already been added to // this store. addReplicaInternalLocked requires that the store lock is held. -func (s *Store) addReplicaInternalLocked(rng *Replica) error { - if !rng.IsInitialized() { - return errors.Errorf("attempted to add uninitialized range %s", rng) +func (s *Store) addReplicaInternalLocked(repl *Replica) error { + if !repl.IsInitialized() { + return errors.Errorf("attempted to add uninitialized range %s", repl) } // TODO(spencer); will need to determine which range is // newer, and keep that one. 
- if err := s.addReplicaToRangeMapLocked(rng); err != nil { + if err := s.addReplicaToRangeMapLocked(repl); err != nil { return err } - if exRange := s.getOverlappingKeyRangeLocked(rng.Desc()); exRange != nil { - return errors.Errorf("%s: cannot addReplicaInternalLocked; range %s has overlapping range %s", s, rng, exRange.Desc()) + if exRange := s.getOverlappingKeyRangeLocked(repl.Desc()); exRange != nil { + return errors.Errorf("%s: cannot addReplicaInternalLocked; range %s has overlapping range %s", s, repl, exRange.Desc()) } - if exRngItem := s.mu.replicasByKey.ReplaceOrInsert(rng); exRngItem != nil { + if exRngItem := s.mu.replicasByKey.ReplaceOrInsert(repl); exRngItem != nil { return errors.Errorf("%s: cannot addReplicaInternalLocked; range for key %v already exists in replicasByKey btree", s, exRngItem.(KeyRange).endKey()) } @@ -1906,11 +1906,11 @@ func (s *Store) removePlaceholder(rngID roachpb.RangeID) bool { } func (s *Store) removePlaceholderLocked(rngID roachpb.RangeID) bool { - rng, ok := s.mu.replicaPlaceholders[rngID] + repl, ok := s.mu.replicaPlaceholders[rngID] if !ok { return false } - switch exRng := s.mu.replicasByKey.Delete(rng).(type) { + switch exRng := s.mu.replicasByKey.Delete(repl).(type) { case *ReplicaPlaceholder: delete(s.mu.replicaPlaceholders, rngID) return true @@ -1926,11 +1926,11 @@ func (s *Store) removePlaceholderLocked(rngID roachpb.RangeID) bool { // addReplicaToRangeMapLocked adds the replica to the replicas map. // addReplicaToRangeMapLocked requires that the store lock is held. -func (s *Store) addReplicaToRangeMapLocked(rng *Replica) error { - if _, ok := s.mu.replicas[rng.RangeID]; ok { - return errors.Errorf("%s: replica already exists", rng) +func (s *Store) addReplicaToRangeMapLocked(repl *Replica) error { + if _, ok := s.mu.replicas[repl.RangeID]; ok { + return errors.Errorf("%s: replica already exists", repl) } - s.mu.replicas[rng.RangeID] = rng + s.mu.replicas[repl.RangeID] = repl return nil } @@ -2026,20 +2026,20 @@ func (s *Store) removeReplicaImpl( // the updated descriptor. Since the latter update requires acquiring the store // lock (which cannot always safely be done by replicas), this function call // should be deferred until it is safe to acquire the store lock. -func (s *Store) processRangeDescriptorUpdate(rng *Replica) error { +func (s *Store) processRangeDescriptorUpdate(repl *Replica) error { s.mu.Lock() defer s.mu.Unlock() - return s.processRangeDescriptorUpdateLocked(rng) + return s.processRangeDescriptorUpdateLocked(repl) } // processRangeDescriptorUpdateLocked requires that Store.mu and Replica.raftMu // are locked. -func (s *Store) processRangeDescriptorUpdateLocked(rng *Replica) error { - if !rng.IsInitialized() { - return errors.Errorf("attempted to process uninitialized range %s", rng) +func (s *Store) processRangeDescriptorUpdateLocked(repl *Replica) error { + if !repl.IsInitialized() { + return errors.Errorf("attempted to process uninitialized range %s", repl) } - rangeID := rng.RangeID + rangeID := repl.RangeID if _, ok := s.mu.uninitReplicas[rangeID]; !ok { // Do nothing if the range has already been initialized. 
@@ -2047,10 +2047,10 @@ func (s *Store) processRangeDescriptorUpdateLocked(rng *Replica) error { } delete(s.mu.uninitReplicas, rangeID) - if exRange := s.getOverlappingKeyRangeLocked(rng.Desc()); exRange != nil { - return errors.Errorf("%s: cannot processRangeDescriptorUpdate; range %s has overlapping range %s", s, rng, exRange.Desc()) + if exRange := s.getOverlappingKeyRangeLocked(repl.Desc()); exRange != nil { + return errors.Errorf("%s: cannot processRangeDescriptorUpdate; range %s has overlapping range %s", s, repl, exRange.Desc()) } - if exRngItem := s.mu.replicasByKey.ReplaceOrInsert(rng); exRngItem != nil { + if exRngItem := s.mu.replicasByKey.ReplaceOrInsert(repl); exRngItem != nil { return errors.Errorf("range for key %v already exists in replicasByKey btree", (exRngItem.(*Replica)).endKey()) } @@ -2312,7 +2312,7 @@ func (s *Store) Send( } return r.Next() } - var rng *Replica + var repl *Replica // Add the command to the range for execution; exit retry loop on success. s.mu.Lock() @@ -2321,15 +2321,15 @@ func (s *Store) Send( for r := retry.Start(retryOpts); next(&r); { // Get range and add command to the range for execution. var err error - rng, err = s.GetReplica(ba.RangeID) + repl, err = s.GetReplica(ba.RangeID) if err != nil { pErr = roachpb.NewError(err) return nil, pErr } - if !rng.IsInitialized() { - rng.mu.Lock() - replicaID := rng.mu.replicaID - rng.mu.Unlock() + if !repl.IsInitialized() { + repl.mu.Lock() + replicaID := repl.mu.replicaID + repl.mu.Unlock() // If we have an uninitialized copy of the range, then we are // probably a valid member of the range, we're just in the @@ -2340,18 +2340,18 @@ func (s *Store) Send( // leader. return nil, roachpb.NewError(&roachpb.NotLeaseHolderError{ RangeID: ba.RangeID, - LeaseHolder: rng.creatingReplica, + LeaseHolder: repl.creatingReplica, // The replica doesn't have a range descriptor yet, so we have to build // a ReplicaDescriptor manually. Replica: roachpb.ReplicaDescriptor{ - NodeID: rng.store.nodeDesc.NodeID, - StoreID: rng.store.StoreID(), + NodeID: repl.store.nodeDesc.NodeID, + StoreID: repl.store.StoreID(), ReplicaID: replicaID, }, }) } - rng.assert5725(ba) - br, pErr = rng.Send(ctx, ba) + repl.assert5725(ba) + br, pErr = repl.Send(ctx, ba) if pErr == nil { return br, nil } @@ -2410,7 +2410,7 @@ func (s *Store) Send( } // Update the batch transaction, if applicable, in case it has // been independently pushed and has more recent information. - rng.assert5725(ba) + repl.assert5725(ba) if ba.Txn != nil { updatedTxn, pErr := s.maybeUpdateTransaction(ba.Txn, now) if pErr != nil { diff --git a/pkg/storage/store_test.go b/pkg/storage/store_test.go index f40d1ae96c40..0e2d10c0d6ba 100644 --- a/pkg/storage/store_test.go +++ b/pkg/storage/store_test.go @@ -115,12 +115,12 @@ func (db *testSender) Send( if err != nil { return nil, roachpb.NewError(err) } - rng := db.store.LookupReplica(rs.Key, rs.EndKey) - if rng == nil { + repl := db.store.LookupReplica(rs.Key, rs.EndKey) + if repl == nil { return nil, roachpb.NewError(roachpb.NewRangeKeyMismatchError(rs.Key.AsRawKey(), rs.EndKey.AsRawKey(), nil)) } - ba.RangeID = rng.RangeID - repDesc, err := rng.GetReplicaDescriptor() + ba.RangeID = repl.RangeID + repDesc, err := repl.GetReplicaDescriptor() if err != nil { return nil, roachpb.NewError(err) } @@ -340,36 +340,36 @@ func TestStoreAddRemoveRanges(t *testing.T) { t.Error("expected GetRange to fail on missing range") } // Range 1 already exists. Make sure we can fetch it. 
- rng1, err := store.GetReplica(1) + repl1, err := store.GetReplica(1) if err != nil { t.Error(err) } // Remove range 1. - if err := store.RemoveReplica(rng1, *rng1.Desc(), true); err != nil { + if err := store.RemoveReplica(repl1, *repl1.Desc(), true); err != nil { t.Error(err) } // Create a new range (id=2). - rng2 := createReplica(store, 2, roachpb.RKey("a"), roachpb.RKey("b")) - if err := store.AddReplica(rng2); err != nil { + repl2 := createReplica(store, 2, roachpb.RKey("a"), roachpb.RKey("b")) + if err := store.AddReplica(repl2); err != nil { t.Fatal(err) } // Try to add the same range twice - err = store.AddReplica(rng2) + err = store.AddReplica(repl2) if err == nil { t.Fatal("expected error re-adding same range") } // Try to remove range 1 again. - if err := store.RemoveReplica(rng1, *rng1.Desc(), true); err == nil { + if err := store.RemoveReplica(repl1, *repl1.Desc(), true); err == nil { t.Fatal("expected error re-removing same range") } // Try to add a range with previously-used (but now removed) ID. - rng2Dup := createReplica(store, 1, roachpb.RKey("a"), roachpb.RKey("b")) - if err := store.AddReplica(rng2Dup); err == nil { + repl2Dup := createReplica(store, 1, roachpb.RKey("a"), roachpb.RKey("b")) + if err := store.AddReplica(repl2Dup); err == nil { t.Fatal("expected error inserting a duplicated range") } // Add another range with different key range and then test lookup. - rng3 := createReplica(store, 3, roachpb.RKey("c"), roachpb.RKey("d")) - if err := store.AddReplica(rng3); err != nil { + repl3 := createReplica(store, 3, roachpb.RKey("c"), roachpb.RKey("d")) + if err := store.AddReplica(repl3); err != nil { t.Fatal(err) } @@ -377,17 +377,17 @@ func TestStoreAddRemoveRanges(t *testing.T) { start, end roachpb.RKey expRng *Replica }{ - {roachpb.RKey("a"), roachpb.RKey("a\x00"), rng2}, - {roachpb.RKey("a"), roachpb.RKey("b"), rng2}, - {roachpb.RKey("a\xff\xff"), roachpb.RKey("b"), rng2}, - {roachpb.RKey("c"), roachpb.RKey("c\x00"), rng3}, - {roachpb.RKey("c"), roachpb.RKey("d"), rng3}, - {roachpb.RKey("c\xff\xff"), roachpb.RKey("d"), rng3}, + {roachpb.RKey("a"), roachpb.RKey("a\x00"), repl2}, + {roachpb.RKey("a"), roachpb.RKey("b"), repl2}, + {roachpb.RKey("a\xff\xff"), roachpb.RKey("b"), repl2}, + {roachpb.RKey("c"), roachpb.RKey("c\x00"), repl3}, + {roachpb.RKey("c"), roachpb.RKey("d"), repl3}, + {roachpb.RKey("c\xff\xff"), roachpb.RKey("d"), repl3}, {roachpb.RKey("x60\xff\xff"), roachpb.RKey("a"), nil}, {roachpb.RKey("x60\xff\xff"), roachpb.RKey("a\x00"), nil}, {roachpb.RKey("d"), roachpb.RKey("d"), nil}, {roachpb.RKey("c\xff\xff"), roachpb.RKey("d\x00"), nil}, - {roachpb.RKey("a"), nil, rng2}, + {roachpb.RKey("a"), nil, repl2}, {roachpb.RKey("d"), nil, nil}, } @@ -488,31 +488,31 @@ func TestStoreRemoveReplicaDestroy(t *testing.T) { store, _, stopper := createTestStore(t) defer stopper.Stop() - rng1, err := store.GetReplica(1) + repl1, err := store.GetReplica(1) if err != nil { t.Fatal(err) } - if err := store.RemoveReplica(rng1, *rng1.Desc(), true); err != nil { + if err := store.RemoveReplica(repl1, *repl1.Desc(), true); err != nil { t.Fatal(err) } // Verify that removal of a replica marks it as destroyed so that future raft // commands on the Replica will silently be dropped. 
- if err := rng1.withRaftGroup(func(r *raft.RawNode) (bool, error) { + if err := repl1.withRaftGroup(func(r *raft.RawNode) (bool, error) { return true, errors.Errorf("unexpectedly created a raft group") }); err != nil { t.Fatal(err) } - rng1.mu.Lock() - expErr := rng1.mu.destroyed - rng1.mu.Unlock() + repl1.mu.Lock() + expErr := repl1.mu.destroyed + repl1.mu.Unlock() if expErr == nil { t.Fatal("replica was not marked as destroyed") } - if _, _, err := rng1.propose( + if _, _, err := repl1.propose( context.Background(), roachpb.BatchRequest{}, ); err != expErr { t.Fatalf("expected error %s, but got %v", expErr, err) @@ -525,19 +525,19 @@ func TestStoreReplicaVisitor(t *testing.T) { defer stopper.Stop() // Remove range 1. - rng1, err := store.GetReplica(1) + repl1, err := store.GetReplica(1) if err != nil { t.Error(err) } - if err := store.RemoveReplica(rng1, *rng1.Desc(), true); err != nil { + if err := store.RemoveReplica(repl1, *repl1.Desc(), true); err != nil { t.Error(err) } // Add 10 new ranges. const newCount = 10 for i := 0; i < newCount; i++ { - rng := createReplica(store, roachpb.RangeID(i+1), roachpb.RKey(fmt.Sprintf("a%02d", i)), roachpb.RKey(fmt.Sprintf("a%02d", i+1))) - if err := store.AddReplica(rng); err != nil { + repl := createReplica(store, roachpb.RangeID(i+1), roachpb.RKey(fmt.Sprintf("a%02d", i)), roachpb.RKey(fmt.Sprintf("a%02d", i+1))) + if err := store.AddReplica(repl); err != nil { t.Fatal(err) } } @@ -555,13 +555,13 @@ func TestStoreReplicaVisitor(t *testing.T) { } i := 1 seen := make(map[roachpb.RangeID]struct{}) - ranges.Visit(func(rng *Replica) bool { - _, ok := seen[rng.RangeID] + ranges.Visit(func(repl *Replica) bool { + _, ok := seen[repl.RangeID] if ok { - t.Fatalf("already saw %d", rng.RangeID) + t.Fatalf("already saw %d", repl.RangeID) } - seen[rng.RangeID] = struct{}{} + seen[repl.RangeID] = struct{}{} if ec := ranges.EstimatedCount(); ec != 10-i { t.Fatalf( "expected %d remaining; got %d after seeing %+v", @@ -588,12 +588,12 @@ func TestHasOverlappingReplica(t *testing.T) { t.Error("expected GetRange to fail on missing range") } // Range 1 already exists. Make sure we can fetch it. - rng1, err := store.GetReplica(1) + repl1, err := store.GetReplica(1) if err != nil { t.Error(err) } // Remove range 1. - if err := store.RemoveReplica(rng1, *rng1.Desc(), true); err != nil { + if err := store.RemoveReplica(repl1, *repl1.Desc(), true); err != nil { t.Error(err) } @@ -608,8 +608,8 @@ func TestHasOverlappingReplica(t *testing.T) { } for _, desc := range rngDescs { - rng := createReplica(store, roachpb.RangeID(desc.id), desc.start, desc.end) - if err := store.AddReplica(rng); err != nil { + repl := createReplica(store, roachpb.RangeID(desc.id), desc.start, desc.end) + if err := store.AddReplica(repl); err != nil { t.Fatal(err) } } @@ -643,16 +643,16 @@ func TestProcessRangeDescriptorUpdate(t *testing.T) { defer stopper.Stop() // Clobber the existing range so we can test overlaps that aren't KeyMin or KeyMax. 
- rng1, err := store.GetReplica(1) + repl1, err := store.GetReplica(1) if err != nil { t.Error(err) } - if err := store.RemoveReplica(rng1, *rng1.Desc(), true); err != nil { + if err := store.RemoveReplica(repl1, *repl1.Desc(), true); err != nil { t.Error(err) } - rng := createReplica(store, roachpb.RangeID(2), roachpb.RKey("a"), roachpb.RKey("c")) - if err := store.AddReplica(rng); err != nil { + repl := createReplica(store, roachpb.RangeID(2), roachpb.RKey("a"), roachpb.RKey("c")) + if err := store.AddReplica(repl); err != nil { t.Fatal(err) } @@ -1019,11 +1019,11 @@ func TestStoreSendBadRange(t *testing.T) { // See #702 // TODO(bdarnell): convert tests that use this function to use AdminSplit instead. func splitTestRange(store *Store, key, splitKey roachpb.RKey, t *testing.T) *Replica { - rng := store.LookupReplica(key, nil) - if rng == nil { + repl := store.LookupReplica(key, nil) + if repl == nil { t.Fatalf("couldn't lookup range for key %q", key) } - desc, err := store.NewRangeDescriptor(splitKey, rng.Desc().EndKey, rng.Desc().Replicas) + desc, err := store.NewRangeDescriptor(splitKey, repl.Desc().EndKey, repl.Desc().Replicas) if err != nil { t.Fatal(err) } @@ -1039,7 +1039,7 @@ func splitTestRange(store *Store, key, splitKey roachpb.RKey, t *testing.T) *Rep if err != nil { t.Fatal(err) } - if err = store.SplitRange(rng, newRng); err != nil { + if err = store.SplitRange(repl, newRng); err != nil { t.Fatal(err) } return newRng @@ -1052,7 +1052,7 @@ func TestStoreSendOutOfRange(t *testing.T) { store, _, stopper := createTestStore(t) defer stopper.Stop() - rng2 := splitTestRange(store, roachpb.RKeyMin, roachpb.RKey(roachpb.Key("b")), t) + repl2 := splitTestRange(store, roachpb.RKeyMin, roachpb.RKey(roachpb.Key("b")), t) // Range 1 is from KeyMin to "b", so reading "b" from range 1 should // fail because it's just after the range boundary. @@ -1065,7 +1065,7 @@ func TestStoreSendOutOfRange(t *testing.T) { // fail because it's before the start of the range. args = getArgs([]byte("a")) if _, err := client.SendWrappedWith(context.Background(), store.testSender(), roachpb.Header{ - RangeID: rng2.RangeID, + RangeID: repl2.RangeID, }, &args); err == nil { t.Error("expected key to be out of range") } @@ -1144,7 +1144,7 @@ func TestStoreSetRangesMaxBytes(t *testing.T) { baseID := uint32(keys.MaxReservedDescID + 1) testData := []struct { - rng *Replica + repl *Replica expMaxBytes int64 }{ {store.LookupReplica(roachpb.RKeyMin, nil), @@ -1168,7 +1168,7 @@ func TestStoreSetRangesMaxBytes(t *testing.T) { util.SucceedsSoon(t, func() error { for _, test := range testData { - if mb := test.rng.GetMaxBytes(); mb != test.expMaxBytes { + if mb := test.repl.GetMaxBytes(); mb != test.expMaxBytes { return errors.Errorf("range max bytes values did not change to %d; got %d", test.expMaxBytes, mb) } } @@ -2008,16 +2008,16 @@ func TestMaybeRemove(t *testing.T) { } store.WaitForInit() - rng, err := store.GetReplica(1) + repl, err := store.GetReplica(1) if err != nil { t.Error(err) } - if err := store.RemoveReplica(rng, *rng.Desc(), true); err != nil { + if err := store.RemoveReplica(repl, *repl.Desc(), true); err != nil { t.Error(err) } // MaybeRemove is called. 
removedRng := <-fq.maybeRemovedRngs - if removedRng != rng.RangeID { + if removedRng != repl.RangeID { t.Errorf("Unexpected removed range %v", removedRng) } } @@ -2070,7 +2070,7 @@ func TestStoreChangeFrozen(t *testing.T) { b := tc.store.Engine().NewBatch() defer b.Close() var h roachpb.Header - if _, _, err := tc.rng.ChangeFrozen(context.Background(), b, nil, h, *fReqVersMismatch); err != nil { + if _, _, err := tc.repl.ChangeFrozen(context.Background(), b, nil, h, *fReqVersMismatch); err != nil { t.Fatal(err) } assertFrozen(no) // since we do not commit the above batch @@ -2231,11 +2231,11 @@ func TestStoreRangePlaceholders(t *testing.T) { } // Clobber the existing range so we can test non-overlapping placeholders. - rng1, err := s.GetReplica(1) + repl1, err := s.GetReplica(1) if err != nil { t.Error(err) } - if err := s.RemoveReplica(rng1, *rng1.Desc(), true); err != nil { + if err := s.RemoveReplica(repl1, *repl1.Desc(), true); err != nil { t.Error(err) } @@ -2322,17 +2322,17 @@ func TestStoreRemovePlaceholderOnError(t *testing.T) { ctx := context.Background() // Clobber the existing range so we can test nonoverlapping placeholders. - rng1, err := s.GetReplica(1) + repl1, err := s.GetReplica(1) if err != nil { t.Fatal(err) } - if err := s.RemoveReplica(rng1, *rng1.Desc(), true); err != nil { + if err := s.RemoveReplica(repl1, *repl1.Desc(), true); err != nil { t.Fatal(err) } // Generate a minimal fake snapshot. snapData := &roachpb.RaftSnapshotData{ - RangeDescriptor: *rng1.Desc(), + RangeDescriptor: *repl1.Desc(), } data, err := protoutil.Marshal(snapData) if err != nil { @@ -2365,7 +2365,7 @@ func TestStoreRemovePlaceholderOnError(t *testing.T) { if err := s.processRaftRequest(ctx, req, IncomingSnapshot{ SnapUUID: uuid.MakeV4(), - RangeDescriptor: *rng1.Desc(), + RangeDescriptor: *repl1.Desc(), }); !testutils.IsPError(err, expected) { t.Fatalf("expected %s, but found %v", expected, err) } @@ -2394,16 +2394,16 @@ func TestStoreRemovePlaceholderOnRaftIgnored(t *testing.T) { ctx := context.Background() // Clobber the existing range so we can test nonoverlapping placeholders. - rng1, err := s.GetReplica(1) + repl1, err := s.GetReplica(1) if err != nil { t.Fatal(err) } - if err := s.RemoveReplica(rng1, *rng1.Desc(), true); err != nil { + if err := s.RemoveReplica(repl1, *repl1.Desc(), true); err != nil { t.Fatal(err) } if _, err := writeInitialState( - ctx, s.Engine(), enginepb.MVCCStats{}, *rng1.Desc(), + ctx, s.Engine(), enginepb.MVCCStats{}, *repl1.Desc(), raftpb.HardState{}, &roachpb.Lease{}, ); err != nil { t.Fatal(err) @@ -2411,7 +2411,7 @@ func TestStoreRemovePlaceholderOnRaftIgnored(t *testing.T) { // Generate a minimal fake snapshot. snapData := &roachpb.RaftSnapshotData{ - RangeDescriptor: *rng1.Desc(), + RangeDescriptor: *repl1.Desc(), } data, err := protoutil.Marshal(snapData) if err != nil { @@ -2447,7 +2447,7 @@ func TestStoreRemovePlaceholderOnRaftIgnored(t *testing.T) { if err := s.processRaftRequest(ctx, req, IncomingSnapshot{ SnapUUID: uuid.MakeV4(), - RangeDescriptor: *rng1.Desc(), + RangeDescriptor: *repl1.Desc(), }); err != nil { t.Fatal(err) } diff --git a/pkg/storage/ts_maintenance_queue_test.go b/pkg/storage/ts_maintenance_queue_test.go index e088e28b5583..b01a4a6d4ca7 100644 --- a/pkg/storage/ts_maintenance_queue_test.go +++ b/pkg/storage/ts_maintenance_queue_test.go @@ -105,10 +105,10 @@ func TestTimeSeriesMaintenanceQueue(t *testing.T) { // Generate several splits. 
 	splitKeys := []roachpb.Key{roachpb.Key("c"), roachpb.Key("b"), roachpb.Key("a")}
 	for _, k := range splitKeys {
-		rng := store.LookupReplica(roachpb.RKey(k), nil)
+		repl := store.LookupReplica(roachpb.RKey(k), nil)
 		args := adminSplitArgs(k, k)
 		if _, pErr := client.SendWrappedWith(context.Background(), store, roachpb.Header{
-			RangeID: rng.RangeID,
+			RangeID: repl.RangeID,
 		}, &args); pErr != nil {
 			t.Fatal(pErr)
 		}