diff --git a/pkg/storage/apply/task.go b/pkg/storage/apply/task.go index 8bfa62acd0a1..26e9ef9511dd 100644 --- a/pkg/storage/apply/task.go +++ b/pkg/storage/apply/task.go @@ -12,6 +12,7 @@ package apply import ( "context" + "errors" "go.etcd.io/etcd/raft/raftpb" ) @@ -54,6 +55,10 @@ type StateMachine interface { ApplySideEffects(CheckedCommand) (AppliedCommand, error) } +// ErrRemoved can be returned from ApplySideEffects which will stop the +// task from processing more commands and return immediately. +var ErrRemoved = errors.New("replica removed") + // Batch accumulates a series of updates from Commands and performs them // all at once to its StateMachine when applied. Groups of Commands will be // staged in the Batch such that one or more trivial Commands are staged or diff --git a/pkg/storage/client_merge_test.go b/pkg/storage/client_merge_test.go index 9917ada54c26..0b83e5a9126a 100644 --- a/pkg/storage/client_merge_test.go +++ b/pkg/storage/client_merge_test.go @@ -57,7 +57,6 @@ import ( "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.etcd.io/etcd/raft" "go.etcd.io/etcd/raft/raftpb" ) @@ -1641,6 +1640,7 @@ func TestStoreReplicaGCAfterMerge(t *testing.T) { storeCfg.TestingKnobs.DisableReplicateQueue = true storeCfg.TestingKnobs.DisableReplicaGCQueue = true storeCfg.TestingKnobs.DisableMergeQueue = true + storeCfg.TestingKnobs.DisableEagerReplicaRemoval = true mtc := &multiTestContext{storeConfig: &storeCfg} mtc.Start(t, 2) defer mtc.Stop() @@ -2060,6 +2060,7 @@ func TestStoreRangeMergeAbandonedFollowers(t *testing.T) { storeCfg.TestingKnobs.DisableReplicaGCQueue = true storeCfg.TestingKnobs.DisableSplitQueue = true storeCfg.TestingKnobs.DisableMergeQueue = true + storeCfg.TestingKnobs.DisableEagerReplicaRemoval = true mtc := &multiTestContext{storeConfig: &storeCfg} mtc.Start(t, 3) defer mtc.Stop() @@ -2827,74 +2828,6 @@ func TestStoreRangeMergeSlowWatcher(t *testing.T) { } } -// unreliableRaftHandler drops all Raft messages that are addressed to the -// specified rangeID, but lets all other messages through. -type unreliableRaftHandler struct { - rangeID roachpb.RangeID - storage.RaftMessageHandler - // If non-nil, can return false to avoid dropping a msg to rangeID - dropReq func(*storage.RaftMessageRequest) bool - dropHB func(*storage.RaftHeartbeat) bool - dropResp func(*storage.RaftMessageResponse) bool -} - -func (h *unreliableRaftHandler) HandleRaftRequest( - ctx context.Context, - req *storage.RaftMessageRequest, - respStream storage.RaftMessageResponseStream, -) *roachpb.Error { - if len(req.Heartbeats)+len(req.HeartbeatResps) > 0 { - reqCpy := *req - req = &reqCpy - req.Heartbeats = h.filterHeartbeats(req.Heartbeats) - req.HeartbeatResps = h.filterHeartbeats(req.HeartbeatResps) - if len(req.Heartbeats)+len(req.HeartbeatResps) == 0 { - // Entirely filtered. 
- return nil - } - } else if req.RangeID == h.rangeID { - if h.dropReq == nil || h.dropReq(req) { - log.Infof( - ctx, - "dropping Raft message %s", - raft.DescribeMessage(req.Message, func([]byte) string { - return "" - }), - ) - - return nil - } - } - return h.RaftMessageHandler.HandleRaftRequest(ctx, req, respStream) -} - -func (h *unreliableRaftHandler) filterHeartbeats( - hbs []storage.RaftHeartbeat, -) []storage.RaftHeartbeat { - if len(hbs) == 0 { - return hbs - } - var cpy []storage.RaftHeartbeat - for i := range hbs { - hb := &hbs[i] - if hb.RangeID != h.rangeID || (h.dropHB != nil && !h.dropHB(hb)) { - cpy = append(cpy, *hb) - } - } - return cpy -} - -func (h *unreliableRaftHandler) HandleRaftResponse( - ctx context.Context, resp *storage.RaftMessageResponse, -) error { - if resp.RangeID == h.rangeID { - if h.dropResp == nil || h.dropResp(resp) { - return nil - } - } - return h.RaftMessageHandler.HandleRaftResponse(ctx, resp) -} - func TestStoreRangeMergeRaftSnapshot(t *testing.T) { defer leaktest.AfterTest(t)() @@ -3372,9 +3305,12 @@ func TestMergeQueue(t *testing.T) { t.Run("non-collocated", func(t *testing.T) { reset(t) verifyUnmerged(t) - mtc.replicateRange(rhs().RangeID, 1) - mtc.transferLease(ctx, rhs().RangeID, 0, 1) - mtc.unreplicateRange(rhs().RangeID, 0) + rhsRangeID := rhs().RangeID + mtc.replicateRange(rhsRangeID, 1) + mtc.transferLease(ctx, rhsRangeID, 0, 1) + mtc.unreplicateRange(rhsRangeID, 0) + require.NoError(t, mtc.waitForUnreplicated(rhsRangeID, 0)) + clearRange(t, lhsStartKey, rhsEndKey) store.MustForceMergeScanAndProcess() verifyMerged(t) diff --git a/pkg/storage/client_metrics_test.go b/pkg/storage/client_metrics_test.go index 4fd43fcbcc21..c91d386c9e81 100644 --- a/pkg/storage/client_metrics_test.go +++ b/pkg/storage/client_metrics_test.go @@ -26,6 +26,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/util/leaktest" "github.com/cockroachdb/cockroach/pkg/util/metric" "github.com/pkg/errors" + "github.com/stretchr/testify/require" ) func checkGauge(t *testing.T, id string, g *metric.Gauge, e int64) { @@ -313,8 +314,9 @@ func TestStoreMetrics(t *testing.T) { return mtc.unreplicateRangeNonFatal(replica.RangeID, 0) }) - // Force GC Scan on store 0 in order to fully remove range. - mtc.stores[1].MustForceReplicaGCScanAndProcess() + // Wait until we're sure that store 0 has successfully processed its removal. + require.NoError(t, mtc.waitForUnreplicated(replica.RangeID, 0)) + mtc.waitForValues(roachpb.Key("z"), []int64{0, 5, 5}) // Verify range count is as expected. diff --git a/pkg/storage/client_raft_helpers_test.go b/pkg/storage/client_raft_helpers_test.go new file mode 100644 index 000000000000..5d7c862dc520 --- /dev/null +++ b/pkg/storage/client_raft_helpers_test.go @@ -0,0 +1,191 @@ +// Copyright 2019 The Cockroach Authors. +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package storage_test + +import ( + "context" + "sync/atomic" + + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/storage" + "github.com/cockroachdb/cockroach/pkg/util/log" + "go.etcd.io/etcd/raft" +) + +// unreliableRaftHandler drops all Raft messages that are addressed to the +// specified rangeID, but lets all other messages through. 
+type unreliableRaftHandler struct { + rangeID roachpb.RangeID + storage.RaftMessageHandler + // If non-nil, can return false to avoid dropping a msg to rangeID + dropReq func(*storage.RaftMessageRequest) bool + dropHB func(*storage.RaftHeartbeat) bool + dropResp func(*storage.RaftMessageResponse) bool +} + +func (h *unreliableRaftHandler) HandleRaftRequest( + ctx context.Context, + req *storage.RaftMessageRequest, + respStream storage.RaftMessageResponseStream, +) *roachpb.Error { + if len(req.Heartbeats)+len(req.HeartbeatResps) > 0 { + reqCpy := *req + req = &reqCpy + req.Heartbeats = h.filterHeartbeats(req.Heartbeats) + req.HeartbeatResps = h.filterHeartbeats(req.HeartbeatResps) + if len(req.Heartbeats)+len(req.HeartbeatResps) == 0 { + // Entirely filtered. + return nil + } + } else if req.RangeID == h.rangeID { + if h.dropReq == nil || h.dropReq(req) { + log.Infof( + ctx, + "dropping Raft message %s", + raft.DescribeMessage(req.Message, func([]byte) string { + return "" + }), + ) + + return nil + } + } + return h.RaftMessageHandler.HandleRaftRequest(ctx, req, respStream) +} + +func (h *unreliableRaftHandler) filterHeartbeats( + hbs []storage.RaftHeartbeat, +) []storage.RaftHeartbeat { + if len(hbs) == 0 { + return hbs + } + var cpy []storage.RaftHeartbeat + for i := range hbs { + hb := &hbs[i] + if hb.RangeID != h.rangeID || (h.dropHB != nil && !h.dropHB(hb)) { + cpy = append(cpy, *hb) + } + } + return cpy +} + +func (h *unreliableRaftHandler) HandleRaftResponse( + ctx context.Context, resp *storage.RaftMessageResponse, +) error { + if resp.RangeID == h.rangeID { + if h.dropResp == nil || h.dropResp(resp) { + return nil + } + } + return h.RaftMessageHandler.HandleRaftResponse(ctx, resp) +} + +// mtcStoreRaftMessageHandler exists to allows a store to be stopped and +// restarted while maintaining a partition using an unreliableRaftHandler. +type mtcStoreRaftMessageHandler struct { + mtc *multiTestContext + storeIdx int +} + +func (h *mtcStoreRaftMessageHandler) HandleRaftRequest( + ctx context.Context, + req *storage.RaftMessageRequest, + respStream storage.RaftMessageResponseStream, +) *roachpb.Error { + return h.mtc.Store(h.storeIdx).HandleRaftRequest(ctx, req, respStream) +} + +func (h *mtcStoreRaftMessageHandler) HandleRaftResponse( + ctx context.Context, resp *storage.RaftMessageResponse, +) error { + return h.mtc.Store(h.storeIdx).HandleRaftResponse(ctx, resp) +} + +func (h *mtcStoreRaftMessageHandler) HandleSnapshot( + header *storage.SnapshotRequest_Header, respStream storage.SnapshotResponseStream, +) error { + return h.mtc.Store(h.storeIdx).HandleSnapshot(header, respStream) +} + +// mtcPartitionedRange is a convenient abstraction to create a range on a node +// in a multiTestContext which can be partitioned and unpartitioned. +type mtcPartitionedRange struct { + rangeID roachpb.RangeID + partitionedNode int + partitioned atomic.Value +} + +// setupPartitionedRange sets up an mtcPartitionedRange for the provided mtc, +// rangeID, and node index in the mtc. The range is initially not partitioned. +// +// We're going to set up the cluster with partitioning so that we can +// partition node p from the others. We do this by installing +// unreliableRaftHandler listeners on all three Stores which we can enable +// and disable with an atomic. The handler on the partitioned store filters +// out all messages while the handler on the other two stores only filters +// out messages from the partitioned store. 
When activated the configuration +// looks like: +// +// [p] +// x x +// / \ +// x x +// [*]<---->[*] +// +// The activated argument controls whether the partition is activated when this +// function returns. +func setupPartitionedRange( + mtc *multiTestContext, rangeID roachpb.RangeID, partitionedNode int, activated bool, +) (*mtcPartitionedRange, error) { + partRange := &mtcPartitionedRange{ + rangeID: rangeID, + partitionedNode: partitionedNode, + } + partRange.partitioned.Store(activated) + partRepl, err := mtc.stores[partitionedNode].GetReplica(rangeID) + if err != nil { + return nil, err + } + partReplDesc, err := partRepl.GetReplicaDescriptor() + if err != nil { + return nil, err + } + for i := range mtc.stores { + s := i + h := &unreliableRaftHandler{ + rangeID: rangeID, + RaftMessageHandler: &mtcStoreRaftMessageHandler{ + mtc: mtc, + storeIdx: s, + }, + } + // Only filter messages from the partitioned store on the other + // two stores. + h.dropReq = func(req *storage.RaftMessageRequest) bool { + return partRange.partitioned.Load().(bool) && + (s == partitionedNode || req.FromReplica.StoreID == partRepl.StoreID()) + } + h.dropHB = func(hb *storage.RaftHeartbeat) bool { + return partRange.partitioned.Load().(bool) && + (s == partitionedNode || hb.FromReplicaID == partReplDesc.ReplicaID) + } + mtc.transport.Listen(mtc.stores[s].Ident.StoreID, h) + } + return partRange, nil +} + +func (pr *mtcPartitionedRange) activate() { + pr.partitioned.Store(true) +} + +func (pr *mtcPartitionedRange) deactivate() { + pr.partitioned.Store(false) +} diff --git a/pkg/storage/client_raft_test.go b/pkg/storage/client_raft_test.go index 12335fcc96ea..ff83cce66189 100644 --- a/pkg/storage/client_raft_test.go +++ b/pkg/storage/client_raft_test.go @@ -33,6 +33,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/storage/engine" + "github.com/cockroachdb/cockroach/pkg/storage/stateloader" "github.com/cockroachdb/cockroach/pkg/storage/storagebase" "github.com/cockroachdb/cockroach/pkg/storage/storagepb" "github.com/cockroachdb/cockroach/pkg/testutils" @@ -1192,15 +1193,12 @@ func TestReplicateAfterRemoveAndSplit(t *testing.T) { return err } - if err := replicateRHS(); !testutils.IsError(err, storage.IntersectingSnapshotMsg) { - t.Fatalf("unexpected error %v", err) - } - - // Enable the replica GC queue so that the next attempt to replicate the RHS - // to store 2 will cause the obsolete replica to be GC'd allowing a - // subsequent replication to succeed. - mtc.stores[2].SetReplicaGCQueueActive(true) - + // This used to fail with IntersectingSnapshotMsg because we relied on replica + // GC to remove the LHS and that queue is disabled. Now we will detect that + // the LHS is not part of the range because of a ReplicaTooOldError and then + // we'll replicaGC the LHS in response. + // TODO(ajwerner): filter the reponses to node 2 or disable this eager + // replicaGC. testutils.SucceedsSoon(t, replicateRHS) } @@ -3000,6 +2998,10 @@ func TestReplicateRogueRemovedNode(t *testing.T) { defer mtc.Stop() mtc.Start(t, 3) + // We're going to set up the cluster with partitioning so that we can + // partition node 0 from the others. The partition is not initially active. + partRange, err := setupPartitionedRange(mtc, 1, 0, false /* activated */) + require.NoError(t, err) // First put the range on all three nodes. 
raftID := roachpb.RangeID(1) mtc.replicateRange(raftID, 1, 2) @@ -3044,7 +3046,9 @@ func TestReplicateRogueRemovedNode(t *testing.T) { } return nil }) - + // Partition nodes 1 and 2 from node 0. Otherwise they'd get a + // ReplicaTooOldError from node 0 and proceed to remove themselves. + partRange.activate() // Bring node 2 back up. mtc.restartStore(2) @@ -3547,6 +3551,7 @@ func TestRemoveRangeWithoutGC(t *testing.T) { sc := storage.TestStoreConfig(nil) sc.TestingKnobs.DisableReplicaGCQueue = true + sc.TestingKnobs.DisableEagerReplicaRemoval = true mtc := &multiTestContext{storeConfig: &sc} defer mtc.Stop() mtc.Start(t, 2) @@ -3597,12 +3602,21 @@ func TestRemoveRangeWithoutGC(t *testing.T) { mtc.advanceClock(context.TODO()) mtc.manualClock.Increment(int64(storage.ReplicaGCQueueInactivityThreshold + 1)) mtc.stores[0].SetReplicaGCQueueActive(true) - mtc.stores[0].MustForceReplicaGCScanAndProcess() + // There's a fun flake where between when the queue detects that this replica + // needs to be removed and when it actually gets processed that an older + // replica will send this replica a raft message which will give it an ID. + // When our replica ID changes the queue will ignore the previous addition and + // we won't be removed. + testutils.SucceedsSoon(t, func() error { + mtc.stores[0].MustForceReplicaGCScanAndProcess() - // The Replica object should be removed. - if _, err := mtc.stores[0].GetReplica(rangeID); !testutils.IsError(err, "r[0-9]+ was not found") { - t.Fatalf("expected replica to be missing; got %v", err) - } + // The Replica object should be removed. + const msg = "r[0-9]+ was not found" + if _, err := mtc.stores[0].GetReplica(rangeID); !testutils.IsError(err, msg) { + return errors.Errorf("expected %s, got %v", msg, err) + } + return nil + }) // And the data should no longer be on disk. if ok, err := engine.MVCCGetProto(context.Background(), mtc.stores[0].Engine(), descKey, @@ -4595,3 +4609,196 @@ func TestAckWriteBeforeApplication(t *testing.T) { }) } } + +// TestProcessSplitAfterRightHandSideHasBeenRemoved tests cases where we have +// a follower replica which has received information about the RHS of a split +// before it has processed that split. The replica can't both have an +// initialized RHS and process the split but it can have (1) an uninitialized +// RHS with a higher replica ID than lives in the split and (2) a RHS with +// an unknown replica ID and a tombstone at exactly the replica ID of the RHS. +// +// Starting in 19.2 if a replica discovers from a raft message that it is an +// old replica then it knows that it has been removed and re-added to the range. +// In this case the Replica eagerly destroys itself and its data. +// +// Given this behavior there are 2 troubling cases with regards to splits. +// +// * In all cases we begin with s1 processing a presplit snapshot for +// r20. After the split the store should have r21/3. +// +// * s1 receives a message for r21/3 prior to acquiring the split lock +// in r21. This will create an uninitialized r21/3 which may write +// HardState. +// +// * Before the r20 processes the split r21 is removed and re-added to +// s1 as r21/4. s1 receives a raft message destined for r21/4 and proceeds +// to destroy its uninitialized r21/3, laying down a tombstone at 4 in the +// process. +// +// (1) s1 processes the split and finds the RHS to be an uninitialized replica +// with a higher replica ID. +// +// (2) s1 crashes before processing the split, forgetting the replica ID of the +// RHS. 
+// +// In both cases we know that the RHS could not have committed anything because +// it cannot have gotten a snapshot but we want to be sure to not synthesize a +// HardState for the RHS that contains a non-zero commit index if we know that +// the RHS will need another snapshot later. +// +func TestProcessSplitAfterRightHandSideHasBeenRemoved(t *testing.T) { + defer leaktest.AfterTest(t)() + sc := storage.TestStoreConfig(nil) + // Newly-started stores (including the "rogue" one) should not GC + // their replicas. We'll turn this back on when needed. + sc.TestingKnobs.DisableReplicaGCQueue = true + sc.RaftDelaySplitToSuppressSnapshotTicks = 0 + // Make the tick interval short so we don't need to wait too long for the + // partitioned leader to time out. Also make the + // RangeLeaseRaftElectionTimeout multiplier high so that system ranges + // like node liveness can actually get leases. + sc.RaftTickInterval = 10 * time.Millisecond + sc.RangeLeaseRaftElectionTimeoutMultiplier = 1000 + + testutils.RunTrueAndFalse(t, "restart store", + func(t *testing.T, restartStore bool) { + mtc := &multiTestContext{ + storeConfig: &sc, + } + defer mtc.Stop() + mtc.Start(t, 3) + + ctx := context.Background() + db := mtc.Store(1).DB() + + increment := func(key roachpb.Key, by int64) { + b := &client.Batch{} + b.AddRawRequest(incrementArgs(key, by)) + require.NoError(t, db.Run(ctx, b)) + } + changeReplicas := func(typ roachpb.ReplicaChangeType, key roachpb.Key, idx int) error { + ri, err := getRangeInfo(ctx, db, key) + require.NoError(t, err) + _, err = db.AdminChangeReplicas(ctx, ri.Desc.StartKey.AsRawKey(), ri.Desc, + roachpb.MakeReplicationChanges(typ, makeReplicationTargets(idx+1)...)) + return err + } + split := func(key roachpb.Key) { + b := &client.Batch{} + b.AddRawRequest(adminSplitArgs(key)) + require.NoError(t, db.Run(ctx, b)) + } + + // Split off a non-system range so we don't have to account for node liveness + // traffic. + scratchTableKey := keys.MakeTablePrefix(math.MaxUint32) + // Put some data in the range so we'll have something to test for. + keyA := append(append(roachpb.Key{}, scratchTableKey...), 'a') + keyB := append(append(roachpb.Key{}, scratchTableKey...), 'b') + + split(scratchTableKey) + ri, err := getRangeInfo(ctx, db, scratchTableKey) + require.Nil(t, err) + rangeID := ri.Desc.RangeID + // First put the range on all three nodes. + mtc.replicateRange(rangeID, 1, 2) + + // Set up a partition for the LHS range only. Initially it is not active. + partRange, err := setupPartitionedRange(mtc, rangeID, 0, false /* activated */) + require.NoError(t, err) + + // Wait for all nodes to catch up. + increment(keyA, 5) + mtc.waitForValues(keyA, []int64{5, 5, 5}) + + // Transfer the lease off of node 0. + mtc.transferLease(ctx, rangeID, 0, 2) + + // Make sure everybody knows about that transfer. + increment(keyA, 1) + mtc.waitForValues(keyA, []int64{6, 6, 6}) + + // Now we partition a replica's LHS, perform a split, remove the RHS, add it + // back and make sure after the LHS the RHS has not been initialized. + // Then we'll test both killing the store and not killing the store before + // clearing the partition and allowing the LHS to process the split. + partRange.activate() + + increment(keyA, 1) + mtc.waitForValues(keyA, []int64{6, 7, 7}) + + split(keyB) + + // Write a value which we can observe to know when the split has been + // applied by the LHS. 
+			increment(keyA, 1)
+			mtc.waitForValues(keyA, []int64{6, 8, 8})
+
+			increment(keyB, 6)
+			// Wait for all non-partitioned nodes to catch up.
+			mtc.waitForValues(keyB, []int64{0, 6, 6})
+
+			rhsInfo, err := getRangeInfo(ctx, db, keyB)
+			require.NoError(t, err)
+			rhsID := rhsInfo.Desc.RangeID
+			_, store0Exists := rhsInfo.Desc.GetReplicaDescriptor(1)
+			require.True(t, store0Exists)
+
+			// Remove and re-add the RHS to create a new uninitialized replica at
+			// a higher replica ID. This will lead to a tombstone being written.
+			require.NoError(t, changeReplicas(roachpb.REMOVE_REPLICA, keyB, 0))
+			// This addition is unsuccessful because the RHS will not accept the
+			// learner snapshot and will be rolled back. Nevertheless it will have
+			// learned that it has been removed at the old replica ID.
+			require.True(t,
+				testutils.IsError(changeReplicas(roachpb.ADD_REPLICA, keyB, 0),
+					"snapshot failed.*cannot apply snapshot: snapshot intersects"))
+
+			// Read the RHS's tombstone to ensure that one was written in response
+			// to learning that the old replica has been removed.
+			var tombstone roachpb.RaftTombstone
+			testutils.SucceedsSoon(t, func() error {
+				tombstoneKey := keys.RaftTombstoneKey(rhsID)
+				ok, err := engine.MVCCGetProto(
+					ctx, mtc.Store(0).Engine(), tombstoneKey, hlc.Timestamp{}, &tombstone, engine.MVCCGetOptions{},
+				)
+				assert.NoError(t, err)
+				if !ok {
+					return fmt.Errorf("tombstone not found for range %d", rhsID)
+				}
+				return nil
+			})
+
+			if restartStore {
+				mtc.stopStore(0)
+				mtc.restartStore(0)
+			}
+
+			// Verify that the commit index on the RHS is zero because we haven't yet
+			// applied the split.
+			hs, err := stateloader.Make(rhsID).LoadHardState(ctx, mtc.Store(0).Engine())
+			require.NoError(t, err)
+			require.Equal(t, uint64(0), hs.Commit)
+
+			// Remove the partition and demonstrate that the LHS catches up
+			// past the split (8 was written after the split).
+			partRange.deactivate()
+			mtc.waitForValues(keyA, []int64{8, 8, 8})
+
+			// Verify that the commit index on the RHS is still zero (was not
+			// initialized by the split).
+			hs, err = stateloader.Make(rhsID).LoadHardState(ctx, mtc.Store(0).Engine())
+			require.NoError(t, err)
+			require.Equal(t, uint64(0), hs.Commit)
+
+			// The RHS should be able to be added successfully now.
+			// This needs a SucceedsSoon because of a rare race between sending a
+			// learner snapshot and sending a raft snapshot whereby the raft snapshot
+			// is sent first (we block raft snapshots from being sent while sending
+			// the learner snapshot but not the other way around).
+			testutils.SucceedsSoon(t, func() error {
+				return changeReplicas(roachpb.ADD_REPLICA, keyB, 0)
+			})
+			mtc.waitForValues(keyB, []int64{6, 6, 6})
+		})
+}
diff --git a/pkg/storage/client_replica_gc_test.go b/pkg/storage/client_replica_gc_test.go
index 7b8e41cf1b36..4162ffca8d85 100644
--- a/pkg/storage/client_replica_gc_test.go
+++ b/pkg/storage/client_replica_gc_test.go
@@ -148,8 +148,11 @@ func TestReplicaGCQueueDropReplicaDirect(t *testing.T) {
 // removes a range from a store that no longer should have a replica.
 func TestReplicaGCQueueDropReplicaGCOnScan(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	mtc := &multiTestContext{}
+	cfg := storage.TestStoreConfig(nil)
+	cfg.TestingKnobs.DisableEagerReplicaRemoval = true
+	mtc.storeConfig = &cfg
+
 	defer mtc.Stop()
 	mtc.Start(t, 3)
 	// Disable the replica gc queue to prevent direct removal of replica.
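Several of the test hunks in this patch share the same setup: build a storage.TestStoreConfig, set TestingKnobs.DisableEagerReplicaRemoval so that a removed replica lingers until the replica GC queue processes it, and hand the config to a multiTestContext. The following minimal sketch shows that pattern in isolation; it is not part of the patch, the test name is hypothetical, and the helpers (leaktest, multiTestContext, storage.TestStoreConfig) are the ones already used by the tests above.

func TestSketchDisableEagerReplicaRemoval(t *testing.T) {
	defer leaktest.AfterTest(t)()
	cfg := storage.TestStoreConfig(nil)
	// Keep replicas around after they have been removed from the range so the
	// test can observe them until the replica GC queue runs.
	cfg.TestingKnobs.DisableEagerReplicaRemoval = true
	cfg.TestingKnobs.DisableReplicaGCQueue = true
	mtc := &multiTestContext{storeConfig: &cfg}
	defer mtc.Stop()
	mtc.Start(t, 3)
	// ... replicate and unreplicate ranges, then re-enable and run the
	// replica GC queue to observe the removal ...
}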
diff --git a/pkg/storage/client_split_test.go b/pkg/storage/client_split_test.go index 6db3d819b5f9..f3cb67ba9fc8 100644 --- a/pkg/storage/client_split_test.go +++ b/pkg/storage/client_split_test.go @@ -3314,8 +3314,12 @@ func TestSplitTriggerMeetsUnexpectedReplicaID(t *testing.T) { // different replicaID than the split trigger expects. add := func() { _, err := tc.AddReplicas(kRHS, tc.Target(1)) - if !testutils.IsError(err, `snapshot intersects existing range`) { - t.Fatalf(`expected snapshot intersects existing range" error got: %+v`, err) + // The "snapshot intersects existing range" error is expected if the store + // has not heard a raft message addressed to a later replica ID while the + // "was not found on" error is expected if the store has heard that it has + // a newer replica ID before receiving the snapshot. + if !testutils.IsError(err, `snapshot intersects existing range|r[0-9]+ was not found on s[0-9]+`) { + t.Fatalf(`expected snapshot intersects existing range|r[0-9]+ was not found on s[0-9]+" error got: %+v`, err) } } for i := 0; i < 5; i++ { @@ -3361,7 +3365,8 @@ func TestSplitTriggerMeetsUnexpectedReplicaID(t *testing.T) { if err != nil { return err } - if desc := repl.Desc(); !descLHS.Equal(desc) { + if desc := repl.Desc(); desc.IsInitialized() && !descLHS.Equal(desc) { + require.NoError(t, store.ManualReplicaGC(repl)) return errors.Errorf("expected %s got %s", &descLHS, desc) } return nil diff --git a/pkg/storage/client_test.go b/pkg/storage/client_test.go index 11c57695ff08..7cd9b61a4746 100644 --- a/pkg/storage/client_test.go +++ b/pkg/storage/client_test.go @@ -1061,8 +1061,8 @@ func (m *multiTestContext) restartStore(i int) { } func (m *multiTestContext) Store(i int) *storage.Store { - m.mu.Lock() - defer m.mu.Unlock() + m.mu.RLock() + defer m.mu.RUnlock() return m.stores[i] } @@ -1243,6 +1243,23 @@ func (m *multiTestContext) unreplicateRangeNonFatal(rangeID roachpb.RangeID, des return err } +// waitForUnreplicated waits until no replica exists for the specified range +// on the dest store. +func (m *multiTestContext) waitForUnreplicated(rangeID roachpb.RangeID, dest int) error { + // Wait for the unreplications to complete on destination node. + return retry.ForDuration(testutils.DefaultSucceedsSoonDuration, func() error { + _, err := m.stores[dest].GetReplica(rangeID) + switch err.(type) { + case nil: + return fmt.Errorf("replica still exists on dest %d", dest) + case *roachpb.RangeNotFoundError: + return nil + default: + return err + } + }) +} + // readIntFromEngines reads the current integer value at the given key // from all configured engines, filling in zeros when the value is not // found. Returns a slice of the same length as mtc.engines. diff --git a/pkg/storage/replica.go b/pkg/storage/replica.go index 095a00256bd9..13f928c30c0f 100644 --- a/pkg/storage/replica.go +++ b/pkg/storage/replica.go @@ -242,6 +242,7 @@ type Replica struct { syncutil.RWMutex // The destroyed status of a replica indicating if it's alive, corrupt, // scheduled for destruction or has been GCed. + // destroyStatus should only be set while also holding the raftMu. destroyStatus // Is the range quiescent? Quiescent ranges are not Tick()'d and unquiesce // whenever a Raft operation is performed. @@ -345,7 +346,8 @@ type Replica struct { replicaID roachpb.ReplicaID // The minimum allowed ID for this replica. Initialized from // RaftTombstone.NextReplicaID. - minReplicaID roachpb.ReplicaID + tombstoneReplicaID roachpb.ReplicaID + // The ID of the leader replica within the Raft group. 
Used to determine
+	// when the leadership changes.
 	leaderID roachpb.ReplicaID
@@ -610,13 +612,26 @@ func (r *Replica) String() string {
 	return fmt.Sprintf("[n%d,s%d,r%s]", r.store.Ident.NodeID, r.store.Ident.StoreID, &r.rangeStr)
 }
 
-// ReplicaID returns the ID for the Replica.
+// ReplicaID returns the ID for the Replica. It may be zero if the replica does
+// not know its ID. Once a Replica has a non-zero ReplicaID it will never change.
 func (r *Replica) ReplicaID() roachpb.ReplicaID {
 	r.mu.RLock()
 	defer r.mu.RUnlock()
 	return r.mu.replicaID
 }
 
+// minReplicaID returns the minimum replica ID this replica could ever possibly
+// have. If this replica currently knows its replica ID (i.e. ReplicaID() is
+// non-zero) then it returns it. Otherwise it returns r.mu.tombstoneReplicaID.
+func (r *Replica) minReplicaID() roachpb.ReplicaID {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	if r.mu.replicaID != 0 {
+		return r.mu.replicaID
+	}
+	return r.mu.tombstoneReplicaID
+}
+
 // cleanupFailedProposal cleans up after a proposal that has failed. It
 // clears any references to the proposal and releases associated quota.
 // It requires that both Replica.mu and Replica.raftMu are exclusively held.
@@ -1043,6 +1058,39 @@ func (r *Replica) requestCanProceed(rspan roachpb.RSpan, ts hlc.Timestamp) error
 	return mismatchErr
 }
 
+// isNewerThanSplit is a helper used in split(Pre|Post)Apply to
+// determine whether the Replica on the right hand side of the split must
+// have been removed from this store after the split.
+//
+// TODO(ajwerner): Ideally if this store had ever learned that the split
+// replica was removed then it would always remember that. However, if the
+// first raft message this store ever receives for the RHS replica carries a
+// replica ID higher than the replica ID for this store in the split that
+// creates the range, and the store then crashes, we will not have created a
+// tombstone when creating the uninitialized replica and so will have no
+// durable record of having been that replica.
+func (r *Replica) isNewerThanSplit(split *roachpb.SplitTrigger) bool {
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	return r.isNewerThanSplitRLocked(split)
+}
+
+func (r *Replica) isNewerThanSplitRLocked(split *roachpb.SplitTrigger) bool {
+	rightDesc, hasRightDesc := split.RightDesc.GetReplicaDescriptor(r.StoreID())
+	// If we have written a tombstone for this range then we know that the RHS
+	// must have already been removed at the split replica ID.
+	return r.mu.tombstoneReplicaID != 0 ||
+		// If the first raft message we received for the RHS range was for a
+		// replica ID above the replica ID of the split then we would not have
+		// written a tombstone but we will have a replica ID that exceeds the
+		// split replica ID.
+		(r.mu.replicaID > rightDesc.ReplicaID &&
+			// If we're catching up from a preemptive snapshot we won't be in the
+			// split and we won't know whether our current replica ID indicates
+			// we've been removed.
+			hasRightDesc)
+}
+
 // checkBatchRequest verifies BatchRequest validity requirements. In particular,
 // the batch must have an assigned timestamp, and either all requests must be
 // read-only, or none.
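The replica.go changes above rename minReplicaID to tombstoneReplicaID and add a minReplicaID() accessor, which together encode the invariant that a known replica ID never moves backwards and never falls below the tombstone's NextReplicaID. Below is an illustrative, stand-alone sketch of that check; it is not part of the patch and only mirrors the validation that setReplicaIDRaftMuLockedMuLocked performs in replica_init.go further down, with simplified parameter names.

// checkIncomingReplicaID is a hypothetical, simplified version of the replica
// ID validation in this patch. current corresponds to r.mu.replicaID,
// tombstone to r.mu.tombstoneReplicaID, and incoming to the replica ID carried
// by an incoming raft message or snapshot.
func checkIncomingReplicaID(current, tombstone, incoming roachpb.ReplicaID) error {
	if incoming == 0 {
		// Preemptive snapshots carry no replica ID; there is nothing to check.
		return nil
	}
	if incoming < tombstone {
		// A tombstone promises that every replica ID below its NextReplicaID
		// is gone for good.
		return &roachpb.RaftGroupDeletedError{}
	}
	if current != 0 && incoming < current {
		// Once a replica learns its ID, that ID may never move backwards.
		return errors.Errorf("replicaID cannot move backwards from %d to %d", current, incoming)
	}
	return nil
}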
diff --git a/pkg/storage/replica_application_result.go b/pkg/storage/replica_application_result.go index aa3a0116a841..fbb203362526 100644 --- a/pkg/storage/replica_application_result.go +++ b/pkg/storage/replica_application_result.go @@ -293,22 +293,40 @@ func (r *Replica) handleComputeChecksumResult(ctx context.Context, cc *storagepb r.computeChecksumPostApply(ctx, *cc) } -func (r *Replica) handleChangeReplicasResult(ctx context.Context, chng *storagepb.ChangeReplicas) { - storeID := r.store.StoreID() - var found bool - for _, rDesc := range chng.Replicas() { - if rDesc.StoreID == storeID { - found = true - break - } +func (r *Replica) handleChangeReplicasResult( + ctx context.Context, chng *storagepb.ChangeReplicas, +) (changeRemovedReplica bool) { + // If this command removes us then we would have set the destroy status + // to destroyReasonRemovalPending which we detect here. + if ds, _ := r.IsDestroyed(); ds != destroyReasonRemovalPending { + return false // changeRemovedReplica + } + + // If this command removes us then we need to go through the process of + // removing our replica from the store. After this method returns, the code + // should roughly return all the way up to whoever called handleRaftReady + // and this Replica should never be heard from again. We can detect if this + // change removed us by inspecting the replica's destroyStatus. We check the + // destroy status before processing a raft ready so if we find ourselves with + // removal pending at this point then we know that this command must be + // responsible. + if log.V(1) { + log.Infof(ctx, "removing replica due to ChangeReplicasTrigger: %v", chng) } - if !found { - // This wants to run as late as possible, maximizing the chances - // that the other nodes have finished this command as well (since - // processing the removal from the queue looks up the Range at the - // lease holder, being too early here turns this into a no-op). - r.store.replicaGCQueue.AddAsync(ctx, r, replicaGCPriorityRemoved) + + // NB: postDestroyRaftMuLocked requires that the batch which removed the data + // be durably synced to disk, which we have. + // See replicaAppBatch.ApplyToStateMachine(). + if err := r.postDestroyRaftMuLocked(ctx, r.GetMVCCStats()); err != nil { + log.Fatalf(ctx, "failed to run Replica postDestroy: %v", err) + } + + if err := r.store.removeReplicaImpl(ctx, r, chng.Desc.NextReplicaID, RemoveOptions{ + DestroyData: false, // We already destroyed the data when the batch committed. + }); err != nil { + log.Fatalf(ctx, "failed to remove replica: %v", err) } + return true } func (r *Replica) handleRaftLogDeltaResult(ctx context.Context, delta int64) { diff --git a/pkg/storage/replica_application_state_machine.go b/pkg/storage/replica_application_state_machine.go index 83f3cfca75fc..f275f6642a6d 100644 --- a/pkg/storage/replica_application_state_machine.go +++ b/pkg/storage/replica_application_state_machine.go @@ -13,7 +13,6 @@ package storage import ( "context" "fmt" - "math" "time" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -376,6 +375,9 @@ type replicaAppBatch struct { // triggered a migration to the replica applied state key. If so, this // migration will be performed when the application batch is committed. migrateToAppliedStateKey bool + // changeRemovesReplica tracks whether the command in the batch (there must + // be only one) removes this replica from the range. + changeRemovesReplica bool // Statistics. 
entries int @@ -515,6 +517,33 @@ func (b *replicaAppBatch) stageWriteBatch(ctx context.Context, cmd *replicatedCm return nil } +// changeRemovesStore returns true if any of the removals in this change have storeID. +func changeRemovesStore( + desc *roachpb.RangeDescriptor, change *storagepb.ChangeReplicas, storeID roachpb.StoreID, +) bool { + _, existsInDesc := desc.GetReplicaDescriptor(storeID) + // NB: if we're catching up from a preemptive snapshot then we won't + // exist in the current descriptor. + if !existsInDesc { + return false + } + // In 19.1 and before we used DeprecatedUpdatedReplicas instead of providing + // a new range descriptor. Check first if this is 19.2 or later command which + // includes the new descriptor. + if change.Desc != nil { + _, existsInChange := change.Desc.GetReplicaDescriptor(storeID) + return !existsInChange + } + // Otherwise use the Removed() method to determine whether this command + // removes us. + for _, replDesc := range change.Removed() { + if replDesc.StoreID == storeID { + return true + } + } + return false +} + // runPreApplyTriggers runs any triggers that must fire before a command is // applied. It may modify the command's ReplicatedEvalResult. func (b *replicaAppBatch) runPreApplyTriggers(ctx context.Context, cmd *replicatedCmd) error { @@ -554,27 +583,40 @@ func (b *replicaAppBatch) runPreApplyTriggers(ctx context.Context, cmd *replicat // cannot be constructed at evaluation time because it differs // on each replica (votes may have already been cast on the // uninitialized replica). Write this new hardstate to the batch too. - // See https://github.com/cockroachdb/cockroach/issues/20629 - splitPreApply(ctx, b.batch, res.Split.SplitTrigger) + // See https://github.com/cockroachdb/cockroach/issues/20629. + // + // Alternatively if we discover that the RHS has already been removed + // from this store, clean up its data. + splitPreApply(ctx, b.batch, res.Split.SplitTrigger, b.r) } if merge := res.Merge; merge != nil { // Merges require the subsumed range to be atomically deleted when the // merge transaction commits. + + // If our range currently has a non-zero replica ID then we know we're + // safe to commit this merge because of the invariants provided to us + // by the merge protocol. Namely if this committed we know that if the + // command committed then all of the replicas in the range descriptor + // are collocated when this command commits. If we do not have a non-zero + // replica ID then the logic in Stage should detect that and destroy our + // preemptive snapshot so we shouldn't ever get here. rhsRepl, err := b.r.store.GetReplica(merge.RightDesc.RangeID) if err != nil { return wrapWithNonDeterministicFailure(err, "unable to get replica for merge") } - // Use math.MaxInt32 as the nextReplicaID as an extra safeguard against creating - // new replicas of the RHS. This isn't required for correctness, since the merge - // protocol should guarantee that no new replicas of the RHS can ever be - // created, but it doesn't hurt to be careful. - const rangeIDLocalOnly = true + + // Use math.MaxInt32 (mergedTombstoneReplicaID) as the nextReplicaID as an + // extra safeguard against creating new replicas of the RHS. This isn't + // required for correctness, since the merge protocol should guarantee that + // no new replicas of the RHS can ever be created, but it doesn't hurt to + // be careful. 
+ const clearRangeIDLocalOnly = true const mustClearRange = false if err := rhsRepl.preDestroyRaftMuLocked( - ctx, b.batch, b.batch, math.MaxInt32, rangeIDLocalOnly, mustClearRange, + ctx, b.batch, b.batch, mergedTombstoneReplicaID, clearRangeIDLocalOnly, mustClearRange, ); err != nil { - return wrapWithNonDeterministicFailure(err, "unable to destroy range before merge") + return wrapWithNonDeterministicFailure(err, "unable to destroy replica before merge") } } @@ -602,6 +644,44 @@ func (b *replicaAppBatch) runPreApplyTriggers(ctx context.Context, cmd *replicat } } + // Detect if this command will remove us from the range. + // If so we stage the removal of all of our range data into this batch. + // We'll complete the removal when it commits. Later logic detects the + // removal by inspecting the destroy status. + // + // NB: This is the last step in the preApply which durably writes to the + // replica state so that if it removes the replica it removes everything. + if change := res.ChangeReplicas; change != nil && + changeRemovesStore(b.state.Desc, change, b.r.store.StoreID()) { + // Delete all of the local data. We're going to delete this hard state too. + // In order for this to be safe we need code above this to promise that we're + // never going to write hard state in response to a message for a later + // replica (with a different replica ID) to this range state. + // Furthermore we mark the replica as destroyed so that new commands are not + // accepted. The replica will be destroyed in handleChangeReplicas. + // Note that we must be holding the raftMu here because we're in the + // midst of application. + + if !b.r.store.TestingKnobs().DisableEagerReplicaRemoval { + b.r.mu.Lock() + b.r.mu.destroyStatus.Set( + roachpb.NewRangeNotFoundError(b.r.RangeID, b.r.store.StoreID()), + destroyReasonRemovalPending) + b.r.mu.Unlock() + b.changeRemovesReplica = true + if err := b.r.preDestroyRaftMuLocked( + ctx, + b.batch, + b.batch, + change.Desc.NextReplicaID, + false, /* clearRangeIDLocalOnly */ + false, /* mustUseClearRange */ + ); err != nil { + return wrapWithNonDeterministicFailure(err, "unable to destroy replica before removal") + } + } + } + // Provide the command's corresponding logical operations to the Replica's // rangefeed. Only do so if the WriteBatch is non-nil, in which case the // rangefeed requires there to be a corresponding logical operation log or @@ -614,6 +694,7 @@ func (b *replicaAppBatch) runPreApplyTriggers(ctx context.Context, cmd *replicat } else if cmd.raftCmd.LogicalOpLog != nil { log.Fatalf(ctx, "non-nil logical op log with nil write batch: %v", cmd.raftCmd) } + return nil } @@ -671,17 +752,23 @@ func (b *replicaAppBatch) ApplyToStateMachine(ctx context.Context) error { r := b.r r.store.Clock().Update(b.maxTS) - // Add the replica applied state key to the write batch. - if err := b.addAppliedStateKeyToBatch(ctx); err != nil { - return err + // Add the replica applied state key to the write batch if this change + // doesn't remove us. + if !b.changeRemovesReplica { + if err := b.addAppliedStateKeyToBatch(ctx); err != nil { + return err + } } // Apply the write batch to RockDB. Entry application is done without // syncing to disk. The atomicity guarantees of the batch and the fact that // the applied state is stored in this batch, ensure that if the batch ends // up not being durably committed then the entries in this batch will be - // applied again upon startup. - const sync = false + // applied again upon startup. 
However, if we're removing the replica's data + // then we sync this batch as it is not safe to call postDestroyRaftMuLocked + // before ensuring that the replica's data has been synchronously removed. + // See handleChangeReplicasResult(). + sync := b.changeRemovesReplica if err := b.batch.Commit(sync); err != nil { return wrapWithNonDeterministicFailure(err, "unable to commit Raft entry batch") } @@ -862,7 +949,11 @@ func (sm *replicaStateMachine) ApplySideEffects( // before notifying a potentially waiting client. clearTrivialReplicatedEvalResultFields(cmd.replicatedResult()) if !cmd.IsTrivial() { - shouldAssert := sm.handleNonTrivialReplicatedEvalResult(ctx, *cmd.replicatedResult()) + shouldAssert, isRemoved := sm.handleNonTrivialReplicatedEvalResult(ctx, *cmd.replicatedResult()) + + if isRemoved { + return nil, apply.ErrRemoved + } // NB: Perform state assertion before acknowledging the client. // Some tests (TestRangeStatsInit) assumes that once the store has started // and the first range has a lease that there will not be a later hard-state. @@ -928,7 +1019,7 @@ func (sm *replicaStateMachine) ApplySideEffects( // to pass a replicatedResult that does not imply any side-effects. func (sm *replicaStateMachine) handleNonTrivialReplicatedEvalResult( ctx context.Context, rResult storagepb.ReplicatedEvalResult, -) (shouldAssert bool) { +) (shouldAssert, isRemoved bool) { // Assert that this replicatedResult implies at least one side-effect. if rResult.Equal(storagepb.ReplicatedEvalResult{}) { log.Fatalf(ctx, "zero-value ReplicatedEvalResult passed to handleNonTrivialReplicatedEvalResult") @@ -960,7 +1051,7 @@ func (sm *replicaStateMachine) handleNonTrivialReplicatedEvalResult( // we want to assert that these two states do not diverge. shouldAssert = !rResult.Equal(storagepb.ReplicatedEvalResult{}) if !shouldAssert { - return false + return false, false } if rResult.Split != nil { @@ -1000,7 +1091,7 @@ func (sm *replicaStateMachine) handleNonTrivialReplicatedEvalResult( } if rResult.ChangeReplicas != nil { - sm.r.handleChangeReplicasResult(ctx, rResult.ChangeReplicas) + isRemoved = sm.r.handleChangeReplicasResult(ctx, rResult.ChangeReplicas) rResult.ChangeReplicas = nil } @@ -1012,7 +1103,7 @@ func (sm *replicaStateMachine) handleNonTrivialReplicatedEvalResult( if !rResult.Equal(storagepb.ReplicatedEvalResult{}) { log.Fatalf(ctx, "unhandled field in ReplicatedEvalResult: %s", pretty.Diff(rResult, storagepb.ReplicatedEvalResult{})) } - return true + return true, isRemoved } func (sm *replicaStateMachine) maybeApplyConfChange(ctx context.Context, cmd *replicatedCmd) error { diff --git a/pkg/storage/replica_destroy.go b/pkg/storage/replica_destroy.go index 0fb60c307472..b706ab4f7876 100644 --- a/pkg/storage/replica_destroy.go +++ b/pkg/storage/replica_destroy.go @@ -12,6 +12,8 @@ package storage import ( "context" + "fmt" + "math" "github.com/cockroachdb/cockroach/pkg/keys" "github.com/cockroachdb/cockroach/pkg/roachpb" @@ -29,7 +31,10 @@ type DestroyReason int const ( // The replica is alive. destroyReasonAlive DestroyReason = iota - // The replica has been marked for GC, but hasn't been GCed yet. + // The replica is in the process of being removed but has not been removed + // yet. It exists to avoid races between two threads which may decide to + // destroy a replica (e.g. processing a ChangeRelicasTrigger removing the + // range and receiving a raft message with a higher replica ID). destroyReasonRemovalPending // The replica has been GCed. 
destroyReasonRemoved @@ -43,15 +48,15 @@ type destroyStatus struct { err error } +func (s destroyStatus) String() string { + return fmt.Sprintf("{%v %d}", s.err, s.reason) +} + func (s *destroyStatus) Set(err error, reason DestroyReason) { s.err = err s.reason = reason } -func (s *destroyStatus) Reset() { - s.Set(nil, destroyReasonAlive) -} - // IsAlive returns true when a replica is alive. func (s destroyStatus) IsAlive() bool { return s.reason == destroyReasonAlive @@ -62,16 +67,30 @@ func (s destroyStatus) Removed() bool { return s.reason == destroyReasonRemoved } +// RemovingOrRemoved returns whether the replica is removed or in the process of +// being removed. +func (s destroyStatus) RemovingOrRemoved() bool { + return s.reason == destroyReasonRemovalPending || s.reason == destroyReasonRemoved +} + +// mergedTombstoneReplicaID is the replica ID written into the tombstone +// for replicas which are part of a range which is known to have been merged. +// This value should prevent any messages from stale replicas of that range from +// ever resurrecting merged replicas. Whenever merging or subsuming a replica we +// know new replicas can never be created so this value is used even if we +// don't know the current replica ID. +const mergedTombstoneReplicaID roachpb.ReplicaID = math.MaxInt32 + func (r *Replica) preDestroyRaftMuLocked( ctx context.Context, reader engine.Reader, writer engine.Writer, nextReplicaID roachpb.ReplicaID, - rangeIDLocalOnly bool, - mustClearRange bool, + clearRangeIDLocalOnly bool, + mustUseClearRange bool, ) error { desc := r.Desc() - err := clearRangeData(desc, reader, writer, rangeIDLocalOnly, mustClearRange) + err := clearRangeData(desc, reader, writer, clearRangeIDLocalOnly, mustUseClearRange) if err != nil { return err } @@ -89,15 +108,17 @@ func (r *Replica) postDestroyRaftMuLocked(ctx context.Context, ms enginepb.MVCCS // // TODO(benesch): we would ideally atomically suggest the compaction with // the deletion of the data itself. - desc := r.Desc() - r.store.compactor.Suggest(ctx, storagepb.SuggestedCompaction{ - StartKey: roachpb.Key(desc.StartKey), - EndKey: roachpb.Key(desc.EndKey), - Compaction: storagepb.Compaction{ - Bytes: ms.Total(), - SuggestedAtNanos: timeutil.Now().UnixNano(), - }, - }) + if ms != (enginepb.MVCCStats{}) { + desc := r.Desc() + r.store.compactor.Suggest(ctx, storagepb.SuggestedCompaction{ + StartKey: roachpb.Key(desc.StartKey), + EndKey: roachpb.Key(desc.EndKey), + Compaction: storagepb.Compaction{ + Bytes: ms.Total(), + SuggestedAtNanos: timeutil.Now().UnixNano(), + }, + }) + } // NB: we need the nil check below because it's possible that we're GC'ing a // Replica without a replicaID, in which case it does not have a sideloaded @@ -115,21 +136,22 @@ func (r *Replica) postDestroyRaftMuLocked(ctx context.Context, ms enginepb.MVCCS } // destroyRaftMuLocked deletes data associated with a replica, leaving a -// tombstone. +// tombstone. The Replica may not be initialized in which case only the +// range ID local data is removed. 
func (r *Replica) destroyRaftMuLocked(ctx context.Context, nextReplicaID roachpb.ReplicaID) error { startTime := timeutil.Now() ms := r.GetMVCCStats() - batch := r.Engine().NewWriteOnlyBatch() defer batch.Close() + clearRangeIDLocalOnly := !r.IsInitialized() if err := r.preDestroyRaftMuLocked( ctx, r.Engine(), batch, nextReplicaID, - false, /* rangeIDLocalOnly */ - false, /* mustClearRange */ + clearRangeIDLocalOnly, + false, /* mustUseClearRange */ ); err != nil { return err } @@ -148,12 +170,18 @@ func (r *Replica) destroyRaftMuLocked(ctx context.Context, nextReplicaID roachpb if err := r.postDestroyRaftMuLocked(ctx, ms); err != nil { return err } - - log.Infof(ctx, "removed %d (%d+%d) keys in %0.0fms [clear=%0.0fms commit=%0.0fms]", - ms.KeyCount+ms.SysCount, ms.KeyCount, ms.SysCount, - commitTime.Sub(startTime).Seconds()*1000, - preTime.Sub(startTime).Seconds()*1000, - commitTime.Sub(preTime).Seconds()*1000) + if r.IsInitialized() { + log.Infof(ctx, "removed %d (%d+%d) keys in %0.0fms [clear=%0.0fms commit=%0.0fms]", + ms.KeyCount+ms.SysCount, ms.KeyCount, ms.SysCount, + commitTime.Sub(startTime).Seconds()*1000, + preTime.Sub(startTime).Seconds()*1000, + commitTime.Sub(preTime).Seconds()*1000) + } else { + log.Infof(ctx, "removed uninitialized range in %0.0fms [clear=%0.0fms commit=%0.0fms]", + commitTime.Sub(startTime).Seconds()*1000, + preTime.Sub(startTime).Seconds()*1000, + commitTime.Sub(preTime).Seconds()*1000) + } return nil } @@ -188,8 +216,8 @@ func (r *Replica) setTombstoneKey( if nextReplicaID < externalNextReplicaID { nextReplicaID = externalNextReplicaID } - if nextReplicaID > r.mu.minReplicaID { - r.mu.minReplicaID = nextReplicaID + if nextReplicaID > r.mu.tombstoneReplicaID { + r.mu.tombstoneReplicaID = nextReplicaID } r.mu.Unlock() diff --git a/pkg/storage/replica_gc_queue.go b/pkg/storage/replica_gc_queue.go index 649e87d60eaa..56388e4625e5 100644 --- a/pkg/storage/replica_gc_queue.go +++ b/pkg/storage/replica_gc_queue.go @@ -12,7 +12,6 @@ package storage import ( "context" - "math" "time" "github.com/cockroachdb/cockroach/pkg/config" @@ -114,13 +113,13 @@ func newReplicaGCQueue(store *Store, db *client.DB, gossip *gossip.Gossip) *repl // in the past. func (rgcq *replicaGCQueue) shouldQueue( ctx context.Context, now hlc.Timestamp, repl *Replica, _ *config.SystemConfig, -) (bool, float64) { +) (shouldQ bool, prio float64) { + lastCheck, err := repl.GetLastReplicaGCTimestamp(ctx) if err != nil { log.Errorf(ctx, "could not read last replica GC timestamp: %+v", err) return false, 0 } - if _, currentMember := repl.Desc().GetReplicaDescriptor(repl.store.StoreID()); !currentMember { return true, replicaGCPriorityRemoved } @@ -215,10 +214,16 @@ func (rgcq *replicaGCQueue) process( } replyDesc := rs[0] + repl.mu.RLock() + replicaID := repl.mu.replicaID + ticks := repl.mu.ticks + repl.mu.RUnlock() + // Now check whether the replica is meant to still exist. // Maybe it was deleted "under us" by being moved. currentDesc, currentMember := replyDesc.GetReplicaDescriptor(repl.store.StoreID()) - if desc.RangeID == replyDesc.RangeID && currentMember { + sameRange := desc.RangeID == replyDesc.RangeID + if sameRange && currentMember { // This replica is a current member of the raft group. Set the last replica // GC check time to avoid re-processing for another check interval. 
// @@ -230,15 +235,10 @@ func (rgcq *replicaGCQueue) process( if err := repl.setLastReplicaGCTimestamp(ctx, repl.store.Clock().Now()); err != nil { return err } - } else if desc.RangeID == replyDesc.RangeID { + } else if sameRange { // We are no longer a member of this range, but the range still exists. // Clean up our local data. - repl.mu.RLock() - replicaID := repl.mu.replicaID - ticks := repl.mu.ticks - repl.mu.RUnlock() - if replicaID == 0 { // This is a preemptive replica. GC'ing a preemptive replica is a // good idea if and only if the up-replication that it was a part of @@ -284,13 +284,18 @@ func (rgcq *replicaGCQueue) process( rgcq.metrics.RemoveReplicaCount.Inc(1) log.VEventf(ctx, 1, "destroying local data") + + nextReplicaID := replyDesc.NextReplicaID // Note that this seems racy - we didn't hold any locks between reading // the range descriptor above and deciding to remove the replica - but // we pass in the NextReplicaID to detect situations in which the // replica became "non-gc'able" in the meantime by checking (with raftMu // held throughout) whether the replicaID is still smaller than the - // NextReplicaID. - if err := repl.store.RemoveReplica(ctx, repl, replyDesc.NextReplicaID, RemoveOptions{ + // NextReplicaID. Given non-zero replica IDs don't change, this is only + // possible if we currently think we're processing a pre-emptive snapshot + // but discover in RemoveReplica that this range has since been added and + // knows that. + if err := repl.store.RemoveReplica(ctx, repl, nextReplicaID, RemoveOptions{ DestroyData: true, }); err != nil { return err @@ -328,10 +333,10 @@ func (rgcq *replicaGCQueue) process( } } - // A replica ID of MaxInt32 is written when we know a range to have been - // merged. See the Merge case of runPreApplyTriggers() for details. - const nextReplicaID = math.MaxInt32 - if err := repl.store.RemoveReplica(ctx, repl, nextReplicaID, RemoveOptions{ + // A mergedTombstoneReplicaID as nextReplicaID because we know the + // range to have been merged. See the Merge case of + // runPreApplyTriggers() for details. + if err := repl.store.RemoveReplica(ctx, repl, mergedTombstoneReplicaID, RemoveOptions{ DestroyData: true, }); err != nil { return err diff --git a/pkg/storage/replica_init.go b/pkg/storage/replica_init.go index b28a3a33591b..08c3c9f66100 100644 --- a/pkg/storage/replica_init.go +++ b/pkg/storage/replica_init.go @@ -150,26 +150,24 @@ func (r *Replica) initRaftMuLockedReplicaMuLocked( } r.rangeStr.store(replicaID, r.mu.state.Desc) r.connectionClass.set(rpc.ConnectionClassForKey(desc.StartKey)) - if err := r.setReplicaIDRaftMuLockedMuLocked(replicaID); err != nil { - return err + if r.mu.replicaID == 0 { + if err := r.setReplicaIDRaftMuLockedMuLocked(ctx, replicaID); err != nil { + return err + } + } else if r.mu.replicaID != replicaID { + log.Fatalf(ctx, "attempting to initialize a replica which has ID %d with ID %d", + r.mu.replicaID, replicaID) } - r.assertStateLocked(ctx, r.store.Engine()) return nil } -func (r *Replica) setReplicaID(replicaID roachpb.ReplicaID) error { - r.raftMu.Lock() - defer r.raftMu.Unlock() - r.mu.Lock() - defer r.mu.Unlock() - return r.setReplicaIDRaftMuLockedMuLocked(replicaID) -} - -func (r *Replica) setReplicaIDRaftMuLockedMuLocked(replicaID roachpb.ReplicaID) error { - if r.mu.replicaID == replicaID { - // The common case: the replica ID is unchanged. 
- return nil +func (r *Replica) setReplicaIDRaftMuLockedMuLocked( + ctx context.Context, replicaID roachpb.ReplicaID, +) error { + if r.mu.replicaID != 0 { + log.Fatalf(ctx, "cannot set replica ID from anything other than 0, currently %d", + r.mu.replicaID) } if replicaID == 0 { // If the incoming message does not have a new replica ID it is a @@ -177,23 +175,17 @@ func (r *Replica) setReplicaIDRaftMuLockedMuLocked(replicaID roachpb.ReplicaID) // accepted. return nil } - if replicaID < r.mu.minReplicaID { + if replicaID < r.mu.tombstoneReplicaID { return &roachpb.RaftGroupDeletedError{} } if r.mu.replicaID > replicaID { return errors.Errorf("replicaID cannot move backwards from %d to %d", r.mu.replicaID, replicaID) } - - if r.mu.destroyStatus.reason == destroyReasonRemovalPending { - // An earlier incarnation of this replica was removed, but apparently it has been re-added - // now, so reset the status. - r.mu.destroyStatus.Reset() + if r.mu.destroyStatus.RemovingOrRemoved() { + // This replica has been marked for removal and we're trying to resurrect it. + log.Fatalf(ctx, "cannot resurect replica %d", r.mu.replicaID) } - // if r.mu.replicaID != 0 { - // // TODO(bdarnell): clean up previous raftGroup (update peers) - // } - // Initialize or update the sideloaded storage. If the sideloaded storage // already exists (which is iff the previous replicaID was non-zero), then // we have to move the contained files over (this corresponds to the case in @@ -220,24 +212,12 @@ func (r *Replica) setReplicaIDRaftMuLockedMuLocked(replicaID roachpb.ReplicaID) return errors.Wrap(err, "while initializing sideloaded storage") } - previousReplicaID := r.mu.replicaID r.mu.replicaID = replicaID - if replicaID >= r.mu.minReplicaID { - r.mu.minReplicaID = replicaID + 1 - } - // Reset the raft group to force its recreation on next usage. - r.mu.internalRaftGroup = nil - - // If there was a previous replica, repropose its pending commands under - // this new incarnation. - if previousReplicaID != 0 { - if log.V(1) { - log.Infof(r.AnnotateCtx(context.TODO()), "changed replica ID from %d to %d", - previousReplicaID, replicaID) - } - // repropose all pending commands under new replicaID. - r.refreshProposalsLocked(0, reasonReplicaIDChanged) + // Sanity check that we do not already have a raft group as we did not + // know our replica ID before this call. + if r.mu.internalRaftGroup != nil { + log.Fatalf(ctx, "somehow had an initialized raft group on a zero valued replica") } return nil @@ -270,8 +250,12 @@ func (r *Replica) maybeInitializeRaftGroup(ctx context.Context) { // If this replica hasn't initialized the Raft group, create it and // unquiesce and wake the leader to ensure the replica comes up to date. initialized := r.mu.internalRaftGroup != nil + // If this replica has been removed or is in the process of being removed + // then it'll never handle any raft events so there's no reason to initialize + // it now. + removed := !r.mu.destroyStatus.IsAlive() r.mu.RUnlock() - if initialized { + if initialized || removed { return } @@ -281,13 +265,41 @@ func (r *Replica) maybeInitializeRaftGroup(ctx context.Context) { r.mu.Lock() defer r.mu.Unlock() + // If we raced on checking the destroyStatus above that's fine as + // the below withRaftGroupLocked will no-op. 
if err := r.withRaftGroupLocked(true, func(raftGroup *raft.RawNode) (bool, error) { return true, nil - }); err != nil { + }); err != nil && err != errRemoved { log.VErrEventf(ctx, 1, "unable to initialize raft group: %s", err) } } +// setDescAfterSplit sets the replica's descriptor and then frees resources +// which may need to discover the split like the txnWaitQueue and rangefeeds. +func (r *Replica) setDescAfterSplit(ctx context.Context, desc *roachpb.RangeDescriptor) { + r.setDesc(ctx, desc) + + // Clear the LHS txn wait queue, to redirect to the RHS if + // appropriate. We do this after setDescWithoutProcessUpdate + // to ensure that no pre-split commands are inserted into the + // txnWaitQueue after we clear it. + r.txnWaitQueue.Clear(false /* disable */) + + // The rangefeed processor will no longer be provided logical ops for + // its entire range, so it needs to be shut down and all registrations + // need to retry. + // TODO(nvanbenschoten): It should be possible to only reject registrations + // that overlap with the new range of the split and keep registrations that + // are only interested in keys that are still on the original range running. + r.disconnectRangefeedWithReason( + roachpb.RangeFeedRetryError_REASON_RANGE_SPLIT, + ) + + // Clear the original range's request stats, since they include requests for + // spans that are now owned by the new range. + r.leaseholderStats.resetRequestCounts() +} + // setDesc atomically sets the replica's descriptor. It requires raftMu to be // locked. func (r *Replica) setDesc(ctx context.Context, desc *roachpb.RangeDescriptor) { diff --git a/pkg/storage/replica_learner_test.go b/pkg/storage/replica_learner_test.go index 6f2a5ff6825a..9e9072af9e6f 100644 --- a/pkg/storage/replica_learner_test.go +++ b/pkg/storage/replica_learner_test.go @@ -429,13 +429,13 @@ func TestReplicaGCQueueSeesLearnerOrJointConfig(t *testing.T) { require.Contains(t, tracing.FormatRecordedSpans(trace), msg) return tc.LookupRangeOrFatal(t, scratchStartKey) } - desc := checkNoGC() // Make sure it didn't collect the learner. require.NotEmpty(t, desc.Replicas().Learners()) // Now get the range into a joint config. tc.RemoveReplicasOrFatal(t, scratchStartKey, tc.Target(1)) // remove learner + ltk.withStopAfterJointConfig(func() { desc = tc.AddReplicasOrFatal(t, scratchStartKey, tc.Target(1)) require.Len(t, desc.Replicas().Filter(predIncoming), 1, desc) @@ -547,10 +547,14 @@ func TestLearnerAdminChangeReplicasRace(t *testing.T) { // Unblock the snapshot, and surprise AddReplicas. It should retry and error // that the descriptor has changed since the AdminChangeReplicas command - // started. + // started. Alternatively it may fail in sending the snapshot because of a + // "raft group deleted" error if the newly added learner attempts to send + // a raft message to another node after it has been removed and then destroys + // itself in response to a ReplicaTooOldError. 
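The `err != errRemoved` comparison above is the general calling convention this change establishes: withRaftGroup(Locked) reports removal through a sentinel error, and each caller decides whether that means "silently stop" or "return early". A self-contained sketch of the two caller styles, using stand-in functions:

package main

import (
	"errors"
	"fmt"
)

// errRemoved mirrors the sentinel used in the diff: it is not a failure, it
// just means the replica is gone and there is no raft group to operate on.
var errRemoved = errors.New("replica removed")

// withRaftGroup is a stand-in for Replica.withRaftGroup: it either runs f or
// reports that the replica has been removed.
func withRaftGroup(removed bool, f func() error) error {
	if removed {
		return errRemoved
	}
	return f()
}

func main() {
	// Callers that only poke the group (e.g. to report an unreachable peer)
	// treat errRemoved as a no-op.
	if err := withRaftGroup(true, func() error { return nil }); err != nil && err != errRemoved {
		panic(err) // the real code would log.Fatal here
	}

	// Callers on the ready-handling path turn it into an early return instead.
	if err := withRaftGroup(true, func() error { return nil }); err == errRemoved {
		fmt.Println("replica removed; stop processing raft for this range")
		return
	}
}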
close(blockSnapshotsCh) - if err := g.Wait(); !testutils.IsError(err, `descriptor changed`) { - t.Fatalf(`expected "descriptor changed" error got: %+v`, err) + const msgRE = `descriptor changed|raft group deleted` + if err := g.Wait(); !testutils.IsError(err, msgRE) { + t.Fatalf(`expected %q error got: %+v`, msgRE, err) } desc = tc.LookupRangeOrFatal(t, scratchStartKey) require.Len(t, desc.Replicas().Voters(), 1) diff --git a/pkg/storage/replica_raft.go b/pkg/storage/replica_raft.go index 52b577da569c..004daa6c1e78 100644 --- a/pkg/storage/replica_raft.go +++ b/pkg/storage/replica_raft.go @@ -379,6 +379,8 @@ func (r *Replica) hasPendingProposalsRLocked() bool { return r.numPendingProposalsRLocked() > 0 } +var errRemoved = errors.New("replica removed") + // stepRaftGroup calls Step on the replica's RawNode with the provided request's // message. Before doing so, it assures that the replica is unquiesced and ready // to handle the request. @@ -445,13 +447,11 @@ func (r *Replica) handleRaftReadyRaftMuLocked( var hasReady bool var rd raft.Ready r.mu.Lock() - lastIndex := r.mu.lastIndex // used for append below lastTerm := r.mu.lastTerm raftLogSize := r.mu.raftLogSize leaderID := r.mu.leaderID lastLeaderID := leaderID - err := r.withRaftGroupLocked(true, func(raftGroup *raft.RawNode) (bool, error) { if err := r.mu.proposalBuf.FlushLockedWithRaftGroup(raftGroup); err != nil { return false, err @@ -462,11 +462,13 @@ func (r *Replica) handleRaftReadyRaftMuLocked( return hasReady /* unquiesceAndWakeLeader */, nil }) r.mu.Unlock() - if err != nil { + if err == errRemoved { + // If we've been removed then just return. + return stats, "", nil + } else if err != nil { const expl = "while checking raft group for Ready" return stats, expl, errors.Wrap(err, expl) } - if !hasReady { // We must update the proposal quota even if we don't have a ready. // Consider the case when our quota is of size 1 and two out of three @@ -723,7 +725,6 @@ func (r *Replica) handleRaftReadyRaftMuLocked( // Might have gone negative if node was recently restarted. raftLogSize = 0 } - } // Update protected state - last index, last term, raft log size, and raft @@ -756,10 +757,18 @@ func (r *Replica) handleRaftReadyRaftMuLocked( applicationStart := timeutil.Now() if len(rd.CommittedEntries) > 0 { - if err := appTask.ApplyCommittedEntries(ctx); err != nil { + err := appTask.ApplyCommittedEntries(ctx) + stats.applyCommittedEntriesStats = sm.moveStats() + switch err { + case nil: + case apply.ErrRemoved: + // We know that our replica has been removed. We also know that we've + // stepped the state machine up to the point where it's been removed. + // TODO(ajwerner): decide if there's more to be done here. + return stats, "", err + default: return stats, err.(*nonDeterministicFailure).safeExpl, err } - stats.applyCommittedEntriesStats = sm.moveStats() // etcd raft occasionally adds a nil entry (our own commands are never // empty). This happens in two situations: When a new leader is elected, and @@ -789,11 +798,12 @@ func (r *Replica) handleRaftReadyRaftMuLocked( r.mu.Unlock() } - // TODO(bdarnell): need to check replica id and not Advance if it - // has changed. Or do we need more locking to guarantee that replica - // ID cannot change during handleRaftReady? + // NB: if we just processed a command which removed this replica from the + // raft group we will early return before this point. 
This, combined with + // the fact that we'll refuse to process messages intended for a higher + // replica ID ensures that our replica ID could not have changed. const expl = "during advance" - if err := r.withRaftGroup(true, func(raftGroup *raft.RawNode) (bool, error) { + err = r.withRaftGroup(true, func(raftGroup *raft.RawNode) (bool, error) { raftGroup.Advance(rd) // If the Raft group still has more to process then we immediately @@ -804,7 +814,8 @@ func (r *Replica) handleRaftReadyRaftMuLocked( r.store.enqueueRaftUpdateCheck(r.RangeID) } return true, nil - }); err != nil { + }) + if err != nil { return stats, expl, errors.Wrap(err, expl) } @@ -1157,7 +1168,7 @@ func (r *Replica) sendRaftMessage(ctx context.Context, msg raftpb.Message) { r.mu.droppedMessages++ raftGroup.ReportUnreachable(msg.To) return true, nil - }); err != nil { + }); err != nil && err != errRemoved { log.Fatal(ctx, err) } } @@ -1200,7 +1211,7 @@ func (r *Replica) reportSnapshotStatus(ctx context.Context, to roachpb.ReplicaID if err := r.withRaftGroup(true, func(raftGroup *raft.RawNode) (bool, error) { raftGroup.ReportSnapshot(uint64(to), snapStatus) return true, nil - }); err != nil { + }); err != nil && err != errRemoved { log.Fatal(ctx, err) } } @@ -1322,13 +1333,16 @@ func (s pendingCmdSlice) Less(i, j int) bool { // varies. // // Requires that Replica.mu is held. +// +// If this Replica is in the process of being removed this method will return +// true and a nil error. func (r *Replica) withRaftGroupLocked( mayCampaignOnWake bool, f func(r *raft.RawNode) (unquiesceAndWakeLeader bool, _ error), ) error { - if r.mu.destroyStatus.Removed() { + if r.mu.destroyStatus.RemovingOrRemoved() { // Silently ignore all operations on destroyed replicas. We can't return an // error here as all errors returned from this method are considered fatal. - return nil + return errRemoved } if r.mu.replicaID == 0 { @@ -1378,6 +1392,9 @@ func (r *Replica) withRaftGroupLocked( // should not initiate an election while handling incoming raft // messages (which may include MsgVotes from an election in progress, // and this election would be disrupted if we started our own). +// +// If this Replica is in the process of being removed this method will return +// errRemoved. func (r *Replica) withRaftGroup( mayCampaignOnWake bool, f func(r *raft.RawNode) (unquiesceAndWakeLeader bool, _ error), ) error { @@ -1536,6 +1553,9 @@ func (r *Replica) maybeAcquireSnapshotMergeLock( // not be applied yet) and acquires the split or merge lock if // necessary (in addition to other preparation). It returns a function // which will release any lock acquired (or nil). +// +// After this method returns successfully the RHS of the split or merge +// is guaranteed to exist in the Store using GetReplica(). 
func (r *Replica) maybeAcquireSplitMergeLock( ctx context.Context, raftCmd storagepb.RaftCommand, ) (func(), error) { diff --git a/pkg/storage/replica_raftstorage.go b/pkg/storage/replica_raftstorage.go index 7132eae707e0..86f6937a56e7 100644 --- a/pkg/storage/replica_raftstorage.go +++ b/pkg/storage/replica_raftstorage.go @@ -698,7 +698,6 @@ func clearRangeData( } else { keyRanges = rditer.MakeAllKeyRanges(desc) } - var clearRangeFn func(engine.Reader, engine.Writer, engine.MVCCKey, engine.MVCCKey) error if mustClearRange { clearRangeFn = func(eng engine.Reader, writer engine.Writer, start, end engine.MVCCKey) error { @@ -894,8 +893,7 @@ func (r *Replica) applySnapshot( // problematic, as it would prevent this store from ever having a new replica // of the removed range. In this case, however, it's copacetic, as subsumed // ranges _can't_ have new replicas. - const subsumedNextReplicaID = math.MaxInt32 - if err := r.clearSubsumedReplicaDiskData(ctx, inSnap.SSSS, s.Desc, subsumedRepls, subsumedNextReplicaID); err != nil { + if err := r.clearSubsumedReplicaDiskData(ctx, inSnap.SSSS, s.Desc, subsumedRepls, mergedTombstoneReplicaID); err != nil { return err } stats.subsumedReplicas = timeutil.Now() @@ -915,7 +913,7 @@ func (r *Replica) applySnapshot( // has not yet been updated. Any errors past this point must therefore be // treated as fatal. - if err := r.clearSubsumedReplicaInMemoryData(ctx, subsumedRepls, subsumedNextReplicaID); err != nil { + if err := r.clearSubsumedReplicaInMemoryData(ctx, subsumedRepls, mergedTombstoneReplicaID); err != nil { log.Fatalf(ctx, "failed to clear in-memory data of subsumed replicas while applying snapshot: %+v", err) } @@ -1013,7 +1011,7 @@ func (r *Replica) clearSubsumedReplicaDiskData( r.store.Engine(), &subsumedReplSST, subsumedNextReplicaID, - true, /* rangeIDLocalOnly */ + true, /* clearRangeIDLocalOnly */ true, /* mustClearRange */ ); err != nil { subsumedReplSST.Close() diff --git a/pkg/storage/replica_stats.go b/pkg/storage/replica_stats.go index c1157f0a0f43..61fdc801219d 100644 --- a/pkg/storage/replica_stats.go +++ b/pkg/storage/replica_stats.go @@ -102,6 +102,22 @@ func (rs *replicaStats) splitRequestCounts(other *replicaStats) { } } +// halve is called when a split occurs but the RHS has been removed already +// and cannot be initialized with splitRequestCounts. +func (rs *replicaStats) halve() { + rs.mu.Lock() + defer rs.mu.Unlock() + for i := range rs.mu.requests { + if rs.mu.requests[i] == nil { + continue + } + for k := range rs.mu.requests[i] { + newVal := rs.mu.requests[i][k] / 2.0 + rs.mu.requests[i][k] = newVal + } + } +} + func (rs *replicaStats) record(nodeID roachpb.NodeID) { rs.recordCount(1, nodeID) } diff --git a/pkg/storage/replica_test.go b/pkg/storage/replica_test.go index 05490d143468..bcbf3a983dea 100644 --- a/pkg/storage/replica_test.go +++ b/pkg/storage/replica_test.go @@ -7278,117 +7278,6 @@ func TestSyncSnapshot(t *testing.T) { } } -// TestReplicaIDChangePending verifies that on a replica ID change, pending -// commands are re-proposed on the new raft group. -func TestReplicaIDChangePending(t *testing.T) { - defer leaktest.AfterTest(t)() - - tc := testContext{} - cfg := TestStoreConfig(nil) - // Disable ticks to avoid automatic reproposals after a timeout, which - // would pass this test. 
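The new replicaStats.halve helper above covers the case where a split commits but the right-hand side has already been removed, so there is no RHS stats object to receive half of the counts via splitRequestCounts; halving the left-hand side in place is the closest available approximation. A small illustrative stand-in (a plain locked map, without the per-locality bookkeeping):

package main

import (
	"fmt"
	"sync"
)

// requestStats is a simplified stand-in for replicaStats: a locked map of
// per-node request counts.
type requestStats struct {
	mu     sync.Mutex
	counts map[int]float64 // nodeID -> requests
}

// halve cuts every count in half in place. It approximates a split when the
// other half of the range no longer has a replica to receive the counts.
func (s *requestStats) halve() {
	s.mu.Lock()
	defer s.mu.Unlock()
	for k := range s.counts {
		s.counts[k] /= 2
	}
}

func main() {
	s := &requestStats{counts: map[int]float64{1: 10, 2: 4}}
	s.halve()
	fmt.Println(s.counts) // map[1:5 2:2]
}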
- cfg.RaftTickInterval = math.MaxInt32 - stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) - tc.StartWithStoreConfig(t, stopper, cfg) - repl := tc.repl - - // Stop the command from being proposed to the raft group and being removed. - proposedOnOld := make(chan struct{}, 1) - repl.mu.Lock() - repl.mu.proposalBuf.testing.submitProposalFilter = func(*ProposalData) (drop bool, _ error) { - select { - case proposedOnOld <- struct{}{}: - default: - } - return true, nil - } - lease := *repl.mu.state.Lease - repl.mu.Unlock() - - // Add a command to the pending list and wait for it to be proposed. - magicTS := tc.Clock().Now() - ba := roachpb.BatchRequest{} - ba.Timestamp = magicTS - ba.Add(&roachpb.PutRequest{ - RequestHeader: roachpb.RequestHeader{ - Key: roachpb.Key("a"), - }, - Value: roachpb.MakeValueFromBytes([]byte("val")), - }) - _, _, _, err := repl.evalAndPropose(context.Background(), lease, &ba, &allSpans, endCmds{}) - if err != nil { - t.Fatal(err) - } - <-proposedOnOld - - // Set the raft command handler so we can tell if the command has been - // re-proposed. - proposedOnNew := make(chan struct{}, 1) - repl.mu.Lock() - repl.mu.proposalBuf.testing.submitProposalFilter = func(p *ProposalData) (drop bool, _ error) { - if p.Request.Timestamp == magicTS { - select { - case proposedOnNew <- struct{}{}: - default: - } - } - return false, nil - } - repl.mu.Unlock() - - // Set the ReplicaID on the replica. - if err := repl.setReplicaID(2); err != nil { - t.Fatal(err) - } - - <-proposedOnNew -} - -func TestSetReplicaID(t *testing.T) { - defer leaktest.AfterTest(t)() - - tsc := TestStoreConfig(nil) - tc := testContext{} - stopper := stop.NewStopper() - defer stopper.Stop(context.TODO()) - tc.StartWithStoreConfig(t, stopper, tsc) - - repl := tc.repl - - testCases := []struct { - replicaID roachpb.ReplicaID - minReplicaID roachpb.ReplicaID - newReplicaID roachpb.ReplicaID - expectedMinReplicaID roachpb.ReplicaID - expectedErr string - }{ - {0, 0, 1, 2, ""}, - {0, 1, 1, 2, ""}, - {0, 2, 1, 2, "raft group deleted"}, - {1, 2, 1, 2, ""}, // not an error; replicaID == newReplicaID is checked first - {2, 0, 1, 0, "replicaID cannot move backwards"}, - } - for _, c := range testCases { - t.Run("", func(t *testing.T) { - repl.mu.Lock() - repl.mu.replicaID = c.replicaID - repl.mu.minReplicaID = c.minReplicaID - repl.mu.Unlock() - - err := repl.setReplicaID(c.newReplicaID) - repl.mu.Lock() - if repl.mu.minReplicaID != c.expectedMinReplicaID { - t.Errorf("expected minReplicaID=%d, but found %d", c.expectedMinReplicaID, repl.mu.minReplicaID) - } - repl.mu.Unlock() - if !testutils.IsError(err, c.expectedErr) { - t.Fatalf("expected %q, but found %v", c.expectedErr, err) - } - }) - } -} - func TestReplicaRetryRaftProposal(t *testing.T) { defer leaktest.AfterTest(t)() diff --git a/pkg/storage/store.go b/pkg/storage/store.go index 99fa19230a5a..666ea5b7a939 100644 --- a/pkg/storage/store.go +++ b/pkg/storage/store.go @@ -35,6 +35,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/settings" "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/sqlutil" + "github.com/cockroachdb/cockroach/pkg/storage/apply" "github.com/cockroachdb/cockroach/pkg/storage/batcheval" "github.com/cockroachdb/cockroach/pkg/storage/closedts/container" "github.com/cockroachdb/cockroach/pkg/storage/closedts/ctpb" @@ -1142,7 +1143,7 @@ func (s *Store) IsStarted() bool { return atomic.LoadInt32(&s.started) == 1 } -// IterateIDPrefixKeys helps visit system keys that use RangeID 
prefixing ( such as +// IterateIDPrefixKeys helps visit system keys that use RangeID prefixing (such as // RaftHardStateKey, RaftTombstoneKey, and many others). Such keys could in principle exist at any // RangeID, and this helper efficiently discovers all the keys of the desired type (as specified by // the supplied `keyFn`) and, for each key-value pair discovered, unmarshals it into `msg` and then @@ -2088,10 +2089,76 @@ func (s *Store) AllocateRangeID(ctx context.Context) (roachpb.RangeID, error) { // splitPreApply is called when the raft command is applied. Any // changes to the given ReadWriter will be written atomically with the // split commit. -func splitPreApply(ctx context.Context, eng engine.ReadWriter, split roachpb.SplitTrigger) { +func splitPreApply( + ctx context.Context, eng engine.ReadWriter, split roachpb.SplitTrigger, r *Replica, +) { + // Check on the RHS, we need to ensure that it exists and has a minReplicaID + // less than or equal to the replica we're about to initialize. + // + // The right hand side of the split was already created (and its raftMu + // acquired) in Replica.acquireSplitLock. It must be present here. + rightRepl, err := r.store.GetReplica(split.RightDesc.RangeID) + if err != nil { + log.Fatalf(ctx, "unable to find RHS replica: %+v", err) + } + + // If the RHS is not in the split, sanity check that the LHS is currently + // catching up from a preemptive snapshot. + _, hasRightDesc := split.RightDesc.GetReplicaDescriptor(r.StoreID()) + if !hasRightDesc { + _, lhsExists := r.Desc().GetReplicaDescriptor(r.StoreID()) + if lhsExists { + log.Fatalf(ctx, "cannot process split on s%s which exists in LHS and not in RHS: %+v", + r.StoreID(), split) + } + } + + // Check to make sure we don't know that the RHS has already been removed + // from this store at the replica ID implied by this split. + // This is the common case. + + // Check to see if we know that the RHS has already been removed from this + // store at the replica ID implied by the split. + if rightRepl.isNewerThanSplit(&split) { + // We're in the rare case where we know that the RHS has been removed + // and re-added with a higher replica ID. We know we've never processed a + // snapshot for the right range because up to this point it would overlap + // with the left and ranges cannot move rightwards. + // + // It is important to preserve the HardState because we might however have + // already voted at a higher term. In general this shouldn't happen because + // we add learners and then promote them only after we snapshot but we're + // going to be extra careful in case future versions of cockroach somehow + // promote replicas without ensuring that a snapshot has been received. + // + // Clear the user data the RHS would have inherited from the LHS due to the + // split and additionally clear all of the range ID local state that the + // split trigger writes into the RHS. + // + // Rather than specifically deleting around the data we want to preserve + // we read the HardState to preserve it, clear everything and write back + // the HardState and tombstone. 
+ hs, err := rightRepl.raftMu.stateLoader.LoadHardState(ctx, eng) + if err != nil { + log.Fatalf(ctx, "failed to load hard state for removed rhs: %v", err) + } + const rangeIDLocalOnly = false + const mustUseClearRange = false + if err := clearRangeData(&split.RightDesc, eng, eng, rangeIDLocalOnly, mustUseClearRange); err != nil { + log.Fatalf(ctx, "failed to clear range data for removed rhs: %v", err) + } + if err := rightRepl.raftMu.stateLoader.SetHardState(ctx, eng, hs); err != nil { + log.Fatalf(ctx, "failed to set hard state with 0 commit index for removed rhs: %v", err) + } + if err := r.setTombstoneKey(ctx, eng, r.minReplicaID()); err != nil { + log.Fatalf(ctx, "failed to set tombstone for removed rhs: %v", err) + } + return + } + // Update the raft HardState with the new Commit value now that the // replica is initialized (combining it with existing or default - // Term and Vote). + // Term and Vote). This is the common case. rsl := stateloader.Make(split.RightDesc.RangeID) if err := rsl.SynthesizeRaftState(ctx, eng); err != nil { log.Fatal(ctx, err) @@ -2115,89 +2182,18 @@ func splitPostApply( { // Already holding raftMu, see above. rightRng.mu.Lock() - // The right hand side of the split may have been removed and re-added - // in the meantime, and the replicaID in RightDesc may be stale. - // Consequently the call below may fail with a RaftGroupDeletedError. In - // general, this protects earlier incarnations of the replica that were - // since replicaGC'ed from reneging on promises made earlier (for - // example, once the HardState is removed, a replica could cast a - // different vote for the same term). - // - // It is safe to circumvent that in this case because the RHS must have - // remained uninitialized (the LHS blocks all user data, and since our - // Raft logs start at a nonzero index a snapshot must go through before - // any log entries are appended). This means the data in that range is - // just a HardState which we "logically" snapshot by assigning it data - // formerly located within the LHS. - // - // Note that if we ever have a way to replicaGC uninitialized replicas, - // the RHS may have been gc'ed and so the HardState would be gone. In - // that case, the requirement that the HardState remains would have been - // violated if the bypass below were used, which is why we place an - // assertion. - // - // See: - // https://github.com/cockroachdb/cockroach/issues/21146#issuecomment-365757329 - // - // TODO(tbg): this argument is flawed - it's possible for a tombstone - // to exist on the RHS: - // https://github.com/cockroachdb/cockroach/issues/40470 - // Morally speaking, this means that we should throw away the data we - // moved from the LHS to the RHS (depending on the tombstone). - // Realistically speaking it will probably be easier to create the RHS - // anyway, even though there's a tombstone and it may just get gc'ed - // again. Note that for extra flavor, we may not even know whether the - // RHS is currently supposed to exist or not, lending more weight to the - // second approach. 
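The splitPreApply branch above follows a read-preserve, clear-everything, write-back ordering: the only state worth keeping for an already-removed RHS is its raft HardState (so a vote cast at a higher term is never forgotten) together with a tombstone. A schematic sketch of that ordering, with toy types standing in for the engine and state loader:

package main

import "fmt"

// hardState and store are toy stand-ins for raftpb.HardState and the engine
// state touched by splitPreApply; they exist only to show the ordering.
type hardState struct{ Term, Vote, Commit uint64 }

type store struct {
	hs        hardState
	userData  map[string]string
	tombstone int
}

// resetRemovedRHS sketches the ordering used above: load the HardState first,
// clear all data the split would have seeded, then write the HardState and a
// tombstone back so the removed replica cannot renege on an earlier vote.
func resetRemovedRHS(s *store, tombstoneReplicaID int) {
	preserved := s.hs                // 1. read what must survive
	*s = store{}                     // 2. clear everything else
	s.hs = preserved                 // 3. restore the HardState ...
	s.tombstone = tombstoneReplicaID //    ... and record the tombstone
}

func main() {
	s := &store{hs: hardState{Term: 7, Vote: 3}, userData: map[string]string{"a": "1"}}
	resetRemovedRHS(s, 5)
	fmt.Printf("%+v\n", *s)
}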
- tombstoneKey := keys.RaftTombstoneKey(rightRng.RangeID) - var tombstone roachpb.RaftTombstone - if ok, err := engine.MVCCGetProto( - ctx, r.store.Engine(), tombstoneKey, hlc.Timestamp{}, &tombstone, engine.MVCCGetOptions{}, - ); err != nil { - log.Fatalf(ctx, "unable to load tombstone for RHS: %+v", err) - } else if ok { - log.Fatalf(ctx, "split trigger found right-hand side with tombstone %+v: %v", tombstone, rightRng) - } - rightDesc, ok := split.RightDesc.GetReplicaDescriptor(r.StoreID()) - if !ok { - // This is yet another special quirky case. The local store is not - // necessarily a member of the split; this can occur if this store - // wasn't a member at the time of the split, but is nevertheless - // catching up across the split. For example, add a learner, and - // while it is being caught up via a snapshot, remove the learner - // again, then execute a split, and re-add it. Upon being re-added - // the learner will likely catch up from where the snapshot left it, - // and it will see itself get removed, then we hit this branch when - // the split trigger is applied, and eventually there's a - // ChangeReplicas that re-adds the local store under a new - // replicaID. - // - // So our trigger will have a smaller replicaID for our RHS, which - // will blow up in initRaftMuLockedReplicaMuLocked. We still want - // to force the RHS to accept the descriptor, even though that - // rewinds the replicaID. To do that we want to change the existing - // replicaID, but we didn't find one -- zero is then the only reasonable - // choice. Note that this is also the replicaID a replica that is - // not reflected in its own descriptor will have, i.e. we're cooking - // up less of a special case here than you'd expect at first glance. - // - // Note that futzing with the replicaID is a high-risk operation as - // it is what the raft peer will believe itself to be identified by. - // Under no circumstances must we use a replicaID that belongs to - // someone else, or a byzantine situation will result. Zero is - // special-cased and will never init a raft group until the real - // ID is known from inbound raft traffic. - rightDesc.ReplicaID = 0 // for clarity only; it's already zero - } - if rightRng.mu.replicaID > rightDesc.ReplicaID { - rightRng.mu.replicaID = rightDesc.ReplicaID + + // If we know that the RHS has already been removed at this replica ID + // then we also know that its data has already been removed by the preApply + // so we continue to update the descriptor for the left hand side and + // return. + if rightRng.isNewerThanSplitRLocked(split) { + rightRng.mu.Unlock() + r.setDescAfterSplit(ctx, &split.LeftDesc) + r.writeStats.halve() + return } - // NB: the safety argument above implies that we don't have to worry - // about restoring the existing minReplicaID if it's nonzero. No - // promises have been made, so none need to be kept. So we clear this - // unconditionally, making sure that it doesn't block us from init'ing - // the RHS. - rightRng.mu.minReplicaID = 0 + err := rightRng.initRaftMuLockedReplicaMuLocked(&split.RightDesc, r.store.Clock(), 0) rightRng.mu.Unlock() if err != nil { @@ -2237,9 +2233,10 @@ func splitPostApply( // until it receives a Raft message addressed to the right-hand range. But // since new replicas start out quiesced, unless we explicitly awaken the // Raft group, there might not be any Raft traffic for quite a while. 
- if err := rightRng.withRaftGroup(true, func(r *raft.RawNode) (unquiesceAndWakeLeader bool, _ error) { + err = rightRng.withRaftGroup(true, func(r *raft.RawNode) (unquiesceAndWakeLeader bool, _ error) { return true, nil - }); err != nil { + }) + if err != nil { log.Fatalf(ctx, "unable to create raft group for right-hand range in split: %+v", err) } @@ -2315,27 +2312,7 @@ func (s *Store) SplitRange( s.replicaQueues.Delete(int64(rightDesc.RangeID)) } - leftRepl.setDesc(ctx, &newLeftDesc) - - // Clear the LHS txn wait queue, to redirect to the RHS if - // appropriate. We do this after setDescWithoutProcessUpdate - // to ensure that no pre-split commands are inserted into the - // txnWaitQueue after we clear it. - leftRepl.txnWaitQueue.Clear(false /* disable */) - - // The rangefeed processor will no longer be provided logical ops for - // its entire range, so it needs to be shut down and all registrations - // need to retry. - // TODO(nvanbenschoten): It should be possible to only reject registrations - // that overlap with the new range of the split and keep registrations that - // are only interested in keys that are still on the original range running. - leftRepl.disconnectRangefeedWithReason( - roachpb.RangeFeedRetryError_REASON_RANGE_SPLIT, - ) - - // Clear the original range's request stats, since they include requests for - // spans that are now owned by the new range. - leftRepl.leaseholderStats.resetRequestCounts() + leftRepl.setDescAfterSplit(ctx, &newLeftDesc) leftRepl.writeStats.splitRequestCounts(rightRepl.writeStats) if err := s.addReplicaInternalLocked(rightRepl); err != nil { @@ -2348,7 +2325,6 @@ func (s *Store) SplitRange( if err := rightRepl.updateRangeInfo(rightRepl.Desc()); err != nil { return err } - // Add the range to metrics and maybe gossip on capacity change. s.metrics.ReplicaCount.Inc(1) s.maybeGossipOnCapacityChange(ctx, rangeAddEvent) @@ -2580,7 +2556,12 @@ func (s *Store) removeReplicaImpl( // We check both rep.mu.ReplicaID and rep.mu.state.Desc's replica ID because // they can differ in cases when a replica's ID is increased due to an // incoming raft message (see #14231 for background). + // TODO(ajwerner): reconsider some of this sanity checking. rep.mu.Lock() + if rep.mu.destroyStatus.Removed() { + rep.mu.Unlock() + return nil + } replicaID := rep.mu.replicaID if rep.mu.replicaID >= nextReplicaID { rep.mu.Unlock() @@ -2600,12 +2581,7 @@ func (s *Store) removeReplicaImpl( } if !rep.IsInitialized() { - // The split trigger relies on the fact that it can bypass the tombstone - // check for the RHS, but this is only true as long as we never delete - // its HardState. - // - // See the comment in splitPostApply for details. - log.Fatalf(ctx, "can not replicaGC uninitialized replicas") + log.Fatalf(ctx, "can not replicaGC uninitialized replicas in this method") } // During merges, the context might have the subsuming range, so we explicitly @@ -2666,7 +2642,73 @@ func (s *Store) removeReplicaImpl( // TODO(peter): Could release s.mu.Lock() here. s.maybeGossipOnCapacityChange(ctx, rangeRemoveEvent) s.scanner.RemoveReplica(rep) + return nil +} + +func (s *Store) removeUninitializedReplicaRaftMuLocked( + ctx context.Context, rep *Replica, nextReplicaID roachpb.ReplicaID, +) error { + rep.raftMu.AssertHeld() + + // Sanity check this removal. + rep.mu.RLock() + ds := rep.mu.destroyStatus + isInitialized := rep.isInitializedRLocked() + rep.mu.RUnlock() + // Somebody already removed this Replica. 
+ if ds.Removed() { + return nil + } + if ds.reason != destroyReasonRemovalPending { + log.Fatalf(ctx, "cannot remove uninitialized replica which is not removal pending: %v", ds) + } + + // When we're in this state we should have already had our destroy status set + // so it shouldn't have been possible to process any raft messages or apply + // any snapshots. + if isInitialized { + log.Fatalf(ctx, "previously uninitialized replica became initialized before removal") + } + + // Proceed with the removal. + rep.readOnlyCmdMu.Lock() + rep.mu.Lock() + rep.cancelPendingCommandsLocked() + rep.mu.internalRaftGroup = nil + rep.mu.destroyStatus.Set(roachpb.NewRangeNotFoundError(rep.RangeID, rep.store.StoreID()), destroyReasonRemoved) + rep.mu.Unlock() + rep.readOnlyCmdMu.Unlock() + + if err := rep.destroyRaftMuLocked(ctx, nextReplicaID); err != nil { + log.Fatalf(ctx, "failed to remove uninitialized replica %v: %v", rep, err) + } + + s.mu.Lock() + defer s.mu.Unlock() + // Sanity check, could be removed. + value, stillExists := s.mu.replicas.Load(int64(rep.RangeID)) + if !stillExists { + log.Fatalf(ctx, "uninitialized replica was removed in the meantime") + } + existing := (*Replica)(value) + if existing == rep { + log.Infof(ctx, "removing uninitialized replica %v", rep) + } else { + log.Fatalf(ctx, "uninitialized replica %v was already removed", rep) + } + + s.metrics.ReplicaCount.Dec(1) + + // Only an uninitialized replica can have a placeholder since, by + // definition, an initialized replica will be present in the + // replicasByKey map. While the replica will usually consume the + // placeholder itself, that isn't guaranteed and so this invocation + // here is crucial (i.e. don't remove it). + if s.removePlaceholderLocked(ctx, rep.RangeID) { + atomic.AddInt32(&s.counts.droppedPlaceholders, 1) + } + s.unlinkReplicaByRangeIDLocked(rep.RangeID) return nil } @@ -3504,7 +3546,6 @@ func (s *Store) processRaftSnapshotRequest( if err := r.stepRaftGroup(&snapHeader.RaftMessageRequest); err != nil { return roachpb.NewError(err) } - if _, expl, err := r.handleRaftReadyRaftMuLocked(ctx, inSnap); err != nil { fatalOnRaftReadyErr(ctx, expl, err) } @@ -3543,33 +3584,43 @@ func (s *Store) HandleRaftResponse(ctx context.Context, resp *RaftMessageRespons repl.raftMu.Lock() defer repl.raftMu.Unlock() repl.mu.Lock() - defer repl.mu.Unlock() // If the replica ID in the error does not match then we know // that the replica has been removed and re-added quickly. In // that case, we don't want to add it to the replicaGCQueue. - if tErr.ReplicaID != repl.mu.replicaID { - log.Infof(ctx, "replica too old response with old replica ID: %s", tErr.ReplicaID) + // If the replica is not alive then we also should ignore this error. + if tErr.ReplicaID != repl.mu.replicaID || + !repl.mu.destroyStatus.IsAlive() || + // Ignore if we want to test the replicaGC queue. + s.TestingKnobs().DisableEagerReplicaRemoval { + repl.mu.Unlock() return nil } - // If the replica ID in the error does match, we know the replica - // will be removed and we can cancel any pending commands. This is - // sometimes necessary to unblock PushTxn operations that are - // necessary for the replica GC to succeed. - repl.cancelPendingCommandsLocked() - // The replica will be garbage collected soon (we are sure // since our replicaID is definitely too old), but in the meantime we // already want to bounce all traffic from it. 
Note that the replica - // could be re-added with a higher replicaID, in which this error is - // cleared in setReplicaIDRaftMuLockedMuLocked. - if repl.mu.destroyStatus.IsAlive() { - storeID := repl.store.StoreID() - repl.mu.destroyStatus.Set(roachpb.NewRangeNotFoundError(repl.RangeID, storeID), destroyReasonRemovalPending) + // could be re-added with a higher replicaID, but we want to clear the + // replica's data before that happens. + if log.V(1) { + log.Infof(ctx, "setting local replica to destroyed due to ReplicaTooOld error") } - s.replicaGCQueue.AddAsync(ctx, repl, replicaGCPriorityRemoved) + storeID := repl.store.StoreID() + // NB: We know that there's a later copy of this range on this store but + // to introduce another more specific error in this case is overkill so + // we return RangeNotFoundError which the recipient will properly + // ignore. + repl.mu.destroyStatus.Set(roachpb.NewRangeNotFoundError(repl.RangeID, storeID), + destroyReasonRemovalPending) + repl.mu.Unlock() + nextReplicaID := tErr.ReplicaID + 1 + if !repl.IsInitialized() { + return s.removeUninitializedReplicaRaftMuLocked(ctx, repl, nextReplicaID) + } + return s.removeReplicaImpl(ctx, repl, nextReplicaID, RemoveOptions{ + DestroyData: true, + }) case *roachpb.RaftGroupDeletedError: if replErr != nil { // RangeNotFoundErrors are expected here; nothing else is. @@ -3632,7 +3683,11 @@ func (s *Store) processRequestQueue(ctx context.Context, rangeID roachpb.RangeID // giving up the lock. Set lastRepl to nil, so we don't handle it // down below as well. lastRepl = nil - if _, expl, err := r.handleRaftReadyRaftMuLocked(ctx, noSnap); err != nil { + switch _, expl, err := r.handleRaftReadyRaftMuLocked(ctx, noSnap); err { + case nil: + case apply.ErrRemoved: + // return hopefully never to be heard from again. + default: fatalOnRaftReadyErr(ctx, expl, err) } } @@ -3677,7 +3732,11 @@ func (s *Store) processRequestQueue(ctx context.Context, rangeID roachpb.RangeID // handleRaftReadyRaftMuLocked) since racing to handle Raft Ready won't // have any undesirable results. ctx = lastRepl.AnnotateCtx(ctx) - if _, expl, err := lastRepl.handleRaftReady(ctx, noSnap); err != nil { + switch _, expl, err := lastRepl.handleRaftReady(ctx, noSnap); err { + case nil: + case apply.ErrRemoved: + // return hopefully never to be heard from again. + default: fatalOnRaftReadyErr(ctx, expl, err) } } @@ -3693,8 +3752,12 @@ func (s *Store) processReady(ctx context.Context, rangeID roachpb.RangeID) { ctx = r.AnnotateCtx(ctx) start := timeutil.Now() stats, expl, err := r.handleRaftReady(ctx, noSnap) - if err != nil { - log.Fatalf(ctx, "%s: %+v", log.Safe(expl), err) // TODO(bdarnell) + switch err { + case nil: + case apply.ErrRemoved: + return + default: + fatalOnRaftReadyErr(ctx, expl, err) } elapsed := timeutil.Since(start) s.metrics.RaftWorkingDurationNanos.Inc(elapsed.Nanoseconds()) @@ -3937,7 +4000,16 @@ func (s *Store) getOrCreateReplica( replicaID roachpb.ReplicaID, creatingReplica *roachpb.ReplicaDescriptor, ) (_ *Replica, created bool, _ error) { + // We need a retry loop as the replica we find in the map may be in the + // process of being removed or may need to be removed. + r := retry.Start(retry.Options{ + InitialBackoff: time.Microsecond, + // Set the backoff up to only a small amount to wait for data that + // might need to be cleared. 
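getOrCreateReplica's new outer loop above retries whenever the replica found in the map is being, or needs to be, removed, backing off briefly so the removal can finish. A self-contained sketch of that shape using only the standard library rather than the util/retry package; the errRetry sentinel and function names are stand-ins:

package main

import (
	"errors"
	"fmt"
	"time"
)

// errRetry mirrors the sentinel the store uses internally to signal "the
// replica you found is on its way out; look again".
var errRetry = errors.New("retry: replica being removed")

// getOrCreate keeps calling tryGetOrCreate with a small, growing backoff until
// it stops reporting errRetry, mirroring the microsecond-to-10ms window above.
func getOrCreate(tryGetOrCreate func() (string, error)) (string, error) {
	backoff := time.Microsecond
	const maxBackoff = 10 * time.Millisecond
	for {
		v, err := tryGetOrCreate()
		if err != errRetry {
			return v, err
		}
		time.Sleep(backoff)
		if backoff *= 2; backoff > maxBackoff {
			backoff = maxBackoff
		}
	}
}

func main() {
	attempts := 0
	v, err := getOrCreate(func() (string, error) {
		if attempts++; attempts < 3 {
			return "", errRetry // an old replica is still tearing itself down
		}
		return "replica", nil
	})
	fmt.Println(v, err, attempts) // replica <nil> 3
}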
+ MaxBackoff: 10 * time.Millisecond, + }) for { + r.Next() r, created, err := s.tryGetOrCreateReplica( ctx, rangeID, @@ -3965,33 +4037,80 @@ func (s *Store) tryGetOrCreateReplica( rangeID roachpb.RangeID, replicaID roachpb.ReplicaID, creatingReplica *roachpb.ReplicaDescriptor, -) (_ *Replica, created bool, _ error) { - // The common case: look up an existing (initialized) replica. - if value, ok := s.mu.replicas.Load(int64(rangeID)); ok { - repl := (*Replica)(value) - repl.raftMu.Lock() // not unlocked - repl.mu.Lock() - defer repl.mu.Unlock() - - var replTooOldErr error - if creatingReplica != nil { +) (_ *Replica, created bool, err error) { + var ( + handleFromReplicaTooOld = func(repl *Replica) error { + if creatingReplica == nil { + return nil + } // Drop messages that come from a node that we believe was once a member of // the group but has been removed. desc := repl.mu.state.Desc _, found := desc.GetReplicaDescriptorByID(creatingReplica.ReplicaID) // It's not a current member of the group. Is it from the past? if !found && creatingReplica.ReplicaID < desc.NextReplicaID { - replTooOldErr = roachpb.NewReplicaTooOldError(creatingReplica.ReplicaID) + return roachpb.NewReplicaTooOldError(creatingReplica.ReplicaID) + } + return nil + } + handleToReplicaTooOld = func(repl *Replica) error { + if replicaID == 0 || repl.mu.replicaID == 0 || repl.mu.replicaID >= replicaID { + return nil + } + if log.V(1) { + log.Infof(ctx, "found message for replica ID %d which is newer than %v", replicaID, repl) + } + repl.mu.destroyStatus.Set(err, destroyReasonRemovalPending) + isInitialized := repl.isInitializedRLocked() + repl.mu.Unlock() + defer repl.mu.Lock() + if !isInitialized { + if err := s.removeUninitializedReplicaRaftMuLocked(ctx, repl, replicaID); err != nil { + log.Fatalf(ctx, "failed to remove uninitialized replica: %v", err) + } + } else { + if err := s.removeReplicaImpl(ctx, repl, replicaID, RemoveOptions{ + DestroyData: true, + }); err != nil { + log.Fatal(ctx, err) + } } + return errRetry + } + ) + // The common case: look up an existing (initialized) replica. + if value, ok := s.mu.replicas.Load(int64(rangeID)); ok { + repl := (*Replica)(value) + repl.raftMu.Lock() // not unlocked on success + repl.mu.Lock() + defer repl.mu.Unlock() + if err := handleFromReplicaTooOld(repl); err != nil { + repl.raftMu.Unlock() + return nil, false, err + } + if repl.mu.destroyStatus.RemovingOrRemoved() { + repl.raftMu.Unlock() + return nil, false, errRetry + } + if err := handleToReplicaTooOld(repl); err != nil { + repl.raftMu.Unlock() + return nil, false, err } var err error - if replTooOldErr != nil { - err = replTooOldErr - } else if ds := repl.mu.destroyStatus; ds.reason == destroyReasonRemoved { - err = errRetry - } else { - err = repl.setReplicaIDRaftMuLockedMuLocked(replicaID) + if repl.mu.replicaID == 0 { + // This message is telling us about our replica ID. + // This is a common case when dealing with preemptive snapshots. + err = repl.setReplicaIDRaftMuLockedMuLocked(repl.AnnotateCtx(ctx), replicaID) + } else if replicaID != 0 && repl.mu.replicaID > replicaID { + // The sender is behind and is sending to an old replica. + // We could silently drop this message but this way we'll inform the + // sender that they may no longer exist. + err = roachpb.NewRangeNotFoundError(rangeID, s.StoreID()) + } else if replicaID != 0 && repl.mu.replicaID != replicaID { + // This case should have been caught by handleToReplicaTooOld. 
+ log.Fatalf(ctx, "intended replica id %d unexpectedly does not match the current replica %v", + replicaID, repl) } if err != nil { repl.raftMu.Unlock() @@ -4028,7 +4147,7 @@ func (s *Store) tryGetOrCreateReplica( // replica even outside of raft processing. Have to do this after grabbing // Store.mu to maintain lock ordering invariant. repl.mu.Lock() - repl.mu.minReplicaID = tombstone.NextReplicaID + repl.mu.tombstoneReplicaID = tombstone.NextReplicaID // Add the range to range map, but not replicasByKey since the range's start // key is unknown. The range will be added to replicasByKey later when a // snapshot is applied. After unlocking Store.mu above, another goroutine diff --git a/pkg/storage/store_snapshot.go b/pkg/storage/store_snapshot.go index 3fb7ff8b74d5..7c8062fb5250 100644 --- a/pkg/storage/store_snapshot.go +++ b/pkg/storage/store_snapshot.go @@ -628,7 +628,9 @@ func (s *Store) canApplySnapshotLocked( existingRepl.raftMu.AssertHeld() existingRepl.mu.RLock() - existingIsInitialized := existingRepl.isInitializedRLocked() + existingDesc := existingRepl.mu.state.Desc + existingIsInitialized := existingDesc.IsInitialized() + existingDestroyStatus := existingRepl.mu.destroyStatus existingRepl.mu.RUnlock() if existingIsInitialized { @@ -637,15 +639,19 @@ func (s *Store) canApplySnapshotLocked( // in Replica.maybeAcquireSnapshotMergeLock for how this is // made safe. // - // NB: we expect the replica to know its replicaID at this point - // (i.e. !existingIsPreemptive), though perhaps it's possible - // that this isn't true if the leader initiates a Raft snapshot - // (that would provide a range descriptor with this replica in - // it) but this node reboots (temporarily forgetting its - // replicaID) before the snapshot arrives. + // NB: The snapshot must be intended for this replica as + // withReplicaForRequest ensures that requests with a non-zero replica + // id are passed to a replica with a matching id. Given this is not a + // preemptive snapshot we know that its id must be non-zero. return nil, nil } + // If we are not alive then we should not apply a snapshot as our removal + // is imminent. + if existingDestroyStatus.RemovingOrRemoved() { + return nil, existingDestroyStatus.err + } + // We have a key range [desc.StartKey,desc.EndKey) which we want to apply a // snapshot for. Is there a conflicting existing placeholder or an // overlapping range? @@ -670,7 +676,7 @@ func (s *Store) checkSnapshotOverlapLocked( // NB: this check seems redundant since placeholders are also represented in // replicasByKey (and thus returned in getOverlappingKeyRangeLocked). if exRng, ok := s.mu.replicaPlaceholders[desc.RangeID]; ok { - return errors.Errorf("%s: canApplySnapshotLocked: cannot add placeholder, have an existing placeholder %s", s, exRng) + return errors.Errorf("%s: canApplySnapshotLocked: cannot add placeholder, have an existing placeholder %s %v", s, exRng, snapHeader.RaftMessageRequest.FromReplica) } // TODO(benesch): consider discovering and GC'ing *all* overlapping ranges, @@ -736,43 +742,16 @@ func (s *Store) shouldAcceptSnapshotData( if snapHeader.IsPreemptive() { return crdberrors.AssertionFailedf(`expected a raft or learner snapshot`) } - - s.mu.Lock() - defer s.mu.Unlock() - - // TODO(tbg): see the comment on desc.Generation for what seems to be a much - // saner way to handle overlap via generational semantics. - desc := *snapHeader.State.Desc - - // First, check for an existing Replica. 
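One small gate above worth restating: canApplySnapshotLocked now refuses a snapshot outright when the existing replica's destroy status says removal is imminent, handing the caller the destroy reason instead of a placeholder. A sketch with stand-in types:

package main

import (
	"errors"
	"fmt"
)

type destroyStatus struct{ err error } // non-nil err => removing or removed

func (d destroyStatus) removingOrRemoved() bool { return d.err != nil }

type replica struct{ destroy destroyStatus }

// canApplySnapshot mirrors the new early-out: if the target replica is being
// torn down, return the destroy reason rather than continuing with placeholder
// bookkeeping.
func canApplySnapshot(existing *replica) error {
	if existing.destroy.removingOrRemoved() {
		return existing.destroy.err
	}
	return nil // placeholder bookkeeping would continue here
}

func main() {
	r := &replica{destroy: destroyStatus{err: errors.New("range not found")}}
	fmt.Println(canApplySnapshot(r))          // range not found
	fmt.Println(canApplySnapshot(&replica{})) // <nil>
}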
- if v, ok := s.mu.replicas.Load( - int64(desc.RangeID), - ); ok { - existingRepl := (*Replica)(v) - existingRepl.mu.RLock() - existingIsInitialized := existingRepl.isInitializedRLocked() - existingRepl.mu.RUnlock() - - if existingIsInitialized { - // Regular Raft snapshots can't be refused at this point, - // even if they widen the existing replica. See the comments - // in Replica.maybeAcquireSnapshotMergeLock for how this is - // made safe. - // - // NB: we expect the replica to know its replicaID at this point - // (i.e. !existingIsPreemptive), though perhaps it's possible - // that this isn't true if the leader initiates a Raft snapshot - // (that would provide a range descriptor with this replica in - // it) but this node reboots (temporarily forgetting its - // replicaID) before the snapshot arrives. + pErr := s.withReplicaForRequest(ctx, &snapHeader.RaftMessageRequest, + func(ctx context.Context, r *Replica) *roachpb.Error { + if !r.IsInitialized() { + s.mu.Lock() + defer s.mu.Unlock() + return roachpb.NewError(s.checkSnapshotOverlapLocked(ctx, snapHeader)) + } return nil - } - } - - // We have a key range [desc.StartKey,desc.EndKey) which we want to apply a - // snapshot for. Is there a conflicting existing placeholder or an - // overlapping range? - return s.checkSnapshotOverlapLocked(ctx, snapHeader) + }) + return pErr.GoError() } // receiveSnapshot receives an incoming snapshot via a pre-opened GRPC stream. diff --git a/pkg/storage/store_snapshot_preemptive.go b/pkg/storage/store_snapshot_preemptive.go index 9c8893a93980..67d351719eea 100644 --- a/pkg/storage/store_snapshot_preemptive.go +++ b/pkg/storage/store_snapshot_preemptive.go @@ -330,8 +330,8 @@ func (s *Store) processPreemptiveSnapshotRequest( // Raft has decided the snapshot shouldn't be applied we would be // writing the tombstone key incorrectly. r.mu.Lock() - if r.mu.state.Desc.NextReplicaID > r.mu.minReplicaID { - r.mu.minReplicaID = r.mu.state.Desc.NextReplicaID + if r.mu.state.Desc.NextReplicaID > r.mu.tombstoneReplicaID { + r.mu.tombstoneReplicaID = r.mu.state.Desc.NextReplicaID } r.mu.Unlock() } diff --git a/pkg/storage/store_test.go b/pkg/storage/store_test.go index 8829fd7a15cb..7f1f26f0db8a 100644 --- a/pkg/storage/store_test.go +++ b/pkg/storage/store_test.go @@ -569,8 +569,8 @@ func TestStoreAddRemoveRanges(t *testing.T) { // Try to remove range 1 again. if err := store.RemoveReplica(context.Background(), repl1, repl1.Desc().NextReplicaID, RemoveOptions{ DestroyData: true, - }); err == nil { - t.Fatal("expected error re-removing same range") + }); err != nil { + t.Fatalf("didn't expect error re-removing same range: %v", err) } // Try to add a range with previously-used (but now removed) ID. repl2Dup := createReplica(store, 1, roachpb.RKey("a"), roachpb.RKey("b")) @@ -712,11 +712,10 @@ func TestStoreRemoveReplicaDestroy(t *testing.T) { // Verify that removal of a replica marks it as destroyed so that future raft // commands on the Replica will silently be dropped. 
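The TestStoreAddRemoveRanges change above captures a behavioral shift: removing an already-removed replica is now a no-op rather than an error, because removal can now be triggered from several racing paths (the replicaGC queue, ReplicaTooOldError handling, applying a removal). A sketch of the idempotence check with a simplified destroy status:

package main

import "fmt"

type destroyStatus int

const (
	alive destroyStatus = iota
	removalPending
	removed
)

type replica struct{ status destroyStatus }

// remove is idempotent: a second caller that loses the race simply observes
// the replica already destroyed and returns nil instead of an error.
func remove(r *replica) error {
	if r.status == removed {
		return nil // a racing caller already destroyed this replica
	}
	r.status = removalPending
	// ... destroy data, drop the replica from the store's maps, etc.
	r.status = removed
	return nil
}

func main() {
	r := &replica{}
	fmt.Println(remove(r)) // <nil>
	fmt.Println(remove(r)) // <nil>: re-removal is no longer an error
}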
- if err := repl1.withRaftGroup(true, func(r *raft.RawNode) (bool, error) { + err = repl1.withRaftGroup(true, func(r *raft.RawNode) (bool, error) { return true, errors.Errorf("unexpectedly created a raft group") - }); err != nil { - t.Fatal(err) - } + }) + require.Equal(t, errRemoved, err) repl1.mu.Lock() expErr := roachpb.NewError(repl1.mu.destroyStatus.err) @@ -2953,104 +2952,6 @@ func TestStoreRemovePlaceholderOnRaftIgnored(t *testing.T) { }) } -// Test that we set proper tombstones for removed replicas and use the -// tombstone to reject attempts to create a replica with a lesser ID. -func TestRemovedReplicaTombstone(t *testing.T) { - defer leaktest.AfterTest(t)() - - const rangeID = 1 - creatingReplica := roachpb.ReplicaDescriptor{ - NodeID: 2, - StoreID: 2, - ReplicaID: 2, - } - - // All test cases assume that the starting replica ID is 1. This assumption - // is enforced by a check within the test logic. - testCases := []struct { - setReplicaID roachpb.ReplicaID // set the existing replica to this before removing it - descNextReplicaID roachpb.ReplicaID // the descriptor's NextReplicaID during replica removal - createReplicaID roachpb.ReplicaID // try creating a replica at this ID - expectCreated bool - }{ - {1, 2, 2, true}, - {1, 2, 1, false}, - {1, 2, 1, false}, - {1, 3, 1, false}, - {1, 3, 2, false}, - {1, 3, 3, true}, - {1, 99, 98, false}, - {1, 99, 99, true}, - {2, 2, 2, false}, - {2, 2, 3, true}, - {2, 2, 99, true}, - {98, 2, 98, false}, - {98, 2, 99, true}, - } - for _, c := range testCases { - t.Run("", func(t *testing.T) { - tc := testContext{} - stopper := stop.NewStopper() - ctx := context.TODO() - defer stopper.Stop(ctx) - tc.Start(t, stopper) - s := tc.store - - repl1, err := s.GetReplica(rangeID) - if err != nil { - t.Fatal(err) - } - repl1.mu.Lock() - if repl1.mu.replicaID != 1 { - repl1.mu.Unlock() - t.Fatalf("test precondition not met; expected ReplicaID=1, got %d", repl1.mu.replicaID) - } - repl1.mu.Unlock() - - // Try to trigger a race where the replica ID gets increased during the GC - // process by taking the store lock and inserting a short sleep to cause - // the goroutine to start running the setReplicaID call. - errChan := make(chan error) - - func() { - repl1.raftMu.Lock() - defer repl1.raftMu.Unlock() - s.mu.Lock() - defer s.mu.Unlock() - repl1.mu.Lock() - defer repl1.mu.Unlock() - - go func() { - errChan <- s.RemoveReplica(ctx, repl1, c.descNextReplicaID, RemoveOptions{DestroyData: true}) - }() - - time.Sleep(1 * time.Millisecond) - - if err := repl1.setReplicaIDRaftMuLockedMuLocked(c.setReplicaID); err != nil { - t.Fatal(err) - } - }() - - if err := <-errChan; testutils.IsError(err, "replica ID has changed") { - // We didn't trigger the race, so just return success. 
-			return
-		} else if err != nil {
-			t.Fatal(err)
-		}
-
-			_, created, err := s.getOrCreateReplica(ctx, rangeID, c.createReplicaID, &creatingReplica)
-			if created != c.expectCreated {
-				t.Errorf("expected s.getOrCreateReplica(%d, %d, %v).created=%v, got %v",
-					rangeID, c.createReplicaID, creatingReplica, c.expectCreated, created)
-			}
-			if !c.expectCreated && !testutils.IsError(err, "raft group deleted") {
-				t.Errorf("expected s.getOrCreateReplica(%d, %d, %v).err='raft group deleted', got %v",
-					rangeID, c.createReplicaID, creatingReplica, err)
-			}
-		})
-	}
-}
-
 type fakeSnapshotStream struct {
 	nextResp *SnapshotResponse
 	nextErr  error
diff --git a/pkg/storage/testing_knobs.go b/pkg/storage/testing_knobs.go
index 7ee8dccf6953..91d5b48e8f9f 100644
--- a/pkg/storage/testing_knobs.go
+++ b/pkg/storage/testing_knobs.go
@@ -139,6 +139,12 @@ type StoreTestingKnobs struct {
 	// DisableRefreshReasonTicks disables refreshing pending commands
 	// periodically.
 	DisableRefreshReasonTicks bool
+	// DisableEagerReplicaRemoval prevents the Replica from destroying itself
+	// when it encounters a ChangeReplicasTrigger that would remove it.
+	// This option can lead to nasty cases during shutdown where a replica
+	// will spin attempting to acquire a split or merge lock on the RHS, which
+	// will always fail. It is generally not safe, but it is useful for testing.
+	DisableEagerReplicaRemoval bool
 	// RefreshReasonTicksPeriod overrides the default period over which
 	// pending commands are refreshed. The period is specified as a multiple
 	// of Raft group ticks.
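The DisableEagerReplicaRemoval knob added above is consulted before the new eager-removal paths run, so tests that want to exercise the replicaGC queue can keep the old behavior. A hedged sketch of how such a gate is typically wired; the surrounding store type here is a stand-in:

package main

import "fmt"

type storeTestingKnobs struct {
	// DisableEagerReplicaRemoval keeps a replica around after it learns it has
	// been removed, leaving cleanup to the replicaGC queue instead.
	DisableEagerReplicaRemoval bool
}

type store struct{ knobs storeTestingKnobs }

// maybeRemoveEagerly shows the shape of the gate: the fast path is skipped
// entirely when the knob is set, and the caller falls back to queue-based GC.
func (s *store) maybeRemoveEagerly(removeNow func()) {
	if s.knobs.DisableEagerReplicaRemoval {
		fmt.Println("eager removal disabled; waiting for the replicaGC queue")
		return
	}
	removeNow()
}

func main() {
	s := &store{knobs: storeTestingKnobs{DisableEagerReplicaRemoval: true}}
	s.maybeRemoveEagerly(func() { fmt.Println("removed eagerly") })
}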