kvserver: transfer lease in JOINT config directly to a VOTER_INCOMING replica

In #74077 we allowed entering a JOINT config when the leaseholder is being
demoted, in which case we attempted to transfer the lease away before leaving
the JOINT config. Looking for a lease transfer target at that stage created
flakiness, since in some situations no target is found and we need to retry.
This PR takes the approach that when the leaseholder is being removed, we
enter the JOINT config only if there is a VOTER_INCOMING replica; in that
case, the lease is transferred directly to that replica, without further
checks. Otherwise, the lease must be transferred away before attempting to
remove the leaseholder.

Release justification: fixes flakiness caused by #74077
Release note: None
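
The following is a minimal sketch in Go of the rule the commit message describes; it is not the CockroachDB implementation, and every name in it (replica, replicaType, planLeaseholderRemoval, the voterIncoming constant) is a hypothetical illustration. It mirrors the behavior exercised in the updated TestLeaseHolderRemoveSelf below, where removing the leaseholder without a replacement is rejected.

// A sketch (not CockroachDB code) of the decision: when a replication change
// would remove the current leaseholder, proceed into the joint config only if
// a VOTER_INCOMING replica exists, and transfer the lease directly to it;
// otherwise require the lease to be moved away first.
package main

import (
	"errors"
	"fmt"
)

type replicaType int

const (
	voterFull replicaType = iota
	voterIncoming
	voterDemoting
)

type replica struct {
	nodeID int
	typ    replicaType
}

// planLeaseholderRemoval returns the node that should receive the lease when
// the current leaseholder is being demoted/removed. It succeeds only if a
// VOTER_INCOMING replica is present; no other candidates are considered.
func planLeaseholderRemoval(replicas []replica) (int, error) {
	for _, r := range replicas {
		if r.typ == voterIncoming {
			return r.nodeID, nil
		}
	}
	return 0, errors.New(
		"cannot remove leaseholder: no VOTER_INCOMING replica; transfer the lease away first")
}

func main() {
	replicas := []replica{
		{nodeID: 1, typ: voterDemoting}, // current leaseholder being demoted
		{nodeID: 2, typ: voterFull},
		{nodeID: 3, typ: voterIncoming}, // incoming replacement voter
	}
	target, err := planLeaseholderRemoval(replicas)
	if err != nil {
		fmt.Println("reject change:", err)
		return
	}
	fmt.Println("enter joint config and transfer lease directly to node", target)
}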
shralex committed Mar 23, 2022
1 parent e02793d commit 40fd5d3
Showing 10 changed files with 115 additions and 218 deletions.
104 changes: 22 additions & 82 deletions pkg/kv/kvserver/client_lease_test.go
@@ -593,46 +593,14 @@ func TestLeasePreferencesRebalance(t *testing.T) {
})
}

// Tests that when leaseholder is relocated, the lease can be transferred directly to a new node.
// This verifies https://github.com/cockroachdb/cockroach/issues/67740
func TestLeaseholderRelocatePreferred(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testLeaseholderRelocateInternal(t, "us")
}

// Tests that when leaseholder is relocated, the lease will transfer to a node in a preferred
// location, even if another node is being added.
// This verifies https://github.com/cockroachdb/cockroach/issues/67740
func TestLeaseholderRelocateNonPreferred(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testLeaseholderRelocateInternal(t, "eu")
}

// Tests that when leaseholder is relocated, the lease will transfer to some node,
// even if nodes in the preferred region aren't available.
// This verifies https://github.com/cockroachdb/cockroach/issues/67740
func TestLeaseholderRelocateNonExistent(t *testing.T) {
// Tests that when leaseholder is relocated, the lease can be transferred directly to new node
func TestLeaseholderRelocate(t *testing.T) {
defer leaktest.AfterTest(t)()
defer log.Scope(t).Close(t)
testLeaseholderRelocateInternal(t, "au")
}

// Tests that when leaseholder is relocated, the lease can be transferred directly to new node
func testLeaseholderRelocateInternal(t *testing.T, preferredRegion string) {
stickyRegistry := server.NewStickyInMemEnginesRegistry()
defer stickyRegistry.CloseAllStickyInMemEngines()
ctx := context.Background()
manualClock := hlc.NewHybridManualClock()
zcfg := zonepb.DefaultZoneConfig()
zcfg.LeasePreferences = []zonepb.LeasePreference{
{
Constraints: []zonepb.Constraint{
{Type: zonepb.Constraint_REQUIRED, Key: "region", Value: preferredRegion},
},
},
}

serverArgs := make(map[int]base.TestServerArgs)
locality := func(region string) roachpb.Locality {
@@ -647,7 +615,6 @@ func testLeaseholderRelocateInternal(t *testing.T, preferredRegion string) {
locality("eu"),
locality("us"),
locality("us"),
locality("au"),
}

const numNodes = 4
@@ -656,9 +623,8 @@ func testLeaseholderRelocateInternal(t *testing.T, preferredRegion string) {
Locality: localities[i],
Knobs: base.TestingKnobs{
Server: &server.TestingKnobs{
ClockSource: manualClock.UnixNano,
DefaultZoneConfigOverride: &zcfg,
StickyEngineRegistry: stickyRegistry,
ClockSource: manualClock.UnixNano,
StickyEngineRegistry: stickyRegistry,
},
},
StoreSpecs: []base.StoreSpec{
@@ -698,7 +664,6 @@ func testLeaseholderRelocateInternal(t *testing.T, preferredRegion string) {
context.Background(), rhsDesc.StartKey.AsRawKey(),
tc.Targets(0, 1, 3), nil, false)
if err != nil {
require.True(t, kvserver.IsTransientLeaseholderError(err), "%v", err)
return err
}
leaseHolder, err = tc.FindRangeLeaseHolder(rhsDesc, nil)
@@ -711,34 +676,13 @@ func testLeaseholderRelocateInternal(t *testing.T, preferredRegion string) {
return nil
})

// The only node with "au" locality is down, the lease can move anywhere.
if preferredRegion == "au" {
return
}

// Make sure lease moved to the preferred region, if .
leaseHolder, err = tc.FindRangeLeaseHolder(rhsDesc, nil)
require.NoError(t, err)
require.Equal(t, locality(preferredRegion),
localities[leaseHolder.NodeID-1])

var leaseholderNodeId int
if preferredRegion == "us" {
require.Equal(t, tc.Target(3).NodeID,
leaseHolder.NodeID)
leaseholderNodeId = 3
} else {
if leaseHolder.NodeID == tc.Target(0).NodeID {
leaseholderNodeId = 0
} else {
require.Equal(t, tc.Target(1).NodeID,
leaseHolder.NodeID)
leaseholderNodeId = 1
}
}
require.Equal(t, tc.Target(3), leaseHolder)

// Double check that lease moved directly.
repl := tc.GetFirstStoreFromServer(t, leaseholderNodeId).
repl := tc.GetFirstStoreFromServer(t, 3).
LookupReplica(roachpb.RKey(rhsDesc.StartKey.AsRawKey()))
history := repl.GetLeaseHistory()
require.Equal(t, leaseHolder.NodeID,
@@ -793,7 +737,7 @@ func TestLeasePreferencesDuringOutage(t *testing.T) {
},
},
}
numNodes := 6
numNodes := 5
serverArgs := make(map[int]base.TestServerArgs)
locality := func(region string, dc string) roachpb.Locality {
return roachpb.Locality{
@@ -804,7 +748,6 @@ func TestLeasePreferencesDuringOutage(t *testing.T) {
}
}
localities := []roachpb.Locality{
locality("eu", "tr"),
locality("eu", "tr"),
locality("us", "sf"),
locality("us", "sf"),
@@ -834,25 +777,26 @@ func TestLeasePreferencesDuringOutage(t *testing.T) {
ReplicationMode: base.ReplicationManual,
ServerArgsPerNode: serverArgs,
})

defer tc.Stopper().Stop(ctx)

key := bootstrap.TestingUserTableDataMin()
tc.SplitRangeOrFatal(t, key)
tc.AddVotersOrFatal(t, key, tc.Targets(2, 4)...)
tc.AddVotersOrFatal(t, key, tc.Targets(1, 3)...)
repl := tc.GetFirstStoreFromServer(t, 0).LookupReplica(roachpb.RKey(key))
require.NoError(t, tc.WaitForVoters(key, tc.Targets(2, 4)...))
tc.TransferRangeLeaseOrFatal(t, *repl.Desc(), tc.Target(2))
require.NoError(t, tc.WaitForVoters(key, tc.Targets(1, 3)...))
tc.TransferRangeLeaseOrFatal(t, *repl.Desc(), tc.Target(1))

// Shutdown the sf datacenter, which is going to kill the node with the lease.
tc.StopServer(1)
tc.StopServer(2)
tc.StopServer(3)

wait := func(duration int64) {
manualClock.Increment(duration)
// Gossip and heartbeat all the live stores, we do this manually otherwise the
// allocator on server 0 may see everyone as temporarily dead due to the
// clock move above.
for _, i := range []int{0, 1, 4, 5} {
for _, i := range []int{0, 3, 4} {
require.NoError(t, tc.Servers[i].HeartbeatNodeLiveness())
require.NoError(t, tc.GetFirstStoreFromServer(t, i).GossipStore(ctx, true))
}
@@ -879,18 +823,18 @@ func TestLeasePreferencesDuringOutage(t *testing.T) {
testutils.SucceedsSoon(t, func() error {
store := tc.GetFirstStoreFromServer(t, 0)
sl, _, _ := store.GetStoreConfig().StorePool.GetStoreList()
if len(sl.Stores()) != 4 {
return errors.Errorf("expected all 4 remaining stores to be live, but only got %v", sl.Stores())
if len(sl.Stores()) != 3 {
return errors.Errorf("expected all 3 remaining stores to be live, but only got %v",
sl.Stores())
}
if err := checkDead(store, 2); err != nil {
if err := checkDead(store, 1); err != nil {
return err
}
if err := checkDead(store, 3); err != nil {
if err := checkDead(store, 2); err != nil {
return err
}
return nil
})

_, _, enqueueError := tc.GetFirstStoreFromServer(t, 0).
ManuallyEnqueue(ctx, "replicate", repl, true)

@@ -903,26 +847,22 @@ func TestLeasePreferencesDuringOutage(t *testing.T) {
return err
})

// Check that the leaseholder is in the US
srv, err := tc.FindMemberServer(newLeaseHolder.StoreID)
require.NoError(t, err)
region, ok := srv.Locality().Find("region")
require.True(t, ok)
require.Equal(t, "us", region)

require.Equal(t, 3, len(repl.Desc().Replicas().Voters().VoterDescriptors()))
// Validate that we upreplicated outside of SF.
replicas := repl.Desc().Replicas().Voters().VoterDescriptors()
require.Equal(t, 3, len(replicas))
for _, replDesc := range replicas {
for _, replDesc := range repl.Desc().Replicas().Voters().VoterDescriptors() {
serv, err := tc.FindMemberServer(replDesc.StoreID)
require.NoError(t, err)
dc, ok := serv.Locality().Find("dc")
require.True(t, ok)
require.NotEqual(t, "sf", dc)
}

// make sure we see the eu node as a lease holder in the second to last position.
history := repl.GetLeaseHistory()
// make sure we see the eu node as a lease holder in the second to last position.
require.Equal(t, tc.Target(0).NodeID, history[len(history)-2].Replica.NodeID)
}

@@ -1001,7 +941,7 @@ func TestLeasesDontThrashWhenNodeBecomesSuspect(t *testing.T) {

_, rhsDesc := tc.SplitRangeOrFatal(t, bootstrap.TestingUserTableDataMin())
tc.AddVotersOrFatal(t, rhsDesc.StartKey.AsRawKey(), tc.Targets(1, 2, 3)...)
tc.RemoveLeaseHolderOrFatal(t, rhsDesc, tc.Target(0))
tc.RemoveLeaseHolderOrFatal(t, rhsDesc, tc.Target(0), tc.Target(1))

startKeys := make([]roachpb.Key, 20)
startKeys[0] = rhsDesc.StartKey.AsRawKey()
2 changes: 1 addition & 1 deletion pkg/kv/kvserver/client_metrics_test.go
@@ -337,7 +337,7 @@ func TestStoreMetrics(t *testing.T) {
// Verify stats after addition.
verifyStats(t, tc, 1, 2)
checkGauge(t, "store 0", tc.GetFirstStoreFromServer(t, 0).Metrics().ReplicaCount, initialCount+1)
tc.RemoveLeaseHolderOrFatal(t, desc, tc.Target(0))
tc.RemoveLeaseHolderOrFatal(t, desc, tc.Target(0), tc.Target(1))
testutils.SucceedsSoon(t, func() error {
_, err := tc.GetFirstStoreFromServer(t, 0).GetReplica(desc.RangeID)
if err == nil {
25 changes: 8 additions & 17 deletions pkg/kv/kvserver/client_raft_test.go
@@ -3841,25 +3841,16 @@ func TestLeaseHolderRemoveSelf(t *testing.T) {
})
defer tc.Stopper().Stop(ctx)

_, desc := tc.SplitRangeOrFatal(t, bootstrap.TestingUserTableDataMin())
key := desc.StartKey.AsRawKey()
tc.AddVotersOrFatal(t, key, tc.Targets(1)...)

// Remove the replica from first store.
tc.RemoveLeaseHolderOrFatal(t, desc, tc.Target(0))
leaseHolder := tc.GetFirstStoreFromServer(t, 0)
key := []byte("a")
tc.SplitRangeOrFatal(t, key)
tc.AddVotersOrFatal(t, key, tc.Target(1))

// Check that lease moved to server 2.
leaseInfo := getLeaseInfoOrFatal(t, context.Background(), tc.Servers[1].DB(), key)
rangeDesc, err := tc.LookupRange(key)
if err != nil {
t.Fatal(err)
}
replica, ok := rangeDesc.GetReplicaDescriptor(tc.Servers[1].GetFirstStoreID())
if !ok {
t.Fatalf("expected to find replica in server 2")
// Attempt to remove the replica from first store.
expectedErr := "invalid ChangeReplicasTrigger"
if _, err := tc.RemoveVoters(key, tc.Target(0)); !testutils.IsError(err, expectedErr) {
t.Fatalf("expected %q error trying to remove leaseholder replica; got %v", expectedErr, err)
}
require.Equal(t, leaseInfo.Lease.Replica, replica)
leaseHolder := tc.GetFirstStoreFromServer(t, 1)

// Expect that we can still successfully do a get on the range.
getArgs := getArgs(key)
2 changes: 1 addition & 1 deletion pkg/kv/kvserver/client_replica_test.go
@@ -1871,7 +1871,7 @@ func TestRemoveLeaseholder(t *testing.T) {
require.Equal(t, tc.Target(0), leaseHolder)

// Remove server 1.
tc.RemoveLeaseHolderOrFatal(t, rhsDesc, tc.Target(0))
tc.RemoveLeaseHolderOrFatal(t, rhsDesc, tc.Target(0), tc.Target(1))

// Check that the lease moved away from 1.
leaseHolder, err = tc.FindRangeLeaseHolder(rhsDesc, nil)
10 changes: 0 additions & 10 deletions pkg/kv/kvserver/markers.go
@@ -78,13 +78,3 @@ var errMarkInvalidReplicationChange = errors.New("invalid replication change")
func IsIllegalReplicationChangeError(err error) bool {
return errors.Is(err, errMarkInvalidReplicationChange)
}

var errLeaseholderNotRaftLeader = errors.New(
"removing leaseholder not allowed since it isn't the Raft leader")

// IsTransientLeaseholderError can happen when a reconfiguration is invoked,
// if the Raft leader is not collocated with the leaseholder.
// This is temporary, and indicates that the operation should be retried.
func IsTransientLeaseholderError(err error) bool {
return errors.Is(err, errLeaseholderNotRaftLeader)
}