From ecc931becdeca6a5563f291489d5b1ed4389a2cb Mon Sep 17 00:00:00 2001 From: Erik Grinaker Date: Sat, 11 Mar 2023 16:32:51 +0000 Subject: [PATCH] kvserver: metamorphically enable `kv.expiration_leases_only.enabled` Epic: none Release note: None --- pkg/kv/kvserver/client_lease_test.go | 78 +++++++++---------- pkg/kv/kvserver/client_merge_test.go | 8 +- pkg/kv/kvserver/client_raft_test.go | 13 +++- .../client_replica_circuit_breaker_test.go | 5 ++ pkg/kv/kvserver/client_replica_test.go | 8 ++ pkg/kv/kvserver/client_split_test.go | 4 + pkg/kv/kvserver/closed_timestamp_test.go | 6 ++ pkg/kv/kvserver/replica_range_lease.go | 7 +- pkg/kv/kvserver/replica_rangefeed_test.go | 4 + pkg/kv/kvserver/replicate_queue_test.go | 8 ++ pkg/testutils/testcluster/testcluster.go | 14 ++++ 11 files changed, 109 insertions(+), 46 deletions(-) diff --git a/pkg/kv/kvserver/client_lease_test.go b/pkg/kv/kvserver/client_lease_test.go index 8d35dc468b3d..ae352965949d 100644 --- a/pkg/kv/kvserver/client_lease_test.go +++ b/pkg/kv/kvserver/client_lease_test.go @@ -31,7 +31,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/allocator/storepool" "github.com/cockroachdb/cockroach/pkg/kv/kvserver/kvserverbase" - "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/settings/cluster" @@ -57,10 +56,15 @@ func TestStoreRangeLease(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) + ctx := context.Background() + st := cluster.MakeTestingClusterSettings() + kvserver.ExpirationLeasesOnly.Override(ctx, &st.SV, false) // override metamorphism + tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{ ReplicationMode: base.ReplicationManual, ServerArgs: base.TestServerArgs{ + Settings: st, Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ DisableMergeQueue: true, @@ -69,7 +73,7 @@ func TestStoreRangeLease(t *testing.T) { }, }, ) - defer tc.Stopper().Stop(context.Background()) + defer tc.Stopper().Stop(ctx) store := tc.GetFirstStoreFromServer(t, 0) // NodeLivenessKeyMax is a static split point, so this is always @@ -839,7 +843,6 @@ func TestLeaseholderRelocate(t *testing.T) { // Make sure the lease is on 3 and is fully upgraded. tc.TransferRangeLeaseOrFatal(t, rhsDesc, tc.Target(2)) - tc.WaitForLeaseUpgrade(ctx, t, rhsDesc) // Check that the lease moved to 3. leaseHolder, err := tc.FindRangeLeaseHolder(rhsDesc, nil) @@ -872,17 +875,24 @@ func TestLeaseholderRelocate(t *testing.T) { require.NoError(t, err) require.Equal(t, tc.Target(3), leaseHolder) - // Double check that lease moved directly. + // Double check that lease moved directly. The tail of the lease history + // should all be on leaseHolder.NodeID. We may metamorphically enable + // kv.expiration_leases_only.enabled, in which case there will be a single + // expiration lease, but otherwise we'll have transferred an expiration lease + // and then upgraded to an epoch lease. repl := tc.GetFirstStoreFromServer(t, 3). 
LookupReplica(roachpb.RKey(rhsDesc.StartKey.AsRawKey())) history := repl.GetLeaseHistory() - require.Equal(t, leaseHolder.NodeID, - history[len(history)-1].Replica.NodeID) - require.Equal(t, leaseHolder.NodeID, - history[len(history)-2].Replica.NodeID) // account for the lease upgrade - require.Equal(t, tc.Target(2).NodeID, - history[len(history)-3].Replica.NodeID) + require.Equal(t, leaseHolder.NodeID, history[len(history)-1].Replica.NodeID) + var prevLeaseHolder roachpb.NodeID + for i := len(history) - 1; i >= 0; i-- { + if id := history[i].Replica.NodeID; id != leaseHolder.NodeID { + prevLeaseHolder = id + break + } + } + require.Equal(t, tc.Target(2).NodeID, prevLeaseHolder) } func gossipLiveness(t *testing.T, tc *testcluster.TestCluster) { @@ -1100,15 +1110,20 @@ func TestLeasesDontThrashWhenNodeBecomesSuspect(t *testing.T) { locality("us-west"), locality("us-west"), } + + ctx := context.Background() + st := cluster.MakeTestingClusterSettings() + kvserver.ExpirationLeasesOnly.Override(ctx, &st.SV, false) // override metamorphism + // Speed up lease transfers. stickyRegistry := server.NewStickyInMemEnginesRegistry() defer stickyRegistry.CloseAllStickyInMemEngines() - ctx := context.Background() manualClock := hlc.NewHybridManualClock() serverArgs := make(map[int]base.TestServerArgs) numNodes := 4 for i := 0; i < numNodes; i++ { serverArgs[i] = base.TestServerArgs{ + Settings: st, Locality: localities[i], Knobs: base.TestingKnobs{ Server: &server.TestingKnobs{ @@ -1346,13 +1361,6 @@ func TestAcquireLeaseTimeout(t *testing.T) { return nil } - // The lease request timeout depends on the Raft election timeout, so we set - // it low to get faster timeouts (800 ms) and speed up the test. - var raftCfg base.RaftConfig - raftCfg.SetDefaults() - raftCfg.RaftHeartbeatIntervalTicks = 1 - raftCfg.RaftElectionTimeoutTicks = 2 - manualClock := hlc.NewHybridManualClock() // Start a two-node cluster. @@ -1360,7 +1368,11 @@ func TestAcquireLeaseTimeout(t *testing.T) { tc := testcluster.StartTestCluster(t, numNodes, base.TestClusterArgs{ ReplicationMode: base.ReplicationManual, ServerArgs: base.TestServerArgs{ - RaftConfig: raftCfg, + RaftConfig: base.RaftConfig{ + // Lease request timeout depends on Raft election timeout, speed it up. + RaftHeartbeatIntervalTicks: 1, + RaftElectionTimeoutTicks: 2, + }, Knobs: base.TestingKnobs{ Server: &server.TestingKnobs{ WallClock: manualClock, @@ -1383,27 +1395,10 @@ func TestAcquireLeaseTimeout(t *testing.T) { repl, err := tc.GetFirstStoreFromServer(t, 0).GetReplica(desc.RangeID) require.NoError(t, err) - tc.IncrClockForLeaseUpgrade(t, manualClock) - tc.WaitForLeaseUpgrade(ctx, t, desc) - - // Stop n2 and increment its epoch to invalidate the lease. + // Stop n2 and invalidate its leases by forwarding the clock. 
tc.StopServer(1) - n2ID := tc.Server(1).NodeID() - lv, ok := tc.Server(0).NodeLiveness().(*liveness.NodeLiveness) - require.True(t, ok) - lvNode2, ok := lv.GetLiveness(n2ID) - require.True(t, ok) - manualClock.Forward(lvNode2.Expiration.WallTime) - - testutils.SucceedsSoon(t, func() error { - lvNode2, ok = lv.GetLiveness(n2ID) - require.True(t, ok) - err := lv.IncrementEpoch(context.Background(), lvNode2.Liveness) - if errors.Is(err, liveness.ErrEpochAlreadyIncremented) { - return nil - } - return err - }) + leaseDuration := tc.GetFirstStoreFromServer(t, 0).GetStoreConfig().RangeLeaseDuration + manualClock.Increment(leaseDuration.Nanoseconds()) require.False(t, repl.CurrentLeaseStatus(ctx).IsValid()) // Trying to acquire the lease should error with an empty NLHE, since the @@ -1456,11 +1451,14 @@ func TestLeaseTransfersUseExpirationLeasesAndBumpToEpochBasedOnes(t *testing.T) }{} ctx := context.Background() + st := cluster.MakeTestingClusterSettings() + kvserver.ExpirationLeasesOnly.Override(ctx, &st.SV, false) // override metamorphism manualClock := hlc.NewHybridManualClock() tci := serverutils.StartNewTestCluster(t, 2, base.TestClusterArgs{ ReplicationMode: base.ReplicationManual, ServerArgs: base.TestServerArgs{ + Settings: st, Knobs: base.TestingKnobs{ Server: &server.TestingKnobs{ // Never ticked -- demonstrating that we're not relying on @@ -1527,6 +1525,8 @@ func TestLeaseUpgradeVersionGate(t *testing.T) { clusterversion.ByKey(clusterversion.TODODelete_V22_2EnableLeaseUpgrade-1), false, /* initializeVersion */ ) + kvserver.ExpirationLeasesOnly.Override(ctx, &st.SV, false) // override metamorphism + tci := serverutils.StartNewTestCluster(t, 2, base.TestClusterArgs{ ReplicationMode: base.ReplicationManual, ServerArgs: base.TestServerArgs{ diff --git a/pkg/kv/kvserver/client_merge_test.go b/pkg/kv/kvserver/client_merge_test.go index a6eacfd3a38d..2311014835cc 100644 --- a/pkg/kv/kvserver/client_merge_test.go +++ b/pkg/kv/kvserver/client_merge_test.go @@ -486,10 +486,15 @@ func mergeCheckingTimestampCaches( manualClock := hlc.NewHybridManualClock() ctx := context.Background() + st := cluster.MakeTestingClusterSettings() + // This test explicitly sets up a leader/leaseholder partition, which doesn't + // work with expiration leases (the lease expires). 
+	kvserver.ExpirationLeasesOnly.Override(ctx, &st.SV, false) // override metamorphism
 	tc := testcluster.StartTestCluster(t, 3,
 		base.TestClusterArgs{
 			ReplicationMode: base.ReplicationManual,
 			ServerArgs: base.TestServerArgs{
+				Settings: st,
 				Knobs: base.TestingKnobs{
 					Server: &server.TestingKnobs{
 						WallClock: manualClock,
@@ -1012,9 +1017,6 @@ func TestStoreRangeMergeTimestampCacheCausality(t *testing.T) {
 		if !lhsRepl1.OwnsValidLease(ctx, tc.Servers[1].Clock().NowAsClockTimestamp()) {
 			return errors.New("s2 does not own valid lease for lhs range")
 		}
-		if lhsRepl1.CurrentLeaseStatus(ctx).Lease.Type() != roachpb.LeaseEpoch {
-			return errors.Errorf("lease still an expiration based lease")
-		}
 		return nil
 	})
diff --git a/pkg/kv/kvserver/client_raft_test.go b/pkg/kv/kvserver/client_raft_test.go
index 3a4dbed642f7..39574d0e43eb 100644
--- a/pkg/kv/kvserver/client_raft_test.go
+++ b/pkg/kv/kvserver/client_raft_test.go
@@ -1315,9 +1315,13 @@ func TestRequestsOnFollowerWithNonLiveLeaseholder(t *testing.T) {
 		return nil
 	}
+	st := cluster.MakeTestingClusterSettings()
+	kvserver.ExpirationLeasesOnly.Override(ctx, &st.SV, false) // override metamorphism
+
 	clusterArgs := base.TestClusterArgs{
 		ReplicationMode: base.ReplicationManual,
 		ServerArgs: base.TestServerArgs{
+			Settings: st,
 			// Reduce the election timeout some to speed up the test.
 			RaftConfig: base.RaftConfig{RaftElectionTimeoutTicks: 10},
 			Knobs: base.TestingKnobs{
@@ -1749,7 +1753,7 @@ func TestLogGrowthWhenRefreshingPendingCommands(t *testing.T) {
 	}
 	propNode := tc.GetFirstStoreFromServer(t, propIdx).TestSender()
 	tc.TransferRangeLeaseOrFatal(t, *leaderRepl.Desc(), tc.Target(propIdx))
-	tc.WaitForLeaseUpgrade(ctx, t, *leaderRepl.Desc())
+	tc.MaybeWaitForLeaseUpgrade(ctx, t, *leaderRepl.Desc())
 	testutils.SucceedsSoon(t, func() error {
 		// Lease transfers may not be immediately observed by the new
 		// leaseholder. Wait until the new leaseholder is aware.
@@ -4872,10 +4876,17 @@ func TestDefaultConnectionDisruptionDoesNotInterfereWithSystemTraffic(t *testing
 		},
 	}
+	// This test relies on epoch leases being invalidated when a node restarts,
+	// which isn't true for expiration leases, so we disable expiration lease
+	// metamorphism.
+	st := cluster.MakeTestingClusterSettings()
+	kvserver.ExpirationLeasesOnly.Override(ctx, &st.SV, false) // override metamorphism
+
 	const numServers int = 3
 	stickyServerArgs := make(map[int]base.TestServerArgs)
 	for i := 0; i < numServers; i++ {
 		stickyServerArgs[i] = base.TestServerArgs{
+			Settings: st,
 			StoreSpecs: []base.StoreSpec{
 				{
 					InMemory: true,
diff --git a/pkg/kv/kvserver/client_replica_circuit_breaker_test.go b/pkg/kv/kvserver/client_replica_circuit_breaker_test.go
index 241cdda40746..865b200154b4 100644
--- a/pkg/kv/kvserver/client_replica_circuit_breaker_test.go
+++ b/pkg/kv/kvserver/client_replica_circuit_breaker_test.go
@@ -700,6 +700,10 @@ func setupCircuitBreakerTest(t *testing.T) *circuitBreakerTest {
 	var rangeID int64 // atomic
 	slowThresh := &atomic.Value{} // supports .SetSlowThreshold(x)
 	slowThresh.Store(time.Duration(0))
+	ctx := context.Background()
+	st := cluster.MakeTestingClusterSettings()
+	// TODO(erikgrinaker): We may not need this for all circuit breaker tests.
+ kvserver.ExpirationLeasesOnly.Override(ctx, &st.SV, false) // override metamorphism storeKnobs := &kvserver.StoreTestingKnobs{ SlowReplicationThresholdOverride: func(ba *kvpb.BatchRequest) time.Duration { t.Helper() @@ -748,6 +752,7 @@ func setupCircuitBreakerTest(t *testing.T) *circuitBreakerTest { args := base.TestClusterArgs{ ReplicationMode: base.ReplicationManual, ServerArgs: base.TestServerArgs{ + Settings: st, RaftConfig: raftCfg, Knobs: base.TestingKnobs{ Server: &server.TestingKnobs{ diff --git a/pkg/kv/kvserver/client_replica_test.go b/pkg/kv/kvserver/client_replica_test.go index d13f4b7d95cc..98fd7f39d304 100644 --- a/pkg/kv/kvserver/client_replica_test.go +++ b/pkg/kv/kvserver/client_replica_test.go @@ -2060,11 +2060,14 @@ func TestLeaseMetricsOnSplitAndTransfer(t *testing.T) { return nil } ctx := context.Background() + st := cluster.MakeTestingClusterSettings() + kvserver.ExpirationLeasesOnly.Override(ctx, &st.SV, false) // override metamorphism manualClock := hlc.NewHybridManualClock() tc := testcluster.StartTestCluster(t, 2, base.TestClusterArgs{ ReplicationMode: base.ReplicationManual, ServerArgs: base.TestServerArgs{ + Settings: st, Knobs: base.TestingKnobs{ Store: &kvserver.StoreTestingKnobs{ EvalKnobs: kvserverbase.BatchEvalTestingKnobs{ @@ -4358,6 +4361,11 @@ func TestStrictGCEnforcement(t *testing.T) { protectedts.PollInterval.Override(ctx, &tc.Server(0).ClusterSettings().SV, 500*time.Hour) defer protectedts.PollInterval.Override(ctx, &tc.Server(0).ClusterSettings().SV, 2*time.Minute) + // Disable follower reads. When metamorphically enabling expiration-based + // leases, an expired lease will cause a follower read which bypasses the + // strict GC enforcement. + sqlDB.Exec(t, "SET CLUSTER SETTING kv.closed_timestamp.follower_reads_enabled = false") + sqlDB.Exec(t, "SET CLUSTER SETTING kv.closed_timestamp.target_duration = '10 ms'") defer sqlDB.Exec(t, `SET CLUSTER SETTING kv.gc_ttl.strict_enforcement.enabled = DEFAULT`) setStrictGC(t, true) diff --git a/pkg/kv/kvserver/client_split_test.go b/pkg/kv/kvserver/client_split_test.go index 94b2e3ddfc62..bd19d50f4c3f 100644 --- a/pkg/kv/kvserver/client_split_test.go +++ b/pkg/kv/kvserver/client_split_test.go @@ -43,6 +43,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/rpc" "github.com/cockroachdb/cockroach/pkg/server" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/spanconfig" "github.com/cockroachdb/cockroach/pkg/sql/catalog/bootstrap" "github.com/cockroachdb/cockroach/pkg/sql/catalog/systemschema" @@ -2784,11 +2785,14 @@ func TestStoreCapacityAfterSplit(t *testing.T) { defer leaktest.AfterTest(t)() defer log.Scope(t).Close(t) ctx := context.Background() + st := cluster.MakeTestingClusterSettings() + kvserver.ExpirationLeasesOnly.Override(ctx, &st.SV, false) // override metamorphism manualClock := hlc.NewHybridManualClock() tc := testcluster.StartTestCluster(t, 2, base.TestClusterArgs{ ReplicationMode: base.ReplicationManual, ServerArgs: base.TestServerArgs{ + Settings: st, Knobs: base.TestingKnobs{ Server: &server.TestingKnobs{ WallClock: manualClock, diff --git a/pkg/kv/kvserver/closed_timestamp_test.go b/pkg/kv/kvserver/closed_timestamp_test.go index 788bb17201b7..8c10b36539c5 100644 --- a/pkg/kv/kvserver/closed_timestamp_test.go +++ b/pkg/kv/kvserver/closed_timestamp_test.go @@ -31,6 +31,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver/txnwait" "github.com/cockroachdb/cockroach/pkg/roachpb" 
"github.com/cockroachdb/cockroach/pkg/server" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descpb" "github.com/cockroachdb/cockroach/pkg/sql/rowenc/keyside" "github.com/cockroachdb/cockroach/pkg/sql/sem/tree" @@ -671,8 +672,13 @@ func TestClosedTimestampFrozenAfterSubsumption(t *testing.T) { st := mergeFilter{} manual := hlc.NewHybridManualClock() pinnedLeases := kvserver.NewPinnedLeases() + + cs := cluster.MakeTestingClusterSettings() + kvserver.ExpirationLeasesOnly.Override(ctx, &cs.SV, false) // override metamorphism + clusterArgs := base.TestClusterArgs{ ServerArgs: base.TestServerArgs{ + Settings: cs, RaftConfig: base.RaftConfig{ // We set the raft election timeout to a small duration. This should // result in the node liveness duration being ~3.6 seconds. Note that diff --git a/pkg/kv/kvserver/replica_range_lease.go b/pkg/kv/kvserver/replica_range_lease.go index 8277f7582c95..a5880ccf0fe5 100644 --- a/pkg/kv/kvserver/replica_range_lease.go +++ b/pkg/kv/kvserver/replica_range_lease.go @@ -57,6 +57,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv/kvserver/raftutil" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings" + "github.com/cockroachdb/cockroach/pkg/util" "github.com/cockroachdb/cockroach/pkg/util/hlc" "github.com/cockroachdb/cockroach/pkg/util/log" "github.com/cockroachdb/cockroach/pkg/util/retry" @@ -75,11 +76,11 @@ var transferExpirationLeasesFirstEnabled = settings.RegisterBoolSetting( true, ) -var expirationLeasesOnly = settings.RegisterBoolSetting( +var ExpirationLeasesOnly = settings.RegisterBoolSetting( settings.SystemOnly, "kv.expiration_leases_only.enabled", "only use expiration-based leases, never epoch-based ones (experimental, affects performance)", - false, + util.ConstantWithMetamorphicTestBool("kv.expiration_leases_only.enabled", false), ) var leaseStatusLogLimiter = func() *log.EveryN { @@ -789,7 +790,7 @@ func (r *Replica) requiresExpirationLeaseRLocked() bool { // expiration-based lease, either because it requires one or because // kv.expiration_leases_only.enabled is enabled. func (r *Replica) shouldUseExpirationLeaseRLocked() bool { - return expirationLeasesOnly.Get(&r.ClusterSettings().SV) || r.requiresExpirationLeaseRLocked() + return ExpirationLeasesOnly.Get(&r.ClusterSettings().SV) || r.requiresExpirationLeaseRLocked() } // requestLeaseLocked executes a request to obtain or extend a lease diff --git a/pkg/kv/kvserver/replica_rangefeed_test.go b/pkg/kv/kvserver/replica_rangefeed_test.go index 123c55eac771..77281986930e 100644 --- a/pkg/kv/kvserver/replica_rangefeed_test.go +++ b/pkg/kv/kvserver/replica_rangefeed_test.go @@ -1242,10 +1242,14 @@ func TestRangefeedCheckpointsRecoverFromLeaseExpiration(t *testing.T) { // evaluating on the scratch range. 
var rejectExtraneousRequests int64 // accessed atomically + st := cluster.MakeTestingClusterSettings() + kvserver.ExpirationLeasesOnly.Override(ctx, &st.SV, false) // override metamorphism + cargs := aggressiveResolvedTimestampClusterArgs cargs.ReplicationMode = base.ReplicationManual manualClock := hlc.NewHybridManualClock() cargs.ServerArgs = base.TestServerArgs{ + Settings: st, Knobs: base.TestingKnobs{ Server: &server.TestingKnobs{ WallClock: manualClock, diff --git a/pkg/kv/kvserver/replicate_queue_test.go b/pkg/kv/kvserver/replicate_queue_test.go index ed29b4fe0191..4c090ae9d781 100644 --- a/pkg/kv/kvserver/replicate_queue_test.go +++ b/pkg/kv/kvserver/replicate_queue_test.go @@ -39,6 +39,7 @@ import ( "github.com/cockroachdb/cockroach/pkg/rpc" "github.com/cockroachdb/cockroach/pkg/server" "github.com/cockroachdb/cockroach/pkg/server/serverpb" + "github.com/cockroachdb/cockroach/pkg/settings/cluster" "github.com/cockroachdb/cockroach/pkg/spanconfig" "github.com/cockroachdb/cockroach/pkg/sql/catalog/bootstrap" "github.com/cockroachdb/cockroach/pkg/testutils" @@ -2070,6 +2071,8 @@ func TestReplicateQueueAcquiresInvalidLeases(t *testing.T) { defer log.Scope(t).Close(t) ctx := context.Background() + st := cluster.MakeTestingClusterSettings() + kvserver.ExpirationLeasesOnly.Override(ctx, &st.SV, false) // override metamorphism stickyEngineRegistry := server.NewStickyInMemEnginesRegistry() defer stickyEngineRegistry.CloseAllStickyInMemEngines() @@ -2082,6 +2085,7 @@ func TestReplicateQueueAcquiresInvalidLeases(t *testing.T) { // statuses pre and post enabling the replicate queue. ReplicationMode: base.ReplicationManual, ServerArgs: base.TestServerArgs{ + Settings: st, DisableDefaultTestTenant: true, ScanMinIdleTime: time.Millisecond, ScanMaxIdleTime: time.Millisecond, @@ -2362,8 +2366,12 @@ func TestReplicateQueueExpirationLeasesOnly(t *testing.T) { skip.UnderShort(t) ctx := context.Background() + st := cluster.MakeTestingClusterSettings() + kvserver.ExpirationLeasesOnly.Override(ctx, &st.SV, false) // override metamorphism + tc := testcluster.StartTestCluster(t, 3, base.TestClusterArgs{ ServerArgs: base.TestServerArgs{ + Settings: st, // Speed up the replicate queue, which switches the lease type. ScanMinIdleTime: time.Millisecond, ScanMaxIdleTime: time.Millisecond, diff --git a/pkg/testutils/testcluster/testcluster.go b/pkg/testutils/testcluster/testcluster.go index ff38f1ab674d..f651ddb297ed 100644 --- a/pkg/testutils/testcluster/testcluster.go +++ b/pkg/testutils/testcluster/testcluster.go @@ -1051,11 +1051,25 @@ func (tc *TestCluster) IncrClockForLeaseUpgrade(t *testing.T, clock *hlc.HybridM ) } +// MaybeWaitForLeaseUpgrade waits until the lease held for the given range +// descriptor is upgraded to an epoch-based one, but only if we expect the lease +// to be upgraded. +func (tc *TestCluster) MaybeWaitForLeaseUpgrade( + ctx context.Context, t *testing.T, desc roachpb.RangeDescriptor, +) { + if kvserver.ExpirationLeasesOnly.Get(&tc.Server(0).ClusterSettings().SV) { + return + } + tc.WaitForLeaseUpgrade(ctx, t, desc) +} + // WaitForLeaseUpgrade waits until the lease held for the given range descriptor // is upgraded to an epoch-based one. 
func (tc *TestCluster) WaitForLeaseUpgrade( ctx context.Context, t *testing.T, desc roachpb.RangeDescriptor, ) { + require.False(t, kvserver.ExpirationLeasesOnly.Get(&tc.Server(0).ClusterSettings().SV), + "cluster configured to only use expiration leases") testutils.SucceedsSoon(t, func() error { li, _, err := tc.FindRangeLeaseEx(ctx, desc, nil) require.NoError(t, err)
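
Most of the tests touched above follow the same opt-out pattern: build a cluster.Settings up front, pin kv.expiration_leases_only.enabled to false so the new metamorphic default can't flip it, and hand those settings to the test cluster. A minimal sketch of that pattern, using only the APIs that already appear in this diff (the test name is illustrative, not part of the change):

	func TestNeedsEpochLeases(t *testing.T) {
		ctx := context.Background()

		// Pin the setting so the metamorphic default can't switch this
		// test to expiration-only leases.
		st := cluster.MakeTestingClusterSettings()
		kvserver.ExpirationLeasesOnly.Override(ctx, &st.SV, false) // override metamorphism

		tc := testcluster.StartTestCluster(t, 1, base.TestClusterArgs{
			ServerArgs: base.TestServerArgs{Settings: st},
		})
		defer tc.Stopper().Stop(ctx)

		// ... assertions that rely on epoch-based leases go here ...
	}

Tests that tolerate either lease type can instead call tc.MaybeWaitForLeaseUpgrade(ctx, t, desc), which returns immediately when the cluster is configured for expiration-only leases.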