From 02d8b59ba2008ce9146f4201f363484ddb849765 Mon Sep 17 00:00:00 2001 From: Andrew Baptist Date: Sun, 24 Sep 2023 13:02:43 -0400 Subject: [PATCH 1/5] upgrade: remove unused fields Epic: none Release note: None --- pkg/upgrade/BUILD.bazel | 1 - pkg/upgrade/tenant_upgrade.go | 7 ------- pkg/upgrade/upgradejob/upgrade_job.go | 3 --- 3 files changed, 11 deletions(-) diff --git a/pkg/upgrade/BUILD.bazel b/pkg/upgrade/BUILD.bazel index 500cc1f7cc4d..7bcef1ffdafb 100644 --- a/pkg/upgrade/BUILD.bazel +++ b/pkg/upgrade/BUILD.bazel @@ -20,7 +20,6 @@ go_library( "//pkg/roachpb", "//pkg/server/serverpb", "//pkg/settings/cluster", - "//pkg/spanconfig", "//pkg/sql/catalog/descs", "//pkg/sql/catalog/lease", "//pkg/sql/catalog/resolver", diff --git a/pkg/upgrade/tenant_upgrade.go b/pkg/upgrade/tenant_upgrade.go index e467c03f64b5..b26e8241317a 100644 --- a/pkg/upgrade/tenant_upgrade.go +++ b/pkg/upgrade/tenant_upgrade.go @@ -20,7 +20,6 @@ import ( "github.com/cockroachdb/cockroach/pkg/kv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/settings/cluster" - "github.com/cockroachdb/cockroach/pkg/spanconfig" "github.com/cockroachdb/cockroach/pkg/sql/catalog/descs" "github.com/cockroachdb/cockroach/pkg/sql/catalog/lease" "github.com/cockroachdb/cockroach/pkg/sql/catalog/resolver" @@ -44,12 +43,6 @@ type TenantDeps struct { // TODO(ajwerner): Remove this in favor of the descs.DB above. InternalExecutor isql.Executor - SpanConfig struct { // deps for span config upgrades; can be removed accordingly - spanconfig.KVAccessor - spanconfig.Splitter - Default roachpb.SpanConfig - } - TestingKnobs *upgradebase.TestingKnobs SchemaResolverConstructor func( // A constructor that returns a schema resolver for `descriptors` in `currDb`. txn *kv.Txn, descriptors *descs.Collection, currDb string, diff --git a/pkg/upgrade/upgradejob/upgrade_job.go b/pkg/upgrade/upgradejob/upgrade_job.go index 372ff6320cd5..beedb0082c13 100644 --- a/pkg/upgrade/upgradejob/upgrade_job.go +++ b/pkg/upgrade/upgradejob/upgrade_job.go @@ -97,9 +97,6 @@ func (r resumer) Resume(ctx context.Context, execCtxI interface{}) error { TestingKnobs: execCtx.ExecCfg().UpgradeTestingKnobs, SessionData: execCtx.SessionData(), } - tenantDeps.SpanConfig.KVAccessor = execCtx.ExecCfg().SpanConfigKVAccessor - tenantDeps.SpanConfig.Splitter = execCtx.ExecCfg().SpanConfigSplitter - tenantDeps.SpanConfig.Default = execCtx.ExecCfg().DefaultZoneConfig.AsSpanConfig() tenantDeps.SchemaResolverConstructor = func( txn *kv.Txn, descriptors *descs.Collection, currDb string, From b670d20f90226f8805d1babaea985a4b2b9792fd Mon Sep 17 00:00:00 2001 From: Radu Berinde Date: Wed, 27 Sep 2023 11:59:31 -0700 Subject: [PATCH 2/5] clusterversion: fix formatting of versions comment Some of the formatting in this big comment is getting screwed up by gofmt. This change fixes it - the trick was to make sure the "sub lists" are all indented by a tab so it's all treated like a big code block. Epic: none Release note: None --- pkg/clusterversion/cockroach_versions.go | 106 +++++++++++------------ 1 file changed, 52 insertions(+), 54 deletions(-) diff --git a/pkg/clusterversion/cockroach_versions.go b/pkg/clusterversion/cockroach_versions.go index 9d5e316453bc..d5b79122ddf1 100644 --- a/pkg/clusterversion/cockroach_versions.go +++ b/pkg/clusterversion/cockroach_versions.go @@ -26,70 +26,68 @@ type Key int // // You'll want to add a new one in the following cases: // -// (a) When introducing a backwards incompatible feature. 
Broadly, by this we +// (a) When introducing a backwards incompatible feature. Broadly, by this we +// mean code that's structured as follows: // -// mean code that's structured as follows: +// if (specific-version is active) { +// // Implies that all nodes in the cluster are running binaries that +// // have this code. We can "enable" the new feature knowing that +// // outbound RPCs, requests, etc. will be handled by nodes that know +// // how to do so. +// } else { +// // There may be some nodes running older binaries without this code. +// // To be safe, we'll want to behave as we did before introducing +// // this feature. +// } // -// if (specific-version is active) { -// // Implies that all nodes in the cluster are running binaries that -// // have this code. We can "enable" the new feature knowing that -// // outbound RPCs, requests, etc. will be handled by nodes that know -// // how to do so. -// } else { -// // There may be some nodes running older binaries without this code. -// // To be safe, we'll want to behave as we did before introducing -// // this feature. -// } +// Authors of migrations need to be careful in ensuring that end-users +// aren't able to enable feature gates before they're active. This is fine: // -// Authors of migrations need to be careful in ensuring that end-users -// aren't able to enable feature gates before they're active. This is fine: +// func handleSomeNewStatement() error { +// if !(specific-version is active) { +// return errors.New("cluster version needs to be bumped") +// } +// // ... +// } // -// func handleSomeNewStatement() error { -// if !(specific-version is active) { -// return errors.New("cluster version needs to be bumped") -// } -// // ... -// } +// At the same time, with requests/RPCs originating at other crdb nodes, the +// initiator of the request gets to decide what's supported. A node should +// not refuse functionality on the grounds that its view of the version gate +// is as yet inactive. Consider the sender: // -// At the same time, with requests/RPCs originating at other crdb nodes, the -// initiator of the request gets to decide what's supported. A node should -// not refuse functionality on the grounds that its view of the version gate -// is as yet inactive. Consider the sender: -// -// func invokeSomeRPC(req) { -// if (specific-version is active) { -// // Like mentioned above, this implies that all nodes in the -// // cluster are running binaries that can handle this new -// // feature. We may have learned about this fact before the -// // node on the other end. This is due to the fact that migration -// // manager informs each node about the specific-version being -// // activated active concurrently. See BumpClusterVersion for -// // where that happens. Still, it's safe for us to enable the new -// // feature flags as we trust the recipient to know how to deal -// // with it. -// req.NewFeatureFlag = true -// } -// send(req) -// } +// func invokeSomeRPC(req) { +// if (specific-version is active) { +// // Like mentioned above, this implies that all nodes in the +// // cluster are running binaries that can handle this new +// // feature. We may have learned about this fact before the +// // node on the other end. This is due to the fact that migration +// // manager informs each node about the specific-version being +// // activated active concurrently. See BumpClusterVersion for +// // where that happens. 
Still, it's safe for us to enable the new +// // feature flags as we trust the recipient to know how to deal +// // with it. +// req.NewFeatureFlag = true +// } +// send(req) +// } // // And consider the recipient: // -// func someRPC(req) { -// if !req.NewFeatureFlag { -// // Legacy behavior... -// } -// // There's no need to even check if the specific-version is active. -// // If the flag is enabled, the specific-version must have been -// // activated, even if we haven't yet heard about it (we will pretty -// // soon). -// } -// -// See clusterversion.Handle.IsActive and usage of some existing versions -// below for more clues on the matter. +// func someRPC(req) { +// if !req.NewFeatureFlag { +// // Legacy behavior... +// } +// // There's no need to even check if the specific-version is active. +// // If the flag is enabled, the specific-version must have been +// // activated, even if we haven't yet heard about it (we will pretty +// // soon). +// } // -// (b) When cutting a major release branch. When cutting release-20.2 for +// See clusterversion.Handle.IsActive and usage of some existing versions +// below for more clues on the matter. // -// example, you'll want to introduce the following to `master`. +// (b) When cutting a major release branch. When cutting release-20.2 for +// example, you'll want to introduce the following to `master`. // // (i) V20_2 (keyed to v20.2.0-0}) // (ii) V21_1Start (keyed to v20.2.0-1}) From fdb6e86a88cff5ea764aebd26a090aee5cd8431f Mon Sep 17 00:00:00 2001 From: Alex Sarkesian Date: Thu, 21 Sep 2023 16:40:58 -0700 Subject: [PATCH 3/5] kvcoord: fix flake in `TestTransactionUnexpectedlyCommitted` The `TestTransactionUnexpectedlyCommitted/recovery_after_transfer_lease` test, introduced to test #107658, has been flaky (particularly under deadlock builds) due to a race condition between a retry of a write and intent resolution. While both orderings in this test result in a correct `AmbiguousResultError` for the client, when intent resolution wins the race, the retried write will attempt to push away the current lockholder; since it is illegal for a committed transaction to perform a push, this results in a different secondary error attached to the `AmbiguousResultError`. This change ensures a predefined ordering of these operations so that the secondary error is consistent across runs of the test. Fixes: #110187 Release note: None --- .../kvclient/kvcoord/dist_sender_ambiguous_test.go | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/pkg/kv/kvclient/kvcoord/dist_sender_ambiguous_test.go b/pkg/kv/kvclient/kvcoord/dist_sender_ambiguous_test.go index 781ade85153c..8097129bab8e 100644 --- a/pkg/kv/kvclient/kvcoord/dist_sender_ambiguous_test.go +++ b/pkg/kv/kvclient/kvcoord/dist_sender_ambiguous_test.go @@ -274,7 +274,6 @@ func TestTransactionUnexpectedlyCommitted(t *testing.T) { // several seconds, and requires maintaining expected leases. skip.UnderShort(t) skip.UnderStressRace(t) - skip.WithIssue(t, 110187, "flaky test") succeedsSoonDuration := testutils.DefaultSucceedsSoonDuration if util.RaceEnabled { @@ -1050,7 +1049,7 @@ func TestTransactionUnexpectedlyCommitted(t *testing.T) { // 7. _->n1: PushTxn(txn2->txn1) -- Discovers txn1 in STAGING and starts // recovery. // 8. _->n1: RecoverTxn(txn1) -- Recovery should mark txn1 committed, but - // pauses before returning so that txn1's intents don't get cleaned up. + // intent resolution on txn1 needs to be paused until after txn1 finishes. 
if req.ba.IsSingleRecoverTxnRequest() && cp == AfterSending { close(recoverComplete) } @@ -1077,8 +1076,15 @@ func TestTransactionUnexpectedlyCommitted(t *testing.T) { // 12. txn1->n1: EndTxn(commit) -- Recovery has already completed, so this // request fails with "transaction unexpectedly committed". - // - if req.ba.IsSingleRecoverTxnRequest() && cp == AfterSending { + // + // If the intent on (b) were resolved and txn2 could grab the lock prior + // to txn1's retry of the Put(b), the retry will cause a PushTxn to txn2. + // Given that the recovery at (8) has already completed, a PushTxn + // request where the pusher is a committed transaction results in an + // "already committed" TransactionStatusError from the txnwait queue. + // While this still results in AmbiguousResultError from the DistSender, + // the reason will be distinct; as such we pause the intent resolution. + if riReq, ok := req.ba.GetArg(kvpb.ResolveIntent); ok && riReq.Header().Key.Equal(keyB) && cp == BeforeSending { req.pauseUntil(t, txn1Done, cp) t.Logf("%s - complete, resp={%s}", req.prefix, resp) } From 561563782859688efb058db071a216191c68d86d Mon Sep 17 00:00:00 2001 From: Lidor Carmel Date: Wed, 27 Sep 2023 13:28:58 -0500 Subject: [PATCH 4/5] roachtest: fix leaked goroutines in c2c roachtests Without this PR, c2c roachtests have almost 30 messages like these at the end: ``` 18:04:19 leaktest.go:161: Leaked goroutine: goroutine 1879 [select, 2 minutes]: database/sql.(*DB).connectionOpener(0xc003f2bc70, {0x10812e000, 0xc00059d220}) GOROOT/src/database/sql/sql.go:1218 +0x8d created by database/sql.OpenDB GOROOT/src/database/sql/sql.go:791 +0x18d ``` This PR cleans that up. Epic: none Release note: None --- pkg/cmd/roachtest/tests/cluster_to_cluster.go | 25 ++++++++++++---- pkg/cmd/roachtest/tests/multitenant_tpch.go | 3 +- pkg/cmd/roachtest/tests/multitenant_utils.go | 29 +++++++++++++++---- pkg/cmd/roachtest/tests/tpcc.go | 2 +- pkg/cmd/roachtest/tests/tpchvec.go | 2 +- 5 files changed, 47 insertions(+), 14 deletions(-) diff --git a/pkg/cmd/roachtest/tests/cluster_to_cluster.go b/pkg/cmd/roachtest/tests/cluster_to_cluster.go index 1d778c9d5c66..000e045be53d 100644 --- a/pkg/cmd/roachtest/tests/cluster_to_cluster.go +++ b/pkg/cmd/roachtest/tests/cluster_to_cluster.go @@ -484,7 +484,9 @@ func makeReplicationDriver(t test.Test, c cluster.Cluster, rs replicationSpec) * } } -func (rd *replicationDriver) setupC2C(ctx context.Context, t test.Test, c cluster.Cluster) { +func (rd *replicationDriver) setupC2C( + ctx context.Context, t test.Test, c cluster.Cluster, +) (cleanup func()) { if len(rd.rs.multiregion.srcLocalities) != 0 { nodeCount := rd.rs.srcNodes + rd.rs.dstNodes localityCount := len(rd.rs.multiregion.srcLocalities) + len(rd.rs.multiregion.destLocalities) @@ -585,6 +587,10 @@ func (rd *replicationDriver) setupC2C(ctx context.Context, t test.Test, c cluste require.NoError(rd.t, rd.c.StartGrafana(ctx, promLog, rd.setup.promCfg)) rd.t.L().Printf("Prom has started") } + return func() { + srcDB.Close() + destDB.Close() + } } func (rd *replicationDriver) crdbNodes() option.NodeListOption { @@ -776,7 +782,9 @@ func (rd *replicationDriver) onFingerprintMismatch( ) { rd.t.L().Printf("conducting table level fingerprints") srcTenantConn := rd.c.Conn(ctx, rd.t.L(), 1, option.TenantName(rd.setup.src.name)) + defer srcTenantConn.Close() dstTenantConn := rd.c.Conn(ctx, rd.t.L(), rd.rs.srcNodes+1, option.TenantName(rd.setup.dst.name)) + defer dstTenantConn.Close() fingerprintBisectErr := 
replicationutils.InvestigateFingerprints(ctx, srcTenantConn, dstTenantConn, startTime, endTime) @@ -957,7 +965,8 @@ func (rd *replicationDriver) main(ctx context.Context) { rd.metrics.cutoverEnd = newMetricSnapshot(metricSnapper, timeutil.Now()) rd.t.L().Printf("starting the destination tenant") - startInMemoryTenant(ctx, rd.t, rd.c, rd.setup.dst.name, rd.setup.dst.gatewayNodes) + conn := startInMemoryTenant(ctx, rd.t, rd.c, rd.setup.dst.name, rd.setup.dst.gatewayNodes) + conn.Close() rd.metrics.export(rd.t, len(rd.setup.src.nodes)) @@ -1030,7 +1039,8 @@ func runAcceptanceClusterReplication(ctx context.Context, t test.Test, c cluster suites: registry.Suites("nightly"), } rd := makeReplicationDriver(t, c, sp) - rd.setupC2C(ctx, t, c) + cleanup := rd.setupC2C(ctx, t, c) + defer cleanup() // Spin up a monitor to capture any node deaths. m := rd.newMonitor(ctx) @@ -1227,7 +1237,8 @@ func registerClusterToCluster(r registry.Registry) { c2cRegisterWrapper(r, sp, func(ctx context.Context, t test.Test, c cluster.Cluster) { rd := makeReplicationDriver(t, c, sp) - rd.setupC2C(ctx, t, c) + cleanup := rd.setupC2C(ctx, t, c) + defer cleanup() // Spin up a monitor to capture any node deaths. m := rd.newMonitor(ctx) m.Go(func(ctx context.Context) error { @@ -1499,7 +1510,8 @@ func registerClusterReplicationResilience(r registry.Registry) { rrd := makeReplShutdownDriver(t, c, rsp) rrd.t.L().Printf("Planning to shut down node during %s phase", rrd.phase) - rrd.setupC2C(ctx, t, c) + cleanup := rrd.setupC2C(ctx, t, c) + defer cleanup() shutdownSetupDone := make(chan struct{}) @@ -1609,7 +1621,8 @@ func registerClusterReplicationDisconnect(r registry.Registry) { } c2cRegisterWrapper(r, sp, func(ctx context.Context, t test.Test, c cluster.Cluster) { rd := makeReplicationDriver(t, c, sp) - rd.setupC2C(ctx, t, c) + cleanup := rd.setupC2C(ctx, t, c) + defer cleanup() shutdownSetupDone := make(chan struct{}) diff --git a/pkg/cmd/roachtest/tests/multitenant_tpch.go b/pkg/cmd/roachtest/tests/multitenant_tpch.go index 1cc23ffd26f9..606ce20284a2 100644 --- a/pkg/cmd/roachtest/tests/multitenant_tpch.go +++ b/pkg/cmd/roachtest/tests/multitenant_tpch.go @@ -93,7 +93,8 @@ func runMultiTenantTPCH( // Now we create a tenant and run all TPCH queries within it. if sharedProcess { - db := createInMemoryTenant(ctx, t, c, appTenantName, c.All(), true /* secure */) + db := createInMemoryTenantWithConn(ctx, t, c, appTenantName, c.All(), true /* secure */) + defer db.Close() url := fmt.Sprintf("{pgurl:1:%s}", appTenantName) runTPCH(db, url, 1 /* setupIdx */) } else { diff --git a/pkg/cmd/roachtest/tests/multitenant_utils.go b/pkg/cmd/roachtest/tests/multitenant_utils.go index 0d7dab42ab95..3859d3ecc765 100644 --- a/pkg/cmd/roachtest/tests/multitenant_utils.go +++ b/pkg/cmd/roachtest/tests/multitenant_utils.go @@ -327,9 +327,7 @@ func createTenantAdminRole(t test.Test, tenantName string, tenantSQL *sqlutils.S const appTenantName = "app" // createInMemoryTenant runs through the necessary steps to create an in-memory -// tenant without resource limits and full dbconsole viewing privileges. As a -// convenience, it also returns a connection to the tenant (on a random node in -// the cluster). +// tenant without resource limits and full dbconsole viewing privileges. 
func createInMemoryTenant( ctx context.Context, t test.Test, @@ -337,8 +335,26 @@ func createInMemoryTenant( tenantName string, nodes option.NodeListOption, secure bool, +) { + db := createInMemoryTenantWithConn(ctx, t, c, tenantName, nodes, secure) + db.Close() +} + +// createInMemoryTenantWithConn runs through the necessary steps to create an +// in-memory tenant without resource limits and full dbconsole viewing +// privileges. As a convenience, it also returns a connection to the tenant (on +// a random node in the cluster). +func createInMemoryTenantWithConn( + ctx context.Context, + t test.Test, + c cluster.Cluster, + tenantName string, + nodes option.NodeListOption, + secure bool, ) *gosql.DB { - sysSQL := sqlutils.MakeSQLRunner(c.Conn(ctx, t.L(), nodes.RandNode()[0])) + sysDB := c.Conn(ctx, t.L(), nodes.RandNode()[0]) + defer sysDB.Close() + sysSQL := sqlutils.MakeSQLRunner(sysDB) sysSQL.Exec(t, "CREATE TENANT $1", tenantName) tenantConn := startInMemoryTenant(ctx, t, c, tenantName, nodes) @@ -360,7 +376,9 @@ func startInMemoryTenant( tenantName string, nodes option.NodeListOption, ) *gosql.DB { - sysSQL := sqlutils.MakeSQLRunner(c.Conn(ctx, t.L(), nodes.RandNode()[0])) + sysDB := c.Conn(ctx, t.L(), nodes.RandNode()[0]) + defer sysDB.Close() + sysSQL := sqlutils.MakeSQLRunner(sysDB) sysSQL.Exec(t, "ALTER TENANT $1 START SERVICE SHARED", tenantName) sysSQL.Exec(t, `ALTER TENANT $1 GRANT CAPABILITY can_view_node_info=true, can_admin_split=true,can_view_tsdb_metrics=true`, tenantName) sysSQL.Exec(t, `ALTER TENANT $1 SET CLUSTER SETTING sql.split_at.allow_for_secondary_tenant.enabled=true`, tenantName) @@ -384,6 +402,7 @@ func startInMemoryTenant( return err } if err = tenantConn.Ping(); err != nil { + tenantConn.Close() return err } return nil diff --git a/pkg/cmd/roachtest/tests/tpcc.go b/pkg/cmd/roachtest/tests/tpcc.go index 00a1594cc3f1..ea2ca43e90a5 100644 --- a/pkg/cmd/roachtest/tests/tpcc.go +++ b/pkg/cmd/roachtest/tests/tpcc.go @@ -1431,7 +1431,7 @@ func runTPCCBench(ctx context.Context, t test.Test, c cluster.Cluster, b tpccBen var db *gosql.DB if b.SharedProcessMT { - db = createInMemoryTenant(ctx, t, c, appTenantName, roachNodes, false /* secure */) + db = createInMemoryTenantWithConn(ctx, t, c, appTenantName, roachNodes, false /* secure */) } else { db = c.Conn(ctx, t.L(), 1) } diff --git a/pkg/cmd/roachtest/tests/tpchvec.go b/pkg/cmd/roachtest/tests/tpchvec.go index 48805496d1f1..4c34837a26c2 100644 --- a/pkg/cmd/roachtest/tests/tpchvec.go +++ b/pkg/cmd/roachtest/tests/tpchvec.go @@ -601,7 +601,7 @@ func runTPCHVec( if _, err := singleTenantConn.Exec("SET CLUSTER SETTING kv.range_merge.queue_enabled = false;"); err != nil { t.Fatal(err) } - conn = createInMemoryTenant(ctx, t, c, appTenantName, c.All(), false /* secure */) + conn = createInMemoryTenantWithConn(ctx, t, c, appTenantName, c.All(), false /* secure */) } else { conn = c.Conn(ctx, t.L(), 1) disableMergeQueue = true From 3cbfa227fb66c730876216b484ed1146600a0606 Mon Sep 17 00:00:00 2001 From: Andrew Baptist Date: Thu, 24 Aug 2023 11:41:31 -0400 Subject: [PATCH 5/5] spanconfig: pass SpanConfig by pointer SpanConfig can be a large object and it is more efficient to pass by pointer now that it goes through multiple layers of code. 
Epic: none Release note: None --- .../allocator/allocatorimpl/allocator.go | 40 ++++----- .../allocator/allocatorimpl/allocator_test.go | 85 ++++++++++--------- pkg/kv/kvserver/allocator/plan/replicate.go | 30 +++---- pkg/kv/kvserver/allocator_impl_test.go | 8 +- pkg/kv/kvserver/asim/event/mutation_event.go | 2 +- pkg/kv/kvserver/asim/op/relocate_range.go | 2 +- .../kvserver/asim/queue/allocator_replica.go | 6 +- pkg/kv/kvserver/asim/queue/replicate_queue.go | 4 +- .../asim/queue/replicate_queue_test.go | 4 +- .../kvserver/asim/queue/split_queue_test.go | 2 +- pkg/kv/kvserver/asim/state/config_loader.go | 2 +- pkg/kv/kvserver/asim/state/impl.go | 14 +-- pkg/kv/kvserver/asim/state/state.go | 6 +- pkg/kv/kvserver/asim/state/state_test.go | 4 +- .../asim/storerebalancer/candidate_replica.go | 2 +- pkg/kv/kvserver/helpers_test.go | 2 +- pkg/kv/kvserver/replica.go | 14 ++- pkg/kv/kvserver/replica_command.go | 12 +-- pkg/kv/kvserver/replica_range_lease.go | 2 +- pkg/kv/kvserver/replica_rankings.go | 2 +- pkg/kv/kvserver/replica_test.go | 2 +- pkg/kv/kvserver/replicate_queue.go | 16 ++-- pkg/kv/kvserver/store.go | 8 +- pkg/kv/kvserver/store_rebalancer.go | 2 +- 24 files changed, 140 insertions(+), 131 deletions(-) diff --git a/pkg/kv/kvserver/allocator/allocatorimpl/allocator.go b/pkg/kv/kvserver/allocator/allocatorimpl/allocator.go index b3c8469c94f5..9ee8433dcefe 100644 --- a/pkg/kv/kvserver/allocator/allocatorimpl/allocator.go +++ b/pkg/kv/kvserver/allocator/allocatorimpl/allocator.go @@ -863,7 +863,7 @@ func FilterReplicasForAction( func (a *Allocator) ComputeAction( ctx context.Context, storePool storepool.AllocatorStorePool, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, desc *roachpb.RangeDescriptor, ) (action AllocatorAction, priority float64) { if storePool == nil { @@ -931,7 +931,7 @@ func (a *Allocator) ComputeAction( func (a *Allocator) computeAction( ctx context.Context, storePool storepool.AllocatorStorePool, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, voterReplicas []roachpb.ReplicaDescriptor, nonVoterReplicas []roachpb.ReplicaDescriptor, ) (action AllocatorAction, adjustedPriority float64) { @@ -1197,7 +1197,7 @@ func (s *GoodCandidateSelector) selectOne(cl candidateList) *candidate { func (a *Allocator) AllocateTarget( ctx context.Context, storePool storepool.AllocatorStorePool, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, existingVoters, existingNonVoters []roachpb.ReplicaDescriptor, replacing *roachpb.ReplicaDescriptor, replicaStatus ReplicaStatus, @@ -1277,7 +1277,7 @@ func (a *Allocator) AllocateTarget( func (a *Allocator) CheckAvoidsFragileQuorum( ctx context.Context, storePool storepool.AllocatorStorePool, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, existingVoters, remainingLiveNonVoters []roachpb.ReplicaDescriptor, replicaStatus ReplicaStatus, replicaType TargetReplicaType, @@ -1317,7 +1317,7 @@ func (a *Allocator) CheckAvoidsFragileQuorum( func (a *Allocator) AllocateVoter( ctx context.Context, storePool storepool.AllocatorStorePool, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, existingVoters, existingNonVoters []roachpb.ReplicaDescriptor, replacing *roachpb.ReplicaDescriptor, replicaStatus ReplicaStatus, @@ -1331,7 +1331,7 @@ func (a *Allocator) AllocateVoter( func (a *Allocator) AllocateNonVoter( ctx context.Context, storePool storepool.AllocatorStorePool, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, existingVoters, existingNonVoters []roachpb.ReplicaDescriptor, replacing *roachpb.ReplicaDescriptor, 
replicaStatus ReplicaStatus, @@ -1346,7 +1346,7 @@ func (a *Allocator) AllocateTargetFromList( ctx context.Context, storePool storepool.AllocatorStorePool, candidateStores storepool.StoreList, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, existingVoters, existingNonVoters []roachpb.ReplicaDescriptor, options ScorerOptions, selector CandidateSelector, @@ -1361,7 +1361,7 @@ func (a *Allocator) allocateTargetFromList( ctx context.Context, storePool storepool.AllocatorStorePool, candidateStores storepool.StoreList, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, existingVoters, existingNonVoters []roachpb.ReplicaDescriptor, replacing *roachpb.ReplicaDescriptor, options ScorerOptions, @@ -1464,7 +1464,7 @@ func (a Allocator) simulateRemoveTarget( ctx context.Context, storePool storepool.AllocatorStorePool, targetStore roachpb.StoreID, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, candidates []roachpb.ReplicaDescriptor, existingVoters []roachpb.ReplicaDescriptor, existingNonVoters []roachpb.ReplicaDescriptor, @@ -1521,7 +1521,7 @@ func (a Allocator) simulateRemoveTarget( func (a Allocator) RemoveTarget( ctx context.Context, storePool storepool.AllocatorStorePool, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, candidateStoreList storepool.StoreList, existingVoters []roachpb.ReplicaDescriptor, existingNonVoters []roachpb.ReplicaDescriptor, @@ -1601,7 +1601,7 @@ func (a Allocator) RemoveTarget( func (a Allocator) RemoveVoter( ctx context.Context, storePool storepool.AllocatorStorePool, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, voterCandidates []roachpb.ReplicaDescriptor, existingVoters []roachpb.ReplicaDescriptor, existingNonVoters []roachpb.ReplicaDescriptor, @@ -1634,7 +1634,7 @@ func (a Allocator) RemoveVoter( func (a Allocator) RemoveNonVoter( ctx context.Context, storePool storepool.AllocatorStorePool, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, nonVoterCandidates []roachpb.ReplicaDescriptor, existingVoters []roachpb.ReplicaDescriptor, existingNonVoters []roachpb.ReplicaDescriptor, @@ -1664,7 +1664,7 @@ func (a Allocator) RemoveNonVoter( func (a Allocator) RebalanceTarget( ctx context.Context, storePool storepool.AllocatorStorePool, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, raftStatus *raft.Status, existingVoters, existingNonVoters []roachpb.ReplicaDescriptor, rangeUsageInfo allocator.RangeUsageInfo, @@ -1860,7 +1860,7 @@ func (a Allocator) RebalanceTarget( func (a Allocator) RebalanceVoter( ctx context.Context, storePool storepool.AllocatorStorePool, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, raftStatus *raft.Status, existingVoters, existingNonVoters []roachpb.ReplicaDescriptor, rangeUsageInfo allocator.RangeUsageInfo, @@ -1896,7 +1896,7 @@ func (a Allocator) RebalanceVoter( func (a Allocator) RebalanceNonVoter( ctx context.Context, storePool storepool.AllocatorStorePool, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, raftStatus *raft.Status, existingVoters, existingNonVoters []roachpb.ReplicaDescriptor, rangeUsageInfo allocator.RangeUsageInfo, @@ -1962,7 +1962,7 @@ func (a *Allocator) ValidLeaseTargets( ctx context.Context, storePool storepool.AllocatorStorePool, desc *roachpb.RangeDescriptor, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, existing []roachpb.ReplicaDescriptor, leaseRepl interface { StoreID() roachpb.StoreID @@ -2136,7 +2136,7 @@ func (a *Allocator) leaseholderShouldMoveDueToIOOverload( func (a *Allocator) leaseholderShouldMoveDueToPreferences( ctx context.Context, storePool 
storepool.AllocatorStorePool, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, leaseRepl interface { StoreID() roachpb.StoreID RaftStatus() *raft.Status @@ -2220,7 +2220,7 @@ func (a *Allocator) TransferLeaseTarget( ctx context.Context, storePool storepool.AllocatorStorePool, desc *roachpb.RangeDescriptor, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, existing []roachpb.ReplicaDescriptor, leaseRepl interface { StoreID() roachpb.StoreID @@ -2494,7 +2494,7 @@ func (a *Allocator) ShouldTransferLease( ctx context.Context, storePool storepool.AllocatorStorePool, desc *roachpb.RangeDescriptor, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, existing []roachpb.ReplicaDescriptor, leaseRepl interface { StoreID() roachpb.StoreID @@ -2816,7 +2816,7 @@ func (a Allocator) shouldTransferLeaseForLeaseCountConvergence( // replicas that meet lease preferences (among the `existing` replicas). func (a Allocator) PreferredLeaseholders( storePool storepool.AllocatorStorePool, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, existing []roachpb.ReplicaDescriptor, ) []roachpb.ReplicaDescriptor { // Go one preference at a time. As soon as we've found replicas that match a diff --git a/pkg/kv/kvserver/allocator/allocatorimpl/allocator_test.go b/pkg/kv/kvserver/allocator/allocatorimpl/allocator_test.go index de35ebcc43a9..f4a041235037 100644 --- a/pkg/kv/kvserver/allocator/allocatorimpl/allocator_test.go +++ b/pkg/kv/kvserver/allocator/allocatorimpl/allocator_test.go @@ -51,7 +51,7 @@ import ( "go.etcd.io/raft/v3/tracker" ) -var simpleSpanConfig = roachpb.SpanConfig{ +var simpleSpanConfig = &roachpb.SpanConfig{ NumReplicas: 1, Constraints: []roachpb.ConstraintsConjunction{ { @@ -63,21 +63,21 @@ var simpleSpanConfig = roachpb.SpanConfig{ }, } -var multiDCConfigSSD = roachpb.SpanConfig{ +var multiDCConfigSSD = &roachpb.SpanConfig{ NumReplicas: 2, Constraints: []roachpb.ConstraintsConjunction{ {Constraints: []roachpb.Constraint{{Value: "ssd", Type: roachpb.Constraint_REQUIRED}}}, }, } -var multiDCConfigConstrainToA = roachpb.SpanConfig{ +var multiDCConfigConstrainToA = &roachpb.SpanConfig{ NumReplicas: 2, Constraints: []roachpb.ConstraintsConjunction{ {Constraints: []roachpb.Constraint{{Value: "a", Type: roachpb.Constraint_REQUIRED}}}, }, } -var multiDCConfigUnsatisfiableVoterConstraints = roachpb.SpanConfig{ +var multiDCConfigUnsatisfiableVoterConstraints = &roachpb.SpanConfig{ NumReplicas: 2, VoterConstraints: []roachpb.ConstraintsConjunction{ {Constraints: []roachpb.Constraint{{Value: "doesNotExist", Type: roachpb.Constraint_REQUIRED}}}, @@ -86,7 +86,7 @@ var multiDCConfigUnsatisfiableVoterConstraints = roachpb.SpanConfig{ // multiDCConfigVoterAndNonVoter prescribes that one voting replica be placed in // DC "b" and one non-voting replica be placed in DC "a". -var multiDCConfigVoterAndNonVoter = roachpb.SpanConfig{ +var multiDCConfigVoterAndNonVoter = &roachpb.SpanConfig{ NumReplicas: 2, Constraints: []roachpb.ConstraintsConjunction{ // Constrain the non-voter to "a". @@ -99,8 +99,8 @@ var multiDCConfigVoterAndNonVoter = roachpb.SpanConfig{ } // emptySpanConfig returns the empty span configuration. 
-func emptySpanConfig() roachpb.SpanConfig { - return roachpb.SpanConfig{} +func emptySpanConfig() *roachpb.SpanConfig { + return &roachpb.SpanConfig{} } func testingStartTime() time.Time { @@ -603,7 +603,7 @@ func TestAllocatorAllocateVoterIOOverloadCheck(t *testing.T) { type testCase struct { name string stores []*roachpb.StoreDescriptor - conf roachpb.SpanConfig + conf *roachpb.SpanConfig // The expected store to add when replicas are alive. The allocator should // pick one of the best stores, with low range count. expectedTargetIfAlive roachpb.StoreID @@ -792,7 +792,7 @@ func TestAllocatorExistingReplica(t *testing.T) { result, _, err := a.AllocateVoter( ctx, sp, - roachpb.SpanConfig{ + &roachpb.SpanConfig{ NumReplicas: 0, Constraints: []roachpb.ConstraintsConjunction{ { @@ -853,7 +853,7 @@ func TestAllocatorReplaceDecommissioningReplica(t *testing.T) { result, _, err := a.AllocateVoter( ctx, oSp, - roachpb.SpanConfig{ + &roachpb.SpanConfig{ NumReplicas: 3, Constraints: []roachpb.ConstraintsConjunction{ { @@ -913,7 +913,7 @@ func TestAllocatorReplaceFailsOnConstrainedDecommissioningReplica(t *testing.T) _, _, err := a.AllocateVoter( ctx, oSp, - roachpb.SpanConfig{ + &roachpb.SpanConfig{ NumReplicas: 3, Constraints: []roachpb.ConstraintsConjunction{ { @@ -2274,15 +2274,15 @@ func TestAllocatorTransferLeaseTargetConstraints(t *testing.T) { } } - constraints := func(value string) roachpb.SpanConfig { - return roachpb.SpanConfig{ + constraints := func(value string) *roachpb.SpanConfig { + return &roachpb.SpanConfig{ NumReplicas: 1, Constraints: constraint(value), } } - voterConstraints := func(value string) roachpb.SpanConfig { - return roachpb.SpanConfig{ + voterConstraints := func(value string) *roachpb.SpanConfig { + return &roachpb.SpanConfig{ NumReplicas: 1, VoterConstraints: constraint(value), } @@ -2292,7 +2292,7 @@ func TestAllocatorTransferLeaseTargetConstraints(t *testing.T) { existing []roachpb.ReplicaDescriptor leaseholder roachpb.StoreID expected roachpb.StoreID - conf roachpb.SpanConfig + conf *roachpb.SpanConfig }{ {existing: existing, leaseholder: 5, expected: 1, conf: constraints("1")}, {existing: existing, leaseholder: 5, expected: 1, conf: voterConstraints("1")}, @@ -2386,7 +2386,7 @@ func TestAllocatorTransferLeaseTargetDraining(t *testing.T) { leaseholder roachpb.StoreID excludeLeaseRepl bool expected roachpb.StoreID - conf roachpb.SpanConfig + conf *roachpb.SpanConfig }{ // No existing lease holder, nothing to do. {existing: existing, leaseholder: 0, excludeLeaseRepl: false, expected: 0, conf: emptySpanConfig()}, @@ -2405,13 +2405,13 @@ func TestAllocatorTransferLeaseTargetDraining(t *testing.T) { // Verify that lease preferences dont impact draining. // If the store that is within the lease preferences (store 1) is draining, // we'd like the lease to stay on the next best store (which is store 2). - {existing: existing, leaseholder: 2, excludeLeaseRepl: false, expected: 0, conf: roachpb.SpanConfig{LeasePreferences: preferDC1}}, + {existing: existing, leaseholder: 2, excludeLeaseRepl: false, expected: 0, conf: &roachpb.SpanConfig{LeasePreferences: preferDC1}}, // If the current lease on store 2 needs to be shed (indicated by // excludeLeaseRepl = false), and store 1 is draining, then store 3 // is the only reasonable lease transfer target. 
- {existing: existing, leaseholder: 2, excludeLeaseRepl: true, expected: 3, conf: roachpb.SpanConfig{LeasePreferences: preferDC1}}, - {existing: existing, leaseholder: 2, excludeLeaseRepl: false, expected: 3, conf: roachpb.SpanConfig{LeasePreferences: preferRegion1}}, - {existing: existing, leaseholder: 2, excludeLeaseRepl: true, expected: 3, conf: roachpb.SpanConfig{LeasePreferences: preferRegion1}}, + {existing: existing, leaseholder: 2, excludeLeaseRepl: true, expected: 3, conf: &roachpb.SpanConfig{LeasePreferences: preferDC1}}, + {existing: existing, leaseholder: 2, excludeLeaseRepl: false, expected: 3, conf: &roachpb.SpanConfig{LeasePreferences: preferRegion1}}, + {existing: existing, leaseholder: 2, excludeLeaseRepl: true, expected: 3, conf: &roachpb.SpanConfig{LeasePreferences: preferRegion1}}, } for _, c := range testCases { t.Run("", func(t *testing.T) { @@ -2958,7 +2958,7 @@ func TestAllocatorLeasePreferences(t *testing.T) { for _, c := range testCases { t.Run("", func(t *testing.T) { - conf := roachpb.SpanConfig{LeasePreferences: c.preferences} + conf := &roachpb.SpanConfig{LeasePreferences: c.preferences} result := a.ShouldTransferLease( ctx, sp, @@ -3088,7 +3088,7 @@ func TestAllocatorLeasePreferencesMultipleStoresPerLocality(t *testing.T) { for _, c := range testCases { t.Run("", func(t *testing.T) { - conf := roachpb.SpanConfig{LeasePreferences: c.preferences} + conf := &roachpb.SpanConfig{LeasePreferences: c.preferences} target := a.TransferLeaseTarget( ctx, sp, @@ -3251,7 +3251,7 @@ func TestAllocatorConstraintsAndVoterConstraints(t *testing.T) { name string existingVoters, existingNonVoters []roachpb.ReplicaDescriptor stores []*roachpb.StoreDescriptor - conf roachpb.SpanConfig + conf *roachpb.SpanConfig expectedVoters, expectedNonVoters []roachpb.StoreID shouldVoterAllocFail, shouldNonVoterAllocFail bool expError string @@ -3790,7 +3790,7 @@ func TestAllocatorNonVoterAllocationExcludesVoterNodes(t *testing.T) { name string existingVoters, existingNonVoters []roachpb.ReplicaDescriptor stores []*roachpb.StoreDescriptor - conf roachpb.SpanConfig + conf *roachpb.SpanConfig expected roachpb.StoreID shouldFail bool expError string @@ -4360,7 +4360,7 @@ func TestAllocatorRebalanceNonVoters(t *testing.T) { type testCase struct { name string stores []*roachpb.StoreDescriptor - conf roachpb.SpanConfig + conf *roachpb.SpanConfig existingVoters, existingNonVoters []roachpb.ReplicaDescriptor expectNoAction bool expectedRemoveTargets, expectedAddTargets []roachpb.StoreID @@ -4511,7 +4511,7 @@ func TestAllocatorRebalanceIOOverloadCheck(t *testing.T) { type testCase struct { name string stores []*roachpb.StoreDescriptor - conf roachpb.SpanConfig + conf *roachpb.SpanConfig existingVoters []roachpb.ReplicaDescriptor expectNoAction bool expectedRemoveTargets, expectedAddTargets []roachpb.StoreID @@ -4637,7 +4637,7 @@ func TestVotersCanRebalanceToNonVoterStores(t *testing.T) { sg := gossiputil.NewStoreGossiper(g) sg.GossipStores(multiDiversityDCStores, t) - conf := roachpb.SpanConfig{ + conf := &roachpb.SpanConfig{ NumReplicas: 4, NumVoters: 2, // We constrain 2 voting replicas to datacenter "a" (stores 1 and 2) but @@ -5503,7 +5503,7 @@ func TestRebalanceCandidatesNumReplicasConstraints(t *testing.T) { } } var rangeUsageInfo allocator.RangeUsageInfo - conf := roachpb.SpanConfig{ + conf := &roachpb.SpanConfig{ Constraints: tc.constraints, NumReplicas: tc.numReplicas, } @@ -6649,7 +6649,7 @@ func TestAllocatorComputeAction(t *testing.T) { lastPriority := float64(999999999) for i, tcase := 
range testCases { - action, priority := a.ComputeAction(ctx, sp, tcase.conf, &tcase.desc) + action, priority := a.ComputeAction(ctx, sp, &tcase.conf, &tcase.desc) if tcase.expectedAction != action { t.Errorf("Test case %d expected action %q, got action %q", i, allocatorActionNames[tcase.expectedAction], allocatorActionNames[action]) @@ -6746,7 +6746,7 @@ func TestAllocatorComputeActionRemoveDead(t *testing.T) { for i, tcase := range testCases { mockStorePool(sp, tcase.live, nil, tcase.dead, nil, nil, nil) - action, _ := a.ComputeAction(ctx, sp, conf, &tcase.desc) + action, _ := a.ComputeAction(ctx, sp, &conf, &tcase.desc) if tcase.expectedAction != action { t.Errorf("Test case %d expected action %d, got action %d", i, tcase.expectedAction, action) } @@ -6852,7 +6852,7 @@ func TestAllocatorComputeActionWithStorePoolRemoveDead(t *testing.T) { return sp.NodeLivenessFn(nid) }, getNumNodes) - action, _ := a.ComputeAction(ctx, oSp, conf, &tcase.desc) + action, _ := a.ComputeAction(ctx, oSp, &conf, &tcase.desc) if tcase.expectedAction != action { t.Errorf("Test case %d expected action %d, got action %d", i, tcase.expectedAction, action) } @@ -6926,7 +6926,7 @@ func TestAllocatorComputeActionSuspect(t *testing.T) { for i, tcase := range testCases { mockStorePool(sp, tcase.live, nil, nil, nil, nil, tcase.suspect) - action, _ := a.ComputeAction(ctx, sp, conf, &tcase.desc) + action, _ := a.ComputeAction(ctx, sp, &conf, &tcase.desc) if tcase.expectedAction != action { t.Errorf("Test case %d expected action %d, got action %d", i, tcase.expectedAction, action) } @@ -7205,7 +7205,7 @@ func TestAllocatorComputeActionDecommission(t *testing.T) { for i, tcase := range testCases { mockStorePool(sp, tcase.live, nil, tcase.dead, tcase.decommissioning, tcase.decommissioned, nil) - action, _ := a.ComputeAction(ctx, sp, tcase.conf, &tcase.desc) + action, _ := a.ComputeAction(ctx, sp, &tcase.conf, &tcase.desc) if tcase.expectedAction != action { t.Errorf("Test case %d expected action %s, got action %s", i, tcase.expectedAction, action) continue @@ -7503,7 +7503,7 @@ func TestAllocatorComputeActionWithStorePoolDecommission(t *testing.T) { oSp := storepool.NewOverrideStorePool(sp, storepool.OverrideNodeLivenessFunc(overrideLivenessMap, sp.NodeLivenessFn), getNumNodes, ) - action, _ := a.ComputeAction(ctx, oSp, tcase.conf, &tcase.desc) + action, _ := a.ComputeAction(ctx, oSp, &tcase.conf, &tcase.desc) if tcase.expectedAction != action { t.Errorf("Test case %d expected action %s, got action %s", i, tcase.expectedAction, action) } @@ -7539,7 +7539,7 @@ func TestAllocatorRemoveLearner(t *testing.T) { defer stopper.Stop(ctx) live, dead := []roachpb.StoreID{1, 2}, []roachpb.StoreID{3} mockStorePool(sp, live, nil, dead, nil, nil, nil) - action, _ := a.ComputeAction(ctx, sp, conf, &rangeWithLearnerDesc) + action, _ := a.ComputeAction(ctx, sp, &conf, &rangeWithLearnerDesc) require.Equal(t, AllocatorRemoveLearner, action) } @@ -7761,7 +7761,7 @@ func TestAllocatorComputeActionDynamicNumReplicas(t *testing.T) { effectiveNumReplicas := GetNeededVoters(conf.NumReplicas, clusterNodes) require.Equal(t, c.expectedNumReplicas, effectiveNumReplicas, "clusterNodes=%d", clusterNodes) - action, _ := a.ComputeAction(ctx, sp, conf, &desc) + action, _ := a.ComputeAction(ctx, sp, &conf, &desc) require.Equal(t, c.expectedAction.String(), action.String()) }) } @@ -7845,7 +7845,7 @@ func TestAllocatorComputeActionNoStorePool(t *testing.T) { defer log.Scope(t).Close(t) a := MakeAllocator(nil, false, nil, nil) - action, priority := 
a.ComputeAction(context.Background(), nil, roachpb.SpanConfig{}, nil) + action, priority := a.ComputeAction(context.Background(), nil, &roachpb.SpanConfig{}, nil) if action != AllocatorNoop { t.Errorf("expected AllocatorNoop, but got %v", action) } @@ -8175,10 +8175,11 @@ func TestAllocatorRebalanceDeterminism(t *testing.T) { var rangeUsageInfo allocator.RangeUsageInfo // Ensure that we wouldn't normally rebalance when all stores have the same // replica count. + sc := roachpb.TestingDefaultSpanConfig() add, remove, _, _ := a.RebalanceVoter( ctx, sp, - roachpb.TestingDefaultSpanConfig(), + &sc, nil, replicas(1, 2, 5), nil, @@ -8378,7 +8379,7 @@ func TestAllocatorRebalanceAway(t *testing.T) { actual, _, _, ok := a.RebalanceVoter( ctx, sp, - roachpb.SpanConfig{Constraints: []roachpb.ConstraintsConjunction{constraints}}, + &roachpb.SpanConfig{Constraints: []roachpb.ConstraintsConjunction{constraints}}, nil, existingReplicas, nil, @@ -9066,7 +9067,7 @@ func TestNonVoterPrioritizationInVoterAdditions(t *testing.T) { testCases := []struct { existingVoters []roachpb.ReplicaDescriptor existingNonVoters []roachpb.ReplicaDescriptor - spanConfig roachpb.SpanConfig + spanConfig *roachpb.SpanConfig expectedTargetAllocate roachpb.ReplicationTarget }{ // NB: Store 5 has a non-voter and range count 516 and store 4 can add a @@ -9113,7 +9114,7 @@ func TestNonVoterPrioritizationInVoterAdditions(t *testing.T) { existingNonVoters: []roachpb.ReplicaDescriptor{ {NodeID: 5, StoreID: 5}, }, - spanConfig: roachpb.SpanConfig{ + spanConfig: &roachpb.SpanConfig{ VoterConstraints: []roachpb.ConstraintsConjunction{ { Constraints: []roachpb.Constraint{ diff --git a/pkg/kv/kvserver/allocator/plan/replicate.go b/pkg/kv/kvserver/allocator/plan/replicate.go index 4d5606f5020f..d6bff1f725b5 100644 --- a/pkg/kv/kvserver/allocator/plan/replicate.go +++ b/pkg/kv/kvserver/allocator/plan/replicate.go @@ -61,7 +61,7 @@ type ReplicationPlanner interface { now hlc.ClockTimestamp, repl AllocatorReplica, desc *roachpb.RangeDescriptor, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, canTransferLeaseFrom CanTransferLeaseFrom, ) (bool, float64) // PlanOneChange calls the allocator to determine an action to be taken upon a @@ -71,7 +71,7 @@ type ReplicationPlanner interface { ctx context.Context, repl AllocatorReplica, desc *roachpb.RangeDescriptor, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, canTransferLeaseFrom CanTransferLeaseFrom, scatter bool, ) (ReplicateChange, error) @@ -82,7 +82,7 @@ type ReplicationPlanner interface { type CanTransferLeaseFrom func( ctx context.Context, repl LeaseCheckReplica, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, ) bool // LeaseCheckReplica contains methods that may be used to check a replica's @@ -90,7 +90,7 @@ type CanTransferLeaseFrom func( type LeaseCheckReplica interface { HasCorrectLeaseType(lease roachpb.Lease) bool LeaseStatusAt(ctx context.Context, now hlc.ClockTimestamp) kvserverpb.LeaseStatus - LeaseViolatesPreferences(context.Context, roachpb.SpanConfig) bool + LeaseViolatesPreferences(context.Context, *roachpb.SpanConfig) bool OwnsValidLease(context.Context, hlc.ClockTimestamp) bool } @@ -145,7 +145,7 @@ func (rp ReplicaPlanner) ShouldPlanChange( now hlc.ClockTimestamp, repl AllocatorReplica, desc *roachpb.RangeDescriptor, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, canTransferLeaseFrom CanTransferLeaseFrom, ) (shouldPlanChange bool, priority float64) { @@ -241,7 +241,7 @@ func (rp ReplicaPlanner) PlanOneChange( ctx context.Context, repl 
AllocatorReplica, desc *roachpb.RangeDescriptor, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, canTransferLeaseFrom CanTransferLeaseFrom, scatter bool, ) (change ReplicateChange, _ error) { @@ -387,7 +387,7 @@ func (rp ReplicaPlanner) addOrReplaceVoters( ctx context.Context, repl AllocatorReplica, desc *roachpb.RangeDescriptor, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, existingVoters []roachpb.ReplicaDescriptor, remainingLiveVoters, remainingLiveNonVoters []roachpb.ReplicaDescriptor, removeIdx int, @@ -484,7 +484,7 @@ func (rp ReplicaPlanner) addOrReplaceNonVoters( ctx context.Context, repl AllocatorReplica, _ *roachpb.RangeDescriptor, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, existingNonVoters []roachpb.ReplicaDescriptor, liveVoterReplicas, liveNonVoterReplicas []roachpb.ReplicaDescriptor, removeIdx int, @@ -544,7 +544,7 @@ func (rp ReplicaPlanner) findRemoveVoter( LastReplicaAdded() (roachpb.ReplicaID, time.Time) RaftStatus() *raft.Status }, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, existingVoters, existingNonVoters []roachpb.ReplicaDescriptor, ) (roachpb.ReplicationTarget, string, error) { // This retry loop involves quick operations on local state, so a @@ -626,7 +626,7 @@ func (rp ReplicaPlanner) removeVoter( ctx context.Context, repl AllocatorReplica, desc *roachpb.RangeDescriptor, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, existingVoters, existingNonVoters []roachpb.ReplicaDescriptor, ) (op AllocationOp, stats ReplicateStats, _ error) { removeVoter, details, err := rp.findRemoveVoter(ctx, repl, conf, existingVoters, existingNonVoters) @@ -669,7 +669,7 @@ func (rp ReplicaPlanner) removeNonVoter( ctx context.Context, repl AllocatorReplica, _ *roachpb.RangeDescriptor, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, existingVoters, existingNonVoters []roachpb.ReplicaDescriptor, ) (op AllocationOp, stats ReplicateStats, _ error) { removeNonVoter, details, err := rp.allocator.RemoveNonVoter( @@ -709,7 +709,7 @@ func (rp ReplicaPlanner) removeDecommissioning( ctx context.Context, repl AllocatorReplica, desc *roachpb.RangeDescriptor, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, targetType allocatorimpl.TargetReplicaType, ) (op AllocationOp, stats ReplicateStats, _ error) { var decommissioningReplicas []roachpb.ReplicaDescriptor @@ -805,7 +805,7 @@ func (rp ReplicaPlanner) considerRebalance( ctx context.Context, repl AllocatorReplica, desc *roachpb.RangeDescriptor, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, existingVoters, existingNonVoters []roachpb.ReplicaDescriptor, allocatorPrio float64, canTransferLeaseFrom CanTransferLeaseFrom, @@ -944,7 +944,7 @@ func (rp ReplicaPlanner) shedLeaseTarget( ctx context.Context, repl AllocatorReplica, desc *roachpb.RangeDescriptor, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, opts allocator.TransferLeaseOptions, ) (op AllocationOp, _ error) { usage := repl.RangeUsageInfo() @@ -1002,7 +1002,7 @@ func (rp ReplicaPlanner) maybeTransferLeaseAwayTarget( ctx context.Context, repl AllocatorReplica, desc *roachpb.RangeDescriptor, - conf roachpb.SpanConfig, + conf *roachpb.SpanConfig, removeStoreID roachpb.StoreID, canTransferLeaseFrom CanTransferLeaseFrom, ) (op AllocationOp, _ error) { diff --git a/pkg/kv/kvserver/allocator_impl_test.go b/pkg/kv/kvserver/allocator_impl_test.go index 15a1c318bb9e..6077918d2b02 100644 --- a/pkg/kv/kvserver/allocator_impl_test.go +++ b/pkg/kv/kvserver/allocator_impl_test.go @@ -32,7 +32,7 @@ import ( const firstRangeID = 
roachpb.RangeID(1) -var simpleSpanConfig = roachpb.SpanConfig{ +var simpleSpanConfig = &roachpb.SpanConfig{ NumReplicas: 1, Constraints: []roachpb.ConstraintsConjunction{ { @@ -323,7 +323,7 @@ func TestAllocatorRebalanceTarget(t *testing.T) { result, _, details, ok := a.RebalanceVoter( ctx, sp, - roachpb.SpanConfig{}, + &roachpb.SpanConfig{}, status, replicas, nil, @@ -349,7 +349,7 @@ func TestAllocatorRebalanceTarget(t *testing.T) { target, _, details, ok := a.RebalanceVoter( ctx, sp, - roachpb.SpanConfig{}, + &roachpb.SpanConfig{}, status, replicas, nil, @@ -369,7 +369,7 @@ func TestAllocatorRebalanceTarget(t *testing.T) { target, origin, details, ok := a.RebalanceVoter( ctx, sp, - roachpb.SpanConfig{}, + &roachpb.SpanConfig{}, status, replicas, nil, diff --git a/pkg/kv/kvserver/asim/event/mutation_event.go b/pkg/kv/kvserver/asim/event/mutation_event.go index 5eefa7631653..ed385a36ec17 100644 --- a/pkg/kv/kvserver/asim/event/mutation_event.go +++ b/pkg/kv/kvserver/asim/event/mutation_event.go @@ -56,7 +56,7 @@ var _ Event = &SetCapacityOverrideEvent{} func (se SetSpanConfigEvent) Func() EventFunc { return MutationFunc(func(ctx context.Context, s state.State) { - s.SetSpanConfig(se.Span, se.Config) + s.SetSpanConfig(se.Span, &se.Config) }) } diff --git a/pkg/kv/kvserver/asim/op/relocate_range.go b/pkg/kv/kvserver/asim/op/relocate_range.go index 37b22cbfaba1..857d0ae6ee57 100644 --- a/pkg/kv/kvserver/asim/op/relocate_range.go +++ b/pkg/kv/kvserver/asim/op/relocate_range.go @@ -72,7 +72,7 @@ func (s *SimRelocateOneOptions) StorePool() storepool.AllocatorStorePool { // SpanConfig returns the span configuration for the range with start key. func (s *SimRelocateOneOptions) SpanConfig( ctx context.Context, startKey roachpb.RKey, -) (roachpb.SpanConfig, error) { +) (*roachpb.SpanConfig, error) { return s.state.RangeFor(state.ToKey(startKey.AsRawKey())).SpanConfig(), nil } diff --git a/pkg/kv/kvserver/asim/queue/allocator_replica.go b/pkg/kv/kvserver/asim/queue/allocator_replica.go index 8a0926f23c00..9185465ff35b 100644 --- a/pkg/kv/kvserver/asim/queue/allocator_replica.go +++ b/pkg/kv/kvserver/asim/queue/allocator_replica.go @@ -59,7 +59,7 @@ func (sr *SimulatorReplica) HasCorrectLeaseType(lease roachpb.Lease) bool { return true } -// CurrentLeaseStatus returns the status of the current lease for the +// LeaseStatusAt returns the status of the current lease for the // timestamp given. // // Common operations to perform on the resulting status are to check if @@ -85,7 +85,7 @@ func (sr *SimulatorReplica) LeaseStatusAt( // error or no preferences defined then it will return false and consider that // to be in-conformance. 
func (sr *SimulatorReplica) LeaseViolatesPreferences( - _ context.Context, conf roachpb.SpanConfig, + _ context.Context, conf *roachpb.SpanConfig, ) bool { descs := sr.state.StoreDescriptors(true /* useCached */, sr.repl.StoreID()) if len(descs) != 1 { @@ -147,7 +147,7 @@ func (sr *SimulatorReplica) GetFirstIndex() kvpb.RaftIndex { return 2 } -func (sr *SimulatorReplica) SpanConfig() (roachpb.SpanConfig, error) { +func (sr *SimulatorReplica) SpanConfig() (*roachpb.SpanConfig, error) { return sr.rng.SpanConfig(), nil } diff --git a/pkg/kv/kvserver/asim/queue/replicate_queue.go b/pkg/kv/kvserver/asim/queue/replicate_queue.go index 5fecaec3a8d7..8971599bd5ce 100644 --- a/pkg/kv/kvserver/asim/queue/replicate_queue.go +++ b/pkg/kv/kvserver/asim/queue/replicate_queue.go @@ -60,7 +60,7 @@ func NewReplicateQueue( } func simCanTransferleaseFrom( - ctx context.Context, repl plan.LeaseCheckReplica, conf roachpb.SpanConfig, + ctx context.Context, repl plan.LeaseCheckReplica, conf *roachpb.SpanConfig, ) bool { return true } @@ -78,7 +78,7 @@ func (rq *replicateQueue) MaybeAdd(ctx context.Context, replica state.Replica, s if err != nil { log.Fatalf(ctx, "conf not found err=%v", err) } - log.VEventf(ctx, 1, "maybe add replica=%s, config=%s", desc, &conf) + log.VEventf(ctx, 1, "maybe add replica=%s, config=%s", desc, conf) shouldPlanChange, priority := rq.planner.ShouldPlanChange( ctx, diff --git a/pkg/kv/kvserver/asim/queue/replicate_queue_test.go b/pkg/kv/kvserver/asim/queue/replicate_queue_test.go index a4fb8c1ce1be..4d2b935b6bf6 100644 --- a/pkg/kv/kvserver/asim/queue/replicate_queue_test.go +++ b/pkg/kv/kvserver/asim/queue/replicate_queue_test.go @@ -110,7 +110,7 @@ func TestReplicateQueue(t *testing.T) { } } - testingState := func(replicaCounts map[state.StoreID]int, spanConfig roachpb.SpanConfig, initialRF int) state.State { + testingState := func(replicaCounts map[state.StoreID]int, spanConfig *roachpb.SpanConfig, initialRF int) state.State { s := state.NewStateWithReplCounts(replicaCounts, initialRF, 1000 /* keyspace */, testSettings) for _, r := range s.Ranges() { s.SetSpanConfigForRange(r.RangeID(), spanConfig) @@ -362,7 +362,7 @@ func TestReplicateQueue(t *testing.T) { initialRF = 2 } - s := testingState(tc.replicaCounts, spanConfig, initialRF) + s := testingState(tc.replicaCounts, &spanConfig, initialRF) changer := state.NewReplicaChanger() store, _ := s.Store(testingStore) rq := NewReplicateQueue( diff --git a/pkg/kv/kvserver/asim/queue/split_queue_test.go b/pkg/kv/kvserver/asim/queue/split_queue_test.go index 91673b6408a1..4fc9dddc6de5 100644 --- a/pkg/kv/kvserver/asim/queue/split_queue_test.go +++ b/pkg/kv/kvserver/asim/queue/split_queue_test.go @@ -48,7 +48,7 @@ func TestSplitQueue(t *testing.T) { NumReplicas: replicationFactor, } for _, r := range s.Ranges() { - s.SetSpanConfigForRange(r.RangeID(), spanConfig) + s.SetSpanConfigForRange(r.RangeID(), &spanConfig) } s.TransferLease(state.RangeID(2 /* The interesting range */), leaseholder) return s diff --git a/pkg/kv/kvserver/asim/state/config_loader.go b/pkg/kv/kvserver/asim/state/config_loader.go index 8c4f24a46283..264af4c73df9 100644 --- a/pkg/kv/kvserver/asim/state/config_loader.go +++ b/pkg/kv/kvserver/asim/state/config_loader.go @@ -396,7 +396,7 @@ func LoadRangeInfo(s State, rangeInfos ...RangeInfo) { )) } - if !s.SetSpanConfigForRange(rng.RangeID(), *r.Config) { + if !s.SetSpanConfigForRange(rng.RangeID(), r.Config) { panic(fmt.Sprintf( "Unable to load config: cannot set span config for range %s", rng, diff --git 
a/pkg/kv/kvserver/asim/state/impl.go b/pkg/kv/kvserver/asim/state/impl.go index 6fc92ddfeb87..35f67860e2b7 100644 --- a/pkg/kv/kvserver/asim/state/impl.go +++ b/pkg/kv/kvserver/asim/state/impl.go @@ -127,7 +127,7 @@ func (rm *rmap) initFirstRange() { startKey: MinKey, endKey: MaxKey, desc: desc, - config: defaultSpanConfig, + config: &defaultSpanConfig, replicas: make(map[StoreID]*replica), leaseholder: -1, } @@ -658,7 +658,7 @@ func (s *state) removeReplica(rangeID RangeID, storeID StoreID) bool { } // SetSpanConfigForRange set the span config for the Range with ID RangeID. -func (s *state) SetSpanConfigForRange(rangeID RangeID, spanConfig roachpb.SpanConfig) bool { +func (s *state) SetSpanConfigForRange(rangeID RangeID, spanConfig *roachpb.SpanConfig) bool { if rng, ok := s.ranges.rangeMap[rangeID]; ok { rng.config = spanConfig return true @@ -668,7 +668,7 @@ func (s *state) SetSpanConfigForRange(rangeID RangeID, spanConfig roachpb.SpanCo // SetSpanConfig sets the span config for all ranges represented by the span, // splitting if necessary. -func (s *state) SetSpanConfig(span roachpb.Span, config roachpb.SpanConfig) { +func (s *state) SetSpanConfig(span roachpb.Span, config *roachpb.SpanConfig) { startKey := ToKey(span.Key) endKey := ToKey(span.EndKey) @@ -787,7 +787,7 @@ func (s *state) SplitRange(splitKey Key) (Range, Range, bool) { rangeID: rangeID, startKey: splitKey, desc: roachpb.RangeDescriptor{RangeID: roachpb.RangeID(rangeID), NextReplicaID: 1}, - config: defaultSpanConfig, + config: &defaultSpanConfig, replicas: make(map[StoreID]*replica), leaseholder: -1, } @@ -1242,7 +1242,7 @@ func (s *state) GetSpanConfigForKey( if rng == nil { panic(fmt.Sprintf("programming error: range for key %s doesn't exist", key)) } - return rng.config, nil + return *rng.config, nil } // Scan is added for the rangedesc.Scanner interface, required for @@ -1404,7 +1404,7 @@ type rng struct { rangeID RangeID startKey, endKey Key desc roachpb.RangeDescriptor - config roachpb.SpanConfig + config *roachpb.SpanConfig replicas map[StoreID]*replica leaseholder ReplicaID size int64 @@ -1450,7 +1450,7 @@ func (r *rng) String() string { } // SpanConfig returns the span config for this range. -func (r *rng) SpanConfig() roachpb.SpanConfig { +func (r *rng) SpanConfig() *roachpb.SpanConfig { return r.config } diff --git a/pkg/kv/kvserver/asim/state/state.go b/pkg/kv/kvserver/asim/state/state.go index 05acaa6cf5e6..9251da3c1f71 100644 --- a/pkg/kv/kvserver/asim/state/state.go +++ b/pkg/kv/kvserver/asim/state/state.go @@ -135,10 +135,10 @@ type State interface { // if it exists, otherwise it returns false. RangeSpan(RangeID) (Key, Key, bool) // SetSpanConfigForRange set the span config for the Range with ID RangeID. - SetSpanConfigForRange(RangeID, roachpb.SpanConfig) bool + SetSpanConfigForRange(RangeID, *roachpb.SpanConfig) bool // SetSpanConfig sets the span config for all ranges represented by the span, // splitting if necessary. - SetSpanConfig(roachpb.Span, roachpb.SpanConfig) + SetSpanConfig(roachpb.Span, *roachpb.SpanConfig) // SetRangeBytes sets the size of the range with ID RangeID to be equal to // the bytes given. SetRangeBytes(RangeID, int64) @@ -249,7 +249,7 @@ type Range interface { // String returns a string representing the state of the range. String() string // SpanConfig returns the span config for this range. - SpanConfig() roachpb.SpanConfig + SpanConfig() *roachpb.SpanConfig // Replicas returns all replicas which exist for this range. 
 	Replicas() []Replica
 	// Replica returns the replica that is on the store with ID StoreID if it
diff --git a/pkg/kv/kvserver/asim/state/state_test.go b/pkg/kv/kvserver/asim/state/state_test.go
index b83012f8d4c9..ce9e677730e3 100644
--- a/pkg/kv/kvserver/asim/state/state_test.go
+++ b/pkg/kv/kvserver/asim/state/state_test.go
@@ -95,7 +95,7 @@ func TestRangeMap(t *testing.T) {
 	require.Equal(t, firstRange.startKey, MinKey)
 	require.Equal(t, firstRange.desc.StartKey, MinKey.ToRKey())
 	require.Equal(t, firstRange.desc.EndKey, MaxKey.ToRKey())
-	require.Equal(t, defaultSpanConfig, firstRange.SpanConfig())
+	require.Equal(t, defaultSpanConfig, *firstRange.SpanConfig())
 
 	k2 := Key(1)
 	k3 := Key(2)
@@ -611,7 +611,7 @@ func TestSetSpanConfig(t *testing.T) {
 				Key:    tc.start.ToRKey().AsRawKey(),
 				EndKey: tc.end.ToRKey().AsRawKey(),
 			}
-			s.SetSpanConfig(span, config)
+			s.SetSpanConfig(span, &config)
 			for _, rng := range s.Ranges() {
 				start, _, ok := s.RangeSpan(rng.RangeID())
 				require.True(t, ok)
diff --git a/pkg/kv/kvserver/asim/storerebalancer/candidate_replica.go b/pkg/kv/kvserver/asim/storerebalancer/candidate_replica.go
index 4853d942b984..5a7e5ccecd41 100644
--- a/pkg/kv/kvserver/asim/storerebalancer/candidate_replica.go
+++ b/pkg/kv/kvserver/asim/storerebalancer/candidate_replica.go
@@ -79,7 +79,7 @@ func (sr *simulatorReplica) GetFirstIndex() kvpb.RaftIndex {
 
 // DescAndSpanConfig returns the authoritative range descriptor as well
 // as the span config for the replica.
-func (sr *simulatorReplica) DescAndSpanConfig() (*roachpb.RangeDescriptor, roachpb.SpanConfig) {
+func (sr *simulatorReplica) DescAndSpanConfig() (*roachpb.RangeDescriptor, *roachpb.SpanConfig) {
 	return sr.rng.Descriptor(), sr.rng.SpanConfig()
 }
 
diff --git a/pkg/kv/kvserver/helpers_test.go b/pkg/kv/kvserver/helpers_test.go
index 39d7c870e0ea..de3e6f639a0e 100644
--- a/pkg/kv/kvserver/helpers_test.go
+++ b/pkg/kv/kvserver/helpers_test.go
@@ -56,7 +56,7 @@ func (s *Store) Transport() *RaftTransport {
 }
 
 func (s *Store) FindTargetAndTransferLease(
-	ctx context.Context, repl *Replica, desc *roachpb.RangeDescriptor, conf roachpb.SpanConfig,
+	ctx context.Context, repl *Replica, desc *roachpb.RangeDescriptor, conf *roachpb.SpanConfig,
 ) (bool, error) {
 	transferStatus, err := s.replicateQueue.shedLease(
 		ctx, repl, desc, conf, allocator.TransferLeaseOptions{ExcludeLeaseRepl: true},
diff --git a/pkg/kv/kvserver/replica.go b/pkg/kv/kvserver/replica.go
index d461812d16db..22d96e1d614a 100644
--- a/pkg/kv/kvserver/replica.go
+++ b/pkg/kv/kvserver/replica.go
@@ -1061,17 +1061,23 @@ func (r *Replica) IsQuiescent() bool {
 
 // DescAndSpanConfig returns the authoritative range descriptor as well
 // as the span config for the replica.
-func (r *Replica) DescAndSpanConfig() (*roachpb.RangeDescriptor, roachpb.SpanConfig) {
+func (r *Replica) DescAndSpanConfig() (*roachpb.RangeDescriptor, *roachpb.SpanConfig) {
 	r.mu.RLock()
 	defer r.mu.RUnlock()
-	return r.mu.state.Desc, r.mu.conf
+	// This method is being removed shortly. We can't pass out a pointer to the
+	// underlying replica's SpanConfig.
+	conf := r.mu.conf
+	return r.mu.state.Desc, &conf
 }
 
 // SpanConfig returns the authoritative span config for the replica.
-func (r *Replica) SpanConfig() roachpb.SpanConfig {
+func (r *Replica) SpanConfig() *roachpb.SpanConfig {
 	r.mu.RLock()
 	defer r.mu.RUnlock()
-	return r.mu.conf
+	// This method is being removed shortly. We can't pass out a pointer to the
+	// underlying replica's SpanConfig.
+	conf := r.mu.conf
+	return &conf
 }
 
 // Desc returns the authoritative range descriptor, acquiring a replica lock in
diff --git a/pkg/kv/kvserver/replica_command.go b/pkg/kv/kvserver/replica_command.go
index 8c0aaacd2011..fedb98cbfb58 100644
--- a/pkg/kv/kvserver/replica_command.go
+++ b/pkg/kv/kvserver/replica_command.go
@@ -3561,7 +3561,7 @@ type RelocateOneOptions interface {
 	// StorePool returns the store's configured store pool.
 	StorePool() storepool.AllocatorStorePool
 	// SpanConfig returns the span configuration for the range with start key.
-	SpanConfig(ctx context.Context, startKey roachpb.RKey) (roachpb.SpanConfig, error)
+	SpanConfig(ctx context.Context, startKey roachpb.RKey) (*roachpb.SpanConfig, error)
 	// LeaseHolder returns the descriptor of the replica which holds the lease
 	// on the range with start key.
 	Leaseholder(ctx context.Context, startKey roachpb.RKey) (roachpb.ReplicaDescriptor, error)
@@ -3584,16 +3584,16 @@ func (roo *replicaRelocateOneOptions) StorePool() storepool.AllocatorStorePool {
 // SpanConfig returns the span configuration for the range with start key.
 func (roo *replicaRelocateOneOptions) SpanConfig(
 	ctx context.Context, startKey roachpb.RKey,
-) (roachpb.SpanConfig, error) {
+) (*roachpb.SpanConfig, error) {
 	confReader, err := roo.store.GetConfReader(ctx)
 	if err != nil {
-		return roachpb.SpanConfig{}, errors.Wrap(err, "can't relocate range")
+		return nil, errors.Wrap(err, "can't relocate range")
 	}
 	conf, err := confReader.GetSpanConfigForKey(ctx, startKey)
 	if err != nil {
-		return roachpb.SpanConfig{}, err
+		return nil, err
 	}
-	return conf, nil
+	return &conf, nil
 }
 
 // Leaseholder returns the descriptor of the replica which holds the lease on
@@ -3996,7 +3996,7 @@ func (r *Replica) adminScatter(
 	var allowLeaseTransfer bool
 	var err error
 	requeue := true
-	canTransferLease := func(ctx context.Context, repl plan.LeaseCheckReplica, conf roachpb.SpanConfig) bool {
+	canTransferLease := func(ctx context.Context, repl plan.LeaseCheckReplica, conf *roachpb.SpanConfig) bool {
 		return allowLeaseTransfer
 	}
 	for re := retry.StartWithCtx(ctx, retryOpts); re.Next(); {
diff --git a/pkg/kv/kvserver/replica_range_lease.go b/pkg/kv/kvserver/replica_range_lease.go
index 4fe38b618491..f5a36ff6ff50 100644
--- a/pkg/kv/kvserver/replica_range_lease.go
+++ b/pkg/kv/kvserver/replica_range_lease.go
@@ -1571,7 +1571,7 @@ const (
 // LeaseViolatesPreferences checks if this replica owns the lease and if it
 // violates the lease preferences defined in the span config. If no preferences
 // are defined then it will return false and consider it to be in conformance.
-func (r *Replica) LeaseViolatesPreferences(ctx context.Context, conf roachpb.SpanConfig) bool {
+func (r *Replica) LeaseViolatesPreferences(ctx context.Context, conf *roachpb.SpanConfig) bool {
 	storeID := r.store.StoreID()
 	preferences := conf.LeasePreferences
 	leaseStatus := r.CurrentLeaseStatus(ctx)
diff --git a/pkg/kv/kvserver/replica_rankings.go b/pkg/kv/kvserver/replica_rankings.go
index b7f13dacd444..043a38bc4bd4 100644
--- a/pkg/kv/kvserver/replica_rankings.go
+++ b/pkg/kv/kvserver/replica_rankings.go
@@ -47,7 +47,7 @@ type CandidateReplica interface {
 	GetFirstIndex() kvpb.RaftIndex
 	// DescAndSpanConfig returns the authoritative range descriptor as well
 	// as the span config for the replica.
-	DescAndSpanConfig() (*roachpb.RangeDescriptor, roachpb.SpanConfig)
+	DescAndSpanConfig() (*roachpb.RangeDescriptor, *roachpb.SpanConfig)
 	// Desc returns the authoritative range descriptor.
 	Desc() *roachpb.RangeDescriptor
 	// RangeUsageInfo returns usage information (sizes and traffic) needed by
diff --git a/pkg/kv/kvserver/replica_test.go b/pkg/kv/kvserver/replica_test.go
index 4c202827b389..e678f9829573 100644
--- a/pkg/kv/kvserver/replica_test.go
+++ b/pkg/kv/kvserver/replica_test.go
@@ -13465,7 +13465,7 @@ func TestReplicateQueueProcessOne(t *testing.T) {
 		tc.repl,
 		tc.repl.Desc(),
 		tc.repl.SpanConfig(),
-		func(ctx context.Context, repl plan.LeaseCheckReplica, conf roachpb.SpanConfig) bool { return false },
+		func(ctx context.Context, repl plan.LeaseCheckReplica, conf *roachpb.SpanConfig) bool { return false },
 		false, /* scatter */
 		true,  /* dryRun */
 	)
diff --git a/pkg/kv/kvserver/replicate_queue.go b/pkg/kv/kvserver/replicate_queue.go
index e3a85ad6e20d..80df32dcd32f 100644
--- a/pkg/kv/kvserver/replicate_queue.go
+++ b/pkg/kv/kvserver/replicate_queue.go
@@ -640,7 +640,7 @@ func (rq *replicateQueue) shouldQueue(
 		now,
 		repl,
 		desc,
-		conf,
+		&conf,
 		rq.canTransferLeaseFrom,
 	)
 }
@@ -665,7 +665,7 @@ func (rq *replicateQueue) process(
 	// usually signaling that a rebalancing reservation could not be made with the
 	// selected target.
 	for r := retry.StartWithCtx(ctx, retryOpts); r.Next(); {
-		requeue, err := rq.processOneChangeWithTracing(ctx, repl, desc, conf)
+		requeue, err := rq.processOneChangeWithTracing(ctx, repl, desc, &conf)
 		if isSnapshotError(err) {
 			// If ChangeReplicas failed because the snapshot failed, we attempt to
 			// retry the operation. The most likely causes of the snapshot failing
@@ -749,7 +749,7 @@ func filterTracingSpans(rec tracingpb.Recording, opNamesToFilter ...string) trac
 // logging the resulting traces to the DEV channel in the case of errors or
 // when the configured log traces threshold is exceeded.
 func (rq *replicateQueue) processOneChangeWithTracing(
-	ctx context.Context, repl *Replica, desc *roachpb.RangeDescriptor, conf roachpb.SpanConfig,
+	ctx context.Context, repl *Replica, desc *roachpb.RangeDescriptor, conf *roachpb.SpanConfig,
 ) (requeue bool, _ error) {
 	processStart := timeutil.Now()
 	ctx, sp := tracing.EnsureChildSpan(ctx, rq.Tracer, "process replica",
@@ -832,7 +832,9 @@ func (rq *replicateQueue) applyChange(
 // ShouldRequeue determines whether a replica should be requeued into the
 // replicate queue, using the planned change and error returned from either
 // application or planning.
-func ShouldRequeue(ctx context.Context, change plan.ReplicateChange, conf roachpb.SpanConfig) bool {
+func ShouldRequeue(
+	ctx context.Context, change plan.ReplicateChange, conf *roachpb.SpanConfig,
+) bool {
 	var requeue bool
 
 	if _, ok := change.Op.(plan.AllocationNoop); ok {
@@ -868,7 +870,7 @@ func (rq *replicateQueue) processOneChange(
 	ctx context.Context,
 	repl *Replica,
 	desc *roachpb.RangeDescriptor,
-	conf roachpb.SpanConfig,
+	conf *roachpb.SpanConfig,
 	canTransferLeaseFrom plan.CanTransferLeaseFrom,
 	scatter, dryRun bool,
 ) (requeue bool, _ error) {
@@ -990,7 +992,7 @@ func (rq *replicateQueue) shedLease(
 	ctx context.Context,
 	repl *Replica,
 	desc *roachpb.RangeDescriptor,
-	conf roachpb.SpanConfig,
+	conf *roachpb.SpanConfig,
 	opts allocator.TransferLeaseOptions,
 ) (allocator.LeaseTransferOutcome, error) {
 	rangeUsageInfo := repl.RangeUsageInfo()
@@ -1120,7 +1122,7 @@ func (rq *replicateQueue) changeReplicas(
 // replica. It considers two factors if the replica is in-conformance with
 // lease preferences and the last time a transfer occurred to avoid thrashing.
func (rq *replicateQueue) canTransferLeaseFrom( - ctx context.Context, repl plan.LeaseCheckReplica, conf roachpb.SpanConfig, + ctx context.Context, repl plan.LeaseCheckReplica, conf *roachpb.SpanConfig, ) bool { if !repl.OwnsValidLease(ctx, rq.store.cfg.Clock.NowAsClockTimestamp()) { // This replica is not the leaseholder, so it can't transfer the lease. diff --git a/pkg/kv/kvserver/store.go b/pkg/kv/kvserver/store.go index 6241538fd7dc..0c0ecfc84154 100644 --- a/pkg/kv/kvserver/store.go +++ b/pkg/kv/kvserver/store.go @@ -3497,7 +3497,7 @@ func (s *Store) ReplicateQueueDryRun( s.cfg.AmbientCtx.Tracer, "replicate queue dry run", ) defer collectAndFinish() - canTransferLease := func(ctx context.Context, repl plan.LeaseCheckReplica, conf roachpb.SpanConfig) bool { + canTransferLease := func(ctx context.Context, repl plan.LeaseCheckReplica, conf *roachpb.SpanConfig) bool { return true } desc := repl.Desc() @@ -3561,7 +3561,7 @@ func (s *Store) AllocatorCheckRange( storePool = s.cfg.StorePool } - action, _ := s.allocator.ComputeAction(ctx, storePool, conf, desc) + action, _ := s.allocator.ComputeAction(ctx, storePool, &conf, desc) // In the case that the action does not require a target, return immediately. if !(action.Add() || action.Replace()) { @@ -3575,7 +3575,7 @@ func (s *Store) AllocatorCheckRange( return action, roachpb.ReplicationTarget{}, sp.FinishAndGetConfiguredRecording(), err } - target, _, err := s.allocator.AllocateTarget(ctx, storePool, conf, + target, _, err := s.allocator.AllocateTarget(ctx, storePool, &conf, filteredVoters, filteredNonVoters, replacing, action.ReplicaStatus(), action.TargetReplicaType(), ) if err == nil { @@ -3586,7 +3586,7 @@ func (s *Store) AllocatorCheckRange( fragileQuorumErr := s.allocator.CheckAvoidsFragileQuorum( ctx, storePool, - conf, + &conf, desc.Replicas().VoterDescriptors(), filteredVoters, action.ReplicaStatus(), diff --git a/pkg/kv/kvserver/store_rebalancer.go b/pkg/kv/kvserver/store_rebalancer.go index 492c6a445bd9..1d359eed9484 100644 --- a/pkg/kv/kvserver/store_rebalancer.go +++ b/pkg/kv/kvserver/store_rebalancer.go @@ -821,7 +821,7 @@ func (sr *StoreRebalancer) chooseLeaseToTransfer( type rangeRebalanceContext struct { candidateReplica CandidateReplica rangeDesc *roachpb.RangeDescriptor - conf roachpb.SpanConfig + conf *roachpb.SpanConfig } func (sr *StoreRebalancer) chooseRangeToRebalance(